-rw-r--r--Documentation/DocBook/mac80211.tmpl12
-rw-r--r--Documentation/feature-removal-schedule.txt4
-rw-r--r--Documentation/networking/dccp.txt3
-rw-r--r--Documentation/networking/ip-sysctl.txt6
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/tsec.txt6
-rw-r--r--drivers/acpi/event.c6
-rw-r--r--drivers/char/pcmcia/synclink_cs.c18
-rw-r--r--drivers/char/synclink.c18
-rw-r--r--drivers/char/synclink_gt.c18
-rw-r--r--drivers/char/synclinkmp.c18
-rw-r--r--drivers/connector/cn_queue.c80
-rw-r--r--drivers/connector/connector.c19
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c15
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c6
-rw-r--r--drivers/net/3c501.c17
-rw-r--r--drivers/net/3c505.c18
-rw-r--r--drivers/net/3c507.c15
-rw-r--r--drivers/net/3c509.c25
-rw-r--r--drivers/net/3c515.c21
-rw-r--r--drivers/net/3c523.c25
-rw-r--r--drivers/net/3c527.c19
-rw-r--r--drivers/net/3c59x.c55
-rw-r--r--drivers/net/8139cp.c6
-rw-r--r--drivers/net/8139too.c6
-rw-r--r--drivers/net/82596.c17
-rw-r--r--drivers/net/Kconfig16
-rw-r--r--drivers/net/Makefile5
-rw-r--r--drivers/net/amd8111e.c6
-rw-r--r--drivers/net/arcnet/arc-rawmode.c4
-rw-r--r--drivers/net/arcnet/arcnet.c71
-rw-r--r--drivers/net/arcnet/capmode.c6
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/arcnet/com20020.c11
-rw-r--r--drivers/net/arcnet/rfc1051.c12
-rw-r--r--drivers/net/arcnet/rfc1201.c47
-rw-r--r--drivers/net/arm/ep93xx_eth.c8
-rw-r--r--drivers/net/arm/ixp4xx_eth.c12
-rw-r--r--drivers/net/arm/ks8695net.c2
-rw-r--r--drivers/net/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/au1000_eth.c1051
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/bfin_mac.c12
-rw-r--r--drivers/net/bmac.c2
-rw-r--r--drivers/net/bnx2.c24
-rw-r--r--drivers/net/bnx2x_main.c7
-rw-r--r--drivers/net/bonding/bond_3ad.h2
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/cassini.c8
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/cpmac.c12
-rw-r--r--drivers/net/cxgb3/adapter.h14
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c78
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c12
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h7
-rw-r--r--drivers/net/cxgb3/sge.c120
-rw-r--r--drivers/net/declance.c6
-rw-r--r--drivers/net/defxx.c2
-rw-r--r--drivers/net/depca.c6
-rw-r--r--drivers/net/e100.c6
-rw-r--r--drivers/net/e1000/e1000.h2
-rw-r--r--drivers/net/e1000/e1000_main.c41
-rw-r--r--drivers/net/e1000e/82571.c144
-rw-r--r--drivers/net/e1000e/defines.h1
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/ethtool.c2
-rw-r--r--drivers/net/e1000e/hw.h8
-rw-r--r--drivers/net/e1000e/lib.c4
-rw-r--r--drivers/net/e1000e/netdev.c99
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c131
-rw-r--r--drivers/net/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/enic/enic.h3
-rw-r--r--drivers/net/enic/enic_main.c84
-rw-r--r--drivers/net/enic/vnic_dev.c33
-rw-r--r--drivers/net/enic/vnic_dev.h2
-rw-r--r--drivers/net/enic/vnic_devcmd.h8
-rw-r--r--drivers/net/enic/vnic_intr.h14
-rw-r--r--drivers/net/epic100.c6
-rw-r--r--drivers/net/fec.c446
-rw-r--r--drivers/net/fec.h11
-rw-r--r--drivers/net/forcedeth.c167
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c4
-rw-r--r--drivers/net/fsl_pq_mdio.c463
-rw-r--r--drivers/net/fsl_pq_mdio.h45
-rw-r--r--drivers/net/gianfar.c82
-rw-r--r--drivers/net/gianfar.h17
-rw-r--r--drivers/net/gianfar_ethtool.c1
-rw-r--r--drivers/net/gianfar_mii.c12
-rw-r--r--drivers/net/gianfar_mii.h54
-rw-r--r--drivers/net/gianfar_sysfs.c33
-rw-r--r--drivers/net/hamachi.c8
-rw-r--r--drivers/net/hamradio/6pack.c16
-rw-r--r--drivers/net/hamradio/baycom_epp.c43
-rw-r--r--drivers/net/hamradio/bpqether.c40
-rw-r--r--drivers/net/hamradio/dmascc.c54
-rw-r--r--drivers/net/hamradio/hdlcdrv.c45
-rw-r--r--drivers/net/hamradio/mkiss.c46
-rw-r--r--drivers/net/hamradio/scc.c21
-rw-r--r--drivers/net/hamradio/yam.c61
-rw-r--r--drivers/net/ibmveth.c8
-rw-r--r--drivers/net/igb/Makefile2
-rw-r--r--drivers/net/igb/e1000_82575.c97
-rw-r--r--drivers/net/igb/e1000_82575.h4
-rw-r--r--drivers/net/igb/e1000_defines.h10
-rw-r--r--drivers/net/igb/e1000_hw.h31
-rw-r--r--drivers/net/igb/e1000_mac.c35
-rw-r--r--drivers/net/igb/e1000_mac.h3
-rw-r--r--drivers/net/igb/e1000_nvm.c44
-rw-r--r--drivers/net/igb/e1000_phy.c349
-rw-r--r--drivers/net/igb/e1000_phy.h3
-rw-r--r--drivers/net/igb/e1000_regs.h2
-rw-r--r--drivers/net/igb/igb.h40
-rw-r--r--drivers/net/igb/igb_ethtool.c138
-rw-r--r--drivers/net/igb/igb_main.c451
-rw-r--r--drivers/net/irda/sir_dev.c3
-rw-r--r--drivers/net/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h22
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c326
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c143
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c65
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c473
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h47
-rw-r--r--drivers/net/ixp2000/ixpdev.c4
-rw-r--r--drivers/net/jazzsonic.c6
-rw-r--r--drivers/net/jme.h6
-rw-r--r--drivers/net/korina.c4
-rw-r--r--drivers/net/macb.c20
-rw-r--r--drivers/net/macsonic.c15
-rw-r--r--drivers/net/mlx4/en_rx.c5
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/natsemi.c6
-rw-r--r--drivers/net/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/niu.c11
-rw-r--r--drivers/net/pasemi_mac.c6
-rw-r--r--drivers/net/pcnet32.c6
-rw-r--r--drivers/net/phy/mdio-gpio.c13
-rw-r--r--drivers/net/phy/mdio_bus.c54
-rw-r--r--drivers/net/ppp_generic.c275
-rw-r--r--drivers/net/pppoe.c515
-rw-r--r--drivers/net/pppol2tp.c192
-rw-r--r--drivers/net/pppox.c3
-rw-r--r--drivers/net/ps3_gelic_net.c26
-rw-r--r--drivers/net/ps3_gelic_wireless.c28
-rw-r--r--drivers/net/qla3xxx.c6
-rw-r--r--drivers/net/qlge/qlge_main.c7
-rw-r--r--drivers/net/r6040.c4
-rw-r--r--drivers/net/r8169.c6
-rw-r--r--drivers/net/s2io.c14
-rw-r--r--drivers/net/sb1250-mac.c16
-rw-r--r--drivers/net/sc92031.c27
-rw-r--r--drivers/net/sfc/Kconfig1
-rw-r--r--drivers/net/sfc/bitfield.h4
-rw-r--r--drivers/net/sfc/efx.c15
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/net_driver.h9
-rw-r--r--drivers/net/sfc/rx.c209
-rw-r--r--drivers/net/sfc/rx.h3
-rw-r--r--drivers/net/sfc/sfe4001.c1
-rw-r--r--drivers/net/sfc/tenxpress.c1
-rw-r--r--drivers/net/skge.c6
-rw-r--r--drivers/net/sky2.c267
-rw-r--r--drivers/net/smc911x.c2
-rw-r--r--drivers/net/smc91x.c140
-rw-r--r--drivers/net/smc91x.h10
-rw-r--r--drivers/net/smsc911x.c152
-rw-r--r--drivers/net/smsc9420.c8
-rw-r--r--drivers/net/spider_net.c12
-rw-r--r--drivers/net/starfire.c6
-rw-r--r--drivers/net/sungem.c6
-rw-r--r--drivers/net/tc35815.c10
-rw-r--r--drivers/net/tehuti.c6
-rw-r--r--drivers/net/tg3.c18
-rw-r--r--drivers/net/tokenring/3c359.c17
-rw-r--r--drivers/net/tokenring/abyss.c10
-rw-r--r--drivers/net/tokenring/ibmtr.c43
-rw-r--r--drivers/net/tokenring/lanstreamer.c43
-rw-r--r--drivers/net/tokenring/lanstreamer.h1
-rw-r--r--drivers/net/tokenring/olympic.c38
-rw-r--r--drivers/net/tokenring/olympic.h1
-rw-r--r--drivers/net/tokenring/tms380tr.c21
-rw-r--r--drivers/net/tokenring/tms380tr.h1
-rw-r--r--drivers/net/tokenring/tmspci.c4
-rw-r--r--drivers/net/tsi108_eth.c8
-rw-r--r--drivers/net/tulip/interrupt.c10
-rw-r--r--drivers/net/tun.c528
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/typhoon.h234
-rw-r--r--drivers/net/ucc_geth.c22
-rw-r--r--drivers/net/ucc_geth.h14
-rw-r--r--drivers/net/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/ucc_geth_mii.c295
-rw-r--r--drivers/net/ucc_geth_mii.h101
-rw-r--r--drivers/net/usb/hso.c3
-rw-r--r--drivers/net/usb/rndis_host.c25
-rw-r--r--drivers/net/usb/smsc95xx.c8
-rw-r--r--drivers/net/via-rhine.c4
-rw-r--r--drivers/net/via-velocity.h6
-rw-r--r--drivers/net/virtio_net.c244
-rw-r--r--drivers/net/wan/c101.c12
-rw-r--r--drivers/net/wan/cosa.c14
-rw-r--r--drivers/net/wan/dscc4.c18
-rw-r--r--drivers/net/wan/farsync.c18
-rw-r--r--drivers/net/wan/hd64572.c4
-rw-r--r--drivers/net/wan/hdlc.c31
-rw-r--r--drivers/net/wan/hdlc_cisco.c17
-rw-r--r--drivers/net/wan/hdlc_fr.c44
-rw-r--r--drivers/net/wan/hdlc_ppp.c6
-rw-r--r--drivers/net/wan/hdlc_raw.c5
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c8
-rw-r--r--drivers/net/wan/hdlc_x25.c2
-rw-r--r--drivers/net/wan/hostess_sv11.c12
-rw-r--r--drivers/net/wan/ixp4xx_hss.c24
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c19
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c17
-rw-r--r--drivers/net/wan/n2.c12
-rw-r--r--drivers/net/wan/pc300too.c12
-rw-r--r--drivers/net/wan/pci200syn.c12
-rw-r--r--drivers/net/wan/sealevel.c12
-rw-r--r--drivers/net/wan/wanxl.c14
-rw-r--r--drivers/net/wimax/i2400m/fw.c12
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h16
-rw-r--r--drivers/net/wimax/i2400m/netdev.c16
-rw-r--r--drivers/net/wimax/i2400m/sdio.c16
-rw-r--r--drivers/net/wimax/i2400m/usb.c16
-rw-r--r--drivers/net/wireless/Kconfig128
-rw-r--r--drivers/net/wireless/airo.c1137
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h74
-rw-r--r--drivers/net/wireless/ath5k/attach.c18
-rw-r--r--drivers/net/wireless/ath5k/base.c119
-rw-r--r--drivers/net/wireless/ath5k/base.h3
-rw-r--r--drivers/net/wireless/ath5k/caps.c6
-rw-r--r--drivers/net/wireless/ath5k/debug.c43
-rw-r--r--drivers/net/wireless/ath5k/debug.h1
-rw-r--r--drivers/net/wireless/ath5k/eeprom.c168
-rw-r--r--drivers/net/wireless/ath5k/eeprom.h1
-rw-r--r--drivers/net/wireless/ath5k/gpio.c10
-rw-r--r--drivers/net/wireless/ath5k/initvals.c1575
-rw-r--r--drivers/net/wireless/ath5k/pcu.c24
-rw-r--r--drivers/net/wireless/ath5k/phy.c2029
-rw-r--r--drivers/net/wireless/ath5k/qcu.c47
-rw-r--r--drivers/net/wireless/ath5k/reg.h124
-rw-r--r--drivers/net/wireless/ath5k/reset.c948
-rw-r--r--drivers/net/wireless/ath5k/rfbuffer.h1181
-rw-r--r--drivers/net/wireless/ath5k/rfgain.h516
-rw-r--r--drivers/net/wireless/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath9k/ahb.c185
-rw-r--r--drivers/net/wireless/ath9k/ani.c281
-rw-r--r--drivers/net/wireless/ath9k/ani.h138
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h1651
-rw-r--r--drivers/net/wireless/ath9k/beacon.c126
-rw-r--r--drivers/net/wireless/ath9k/calib.c398
-rw-r--r--drivers/net/wireless/ath9k/calib.h124
-rw-r--r--drivers/net/wireless/ath9k/core.h754
-rw-r--r--drivers/net/wireless/ath9k/debug.c228
-rw-r--r--drivers/net/wireless/ath9k/debug.h153
-rw-r--r--drivers/net/wireless/ath9k/eeprom.c3000
-rw-r--r--drivers/net/wireless/ath9k/eeprom.h473
-rw-r--r--drivers/net/wireless/ath9k/hw.c1487
-rw-r--r--drivers/net/wireless/ath9k/hw.h1555
-rw-r--r--drivers/net/wireless/ath9k/initvals.h565
-rw-r--r--drivers/net/wireless/ath9k/mac.c203
-rw-r--r--drivers/net/wireless/ath9k/mac.h676
-rw-r--r--drivers/net/wireless/ath9k/main.c1543
-rw-r--r--drivers/net/wireless/ath9k/pci.c303
-rw-r--r--drivers/net/wireless/ath9k/phy.c221
-rw-r--r--drivers/net/wireless/ath9k/phy.h14
-rw-r--r--drivers/net/wireless/ath9k/rc.c210
-rw-r--r--drivers/net/wireless/ath9k/rc.h18
-rw-r--r--drivers/net/wireless/ath9k/recv.c87
-rw-r--r--drivers/net/wireless/ath9k/reg.h91
-rw-r--r--drivers/net/wireless/ath9k/regd.c1211
-rw-r--r--drivers/net/wireless/ath9k/regd.h203
-rw-r--r--drivers/net/wireless/ath9k/regd_common.h2058
-rw-r--r--drivers/net/wireless/ath9k/xmit.c2839
-rw-r--r--drivers/net/wireless/atmel.c20
-rw-r--r--drivers/net/wireless/b43/Kconfig14
-rw-r--r--drivers/net/wireless/b43/Makefile1
-rw-r--r--drivers/net/wireless/b43/b43.h40
-rw-r--r--drivers/net/wireless/b43/debugfs.c44
-rw-r--r--drivers/net/wireless/b43/debugfs.h5
-rw-r--r--drivers/net/wireless/b43/main.c378
-rw-r--r--drivers/net/wireless/b43/main.h25
-rw-r--r--drivers/net/wireless/b43/phy_g.c3
-rw-r--r--drivers/net/wireless/b43/phy_lp.c395
-rw-r--r--drivers/net/wireless/b43/phy_lp.h329
-rw-r--r--drivers/net/wireless/b43/tables_lpphy.c394
-rw-r--r--drivers/net/wireless/b43/tables_lpphy.h31
-rw-r--r--drivers/net/wireless/b43legacy/leds.c8
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c6
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig53
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-100.c73
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-commands.h1702
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debug.h167
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h188
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h227
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-io.h404
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c115
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c165
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.h206
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c1161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h761
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c215
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c237
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-core.h)59
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c158
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c108
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c210
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1205
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h511
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c648
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h183
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h140
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c116
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c95
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c287
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c5106
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/debugfs.c14
-rw-r--r--drivers/net/wireless/libertas/defs.h2
-rw-r--r--drivers/net/wireless/libertas/host.h1
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h8
-rw-r--r--drivers/net/wireless/libertas/if_cs.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1218
-rw-r--r--drivers/net/wireless/libertas/if_spi.h208
-rw-r--r--drivers/net/wireless/libertas/main.c5
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c2
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/orinoco/Kconfig120
-rw-r--r--drivers/net/wireless/orinoco/Makefile3
-rw-r--r--drivers/net/wireless/orinoco/airport.c37
-rw-r--r--drivers/net/wireless/orinoco/fw.c340
-rw-r--r--drivers/net/wireless/orinoco/fw.h16
-rw-r--r--drivers/net/wireless/orinoco/hermes.c116
-rw-r--r--drivers/net/wireless/orinoco/hermes.h35
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c37
-rw-r--r--drivers/net/wireless/orinoco/hw.c586
-rw-r--r--drivers/net/wireless/orinoco/hw.h47
-rw-r--r--drivers/net/wireless/orinoco/main.c2654
-rw-r--r--drivers/net/wireless/orinoco/main.h63
-rw-r--r--drivers/net/wireless/orinoco/mic.c79
-rw-r--r--drivers/net/wireless/orinoco/mic.h22
-rw-r--r--drivers/net/wireless/orinoco/orinoco.c6148
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h26
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c33
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c7
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c5
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.h12
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c3
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c4
-rw-r--r--drivers/net/wireless/orinoco/scan.c233
-rw-r--r--drivers/net/wireless/orinoco/scan.h29
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c19
-rw-r--r--drivers/net/wireless/orinoco/wext.c2325
-rw-r--r--drivers/net/wireless/orinoco/wext.h13
-rw-r--r--drivers/net/wireless/p54/Kconfig10
-rw-r--r--drivers/net/wireless/p54/Makefile1
-rw-r--r--drivers/net/wireless/p54/p54.h38
-rw-r--r--drivers/net/wireless/p54/p54common.c631
-rw-r--r--drivers/net/wireless/p54/p54common.h125
-rw-r--r--drivers/net/wireless/p54/p54pci.c6
-rw-r--r--drivers/net/wireless/p54/p54spi.c770
-rw-r--r--drivers/net/wireless/p54/p54spi.h125
-rw-r--r--drivers/net/wireless/p54/p54spi_eeprom.h678
-rw-r--r--drivers/net/wireless/p54/p54usb.c87
-rw-r--r--drivers/net/wireless/p54/p54usb.h1
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c5
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h6
-rw-r--r--drivers/net/wireless/rndis_wlan.c103
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig7
-rw-r--r--drivers/net/wireless/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c100
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c128
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c69
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h154
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c416
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c29
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h106
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c461
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c77
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c216
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00rfkill.c127
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c64
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h13
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c241
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c196
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h15
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c86
-rw-r--r--drivers/net/wireless/strip.c2
-rw-r--r--drivers/net/wireless/wl3501_cs.c9
-rw-r--r--drivers/net/wireless/zd1201.c7
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c25
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/net/xtsonic.c2
-rw-r--r--drivers/net/znet.c17
-rw-r--r--drivers/s390/net/claw.c445
-rw-r--r--drivers/s390/net/ctcm_main.c24
-rw-r--r--drivers/s390/net/lcs.c21
-rw-r--r--drivers/s390/net/netiucv.c14
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.h2
-rw-r--r--drivers/scsi/scsi_transport_fc.c16
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c12
-rw-r--r--drivers/ssb/Makefile1
-rw-r--r--drivers/ssb/b43_pci_bridge.c2
-rw-r--r--drivers/ssb/driver_chipcommon.c14
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c508
-rw-r--r--drivers/ssb/pci.c74
-rw-r--r--drivers/usb/gadget/rndis.c92
-rw-r--r--drivers/video/uvesafb.c5
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/dquot.c5
-rw-r--r--include/linux/arcdevice.h9
-rw-r--r--include/linux/ath9k_platform.h28
-rw-r--r--include/linux/com20020.h1
-rw-r--r--include/linux/connector.h8
-rw-r--r--include/linux/dccp.h51
-rw-r--r--include/linux/etherdevice.h21
-rw-r--r--include/linux/hdlc.h5
-rw-r--r--include/linux/hdlcdrv.h1
-rw-r--r--include/linux/ibmtr.h2
-rw-r--r--include/linux/ieee80211.h181
-rw-r--r--include/linux/if.h1
-rw-r--r--include/linux/if_ether.h8
-rw-r--r--include/linux/if_pppox.h20
-rw-r--r--include/linux/if_tun.h2
-rw-r--r--include/linux/if_tunnel.h16
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/mroute.h18
-rw-r--r--include/linux/ncp_no.h26
-rw-r--r--include/linux/netdevice.h103
-rw-r--r--include/linux/netfilter_bridge.h4
-rw-r--r--include/linux/nl80211.h105
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/pim.h4
-rw-r--r--include/linux/ppp_channel.h4
-rw-r--r--include/linux/sctp.h90
-rw-r--r--include/linux/skbuff.h26
-rw-r--r--include/linux/smsc911x.h3
-rw-r--r--include/linux/spi/libertas_spi.h32
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h224
-rw-r--r--include/linux/ssb/ssb_regs.h36
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/tcp.h20
-rw-r--r--include/linux/usb/rndis_host.h85
-rw-r--r--include/linux/virtio_net.h71
-rw-r--r--include/linux/wireless.h12
-rw-r--r--include/net/atmclip.h1
-rw-r--r--include/net/cfg80211.h238
-rw-r--r--include/net/inet_ecn.h4
-rw-r--r--include/net/inet_hashtables.h4
-rw-r--r--include/net/ip_vs.h4
-rw-r--r--include/net/ipv6.h4
-rw-r--r--include/net/ipx.h2
-rw-r--r--include/net/mac80211.h234
-rw-r--r--include/net/netns/ipv4.h13
-rw-r--r--include/net/netrom.h4
-rw-r--r--include/net/phonet/phonet.h1
-rw-r--r--include/net/phonet/pn_dev.h5
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/sch_generic.h7
-rw-r--r--include/net/sock.h5
-rw-r--r--include/net/transp_v6.h2
-rw-r--r--include/net/wireless.h71
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--kernel/sysctl_check.c1
-rw-r--r--lib/kobject_uevent.c3
-rw-r--r--net/802/psnap.c2
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/8021q/vlan_core.c43
-rw-r--r--net/Kconfig2
-rw-r--r--net/appletalk/ddp.c4
-rw-r--r--net/appletalk/dev.c10
-rw-r--r--net/atm/br2684.c58
-rw-r--r--net/atm/clip.c30
-rw-r--r--net/atm/lec.c64
-rw-r--r--net/atm/lec.h1
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_iface.c13
-rw-r--r--net/bridge/br_netfilter.c2
-rw-r--r--net/can/af_can.c2
-rw-r--r--net/core/dev.c232
-rw-r--r--net/core/skbuff.c169
-rw-r--r--net/core/sock.c8
-rw-r--r--net/dccp/dccp.h21
-rw-r--r--net/dccp/feat.c232
-rw-r--r--net/dccp/feat.h21
-rw-r--r--net/dccp/minisocks.c11
-rw-r--r--net/dccp/options.c8
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dccp/sysctl.c43
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/dsa/mv88e6123_61_65.c2
-rw-r--r--net/dsa/mv88e6131.c2
-rw-r--r--net/dsa/tag_dsa.c2
-rw-r--r--net/dsa/tag_edsa.c2
-rw-r--r--net/dsa/tag_trailer.c2
-rw-r--r--net/econet/af_econet.c2
-rw-r--r--net/ipv4/af_inet.c26
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/devinet.c9
-rw-r--r--net/ipv4/inet_connection_sock.c42
-rw-r--r--net/ipv4/inet_hashtables.c12
-rw-r--r--net/ipv4/ip_gre.c131
-rw-r--r--net/ipv4/ipconfig.c8
-rw-r--r--net/ipv4/ipmr.c464
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c4
-rw-r--r--net/ipv4/route.c15
-rw-r--r--net/ipv4/tcp.c27
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/addrconf.c45
-rw-r--r--net/ipv6/af_inet6.c32
-rw-r--r--net/ipv6/ndisc.c16
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/ipx/af_ipx.c4
-rw-r--r--net/irda/irmod.c2
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/mac80211/Makefile5
-rw-r--r--net/mac80211/aes_cmac.c135
-rw-r--r--net/mac80211/aes_cmac.h19
-rw-r--r--net/mac80211/agg-rx.c302
-rw-r--r--net/mac80211/agg-tx.c636
-rw-r--r--net/mac80211/cfg.c164
-rw-r--r--net/mac80211/debugfs.c59
-rw-r--r--net/mac80211/debugfs_key.c79
-rw-r--r--net/mac80211/debugfs_key.h10
-rw-r--r--net/mac80211/debugfs_sta.c5
-rw-r--r--net/mac80211/ht.c884
-rw-r--r--net/mac80211/ieee80211_i.h147
-rw-r--r--net/mac80211/iface.c97
-rw-r--r--net/mac80211/key.c113
-rw-r--r--net/mac80211/key.h16
-rw-r--r--net/mac80211/main.c150
-rw-r--r--net/mac80211/mesh.c15
-rw-r--r--net/mac80211/mesh.h10
-rw-r--r--net/mac80211/mesh_hwmp.c7
-rw-r--r--net/mac80211/mesh_plink.c38
-rw-r--r--net/mac80211/mlme.c942
-rw-r--r--net/mac80211/pm.c117
-rw-r--r--net/mac80211/rx.c385
-rw-r--r--net/mac80211/scan.c642
-rw-r--r--net/mac80211/spectmgmt.c103
-rw-r--r--net/mac80211/sta_info.c37
-rw-r--r--net/mac80211/sta_info.h8
-rw-r--r--net/mac80211/tx.c93
-rw-r--r--net/mac80211/util.c29
-rw-r--r--net/mac80211/wext.c243
-rw-r--r--net/mac80211/wpa.c152
-rw-r--r--net/mac80211/wpa.h5
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c4
-rw-r--r--net/netfilter/nf_conntrack_amanda.c4
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c8
-rw-r--r--net/netfilter/nf_conntrack_netbios_ns.c2
-rw-r--r--net/netfilter/nf_conntrack_pptp.c2
-rw-r--r--net/netfilter/nf_tproxy_core.c1
-rw-r--r--net/netlink/af_netlink.c8
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/netrom/nr_dev.c26
-rw-r--r--net/phonet/af_phonet.c31
-rw-r--r--net/phonet/pn_dev.c119
-rw-r--r--net/phonet/pn_netlink.c24
-rw-r--r--net/rose/af_rose.c3
-rw-r--r--net/rose/rose_dev.c22
-rw-r--r--net/sched/sch_api.c11
-rw-r--r--net/sched/sch_hfsc.c6
-rw-r--r--net/sched/sch_htb.c42
-rw-r--r--net/sched/sch_multiq.c2
-rw-r--r--net/sctp/ipv6.c3
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sctp/sm_make_chunk.c4
-rw-r--r--net/sunrpc/xprtsock.c53
-rw-r--r--net/wimax/op-msg.c9
-rw-r--r--net/wimax/stack.c12
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/core.c20
-rw-r--r--net/wireless/core.h20
-rw-r--r--net/wireless/nl80211.c485
-rw-r--r--net/wireless/nl80211.h8
-rw-r--r--net/wireless/reg.c287
-rw-r--r--net/wireless/reg.h9
-rw-r--r--net/wireless/scan.c836
-rw-r--r--net/wireless/sysfs.c30
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/x25/af_x25.c2
655 files changed, 47622 insertions, 41649 deletions
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index 77c3c202991..8af6d962687 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -17,8 +17,7 @@
17 </authorgroup> 17 </authorgroup>
18 18
19 <copyright> 19 <copyright>
20 <year>2007</year> 20 <year>2007-2009</year>
21 <year>2008</year>
22 <holder>Johannes Berg</holder> 21 <holder>Johannes Berg</holder>
23 </copyright> 22 </copyright>
24 23
@@ -165,8 +164,8 @@ usage should require reading the full document.
165!Pinclude/net/mac80211.h Frame format 164!Pinclude/net/mac80211.h Frame format
166 </sect1> 165 </sect1>
167 <sect1> 166 <sect1>
168 <title>Alignment issues</title> 167 <title>Packet alignment</title>
169 <para>TBD</para> 168!Pnet/mac80211/rx.c Packet alignment
170 </sect1> 169 </sect1>
171 <sect1> 170 <sect1>
172 <title>Calling into mac80211 from interrupts</title> 171 <title>Calling into mac80211 from interrupts</title>
@@ -223,6 +222,11 @@ usage should require reading the full document.
223!Finclude/net/mac80211.h ieee80211_key_flags 222!Finclude/net/mac80211.h ieee80211_key_flags
224 </chapter> 223 </chapter>
225 224
225 <chapter id="powersave">
226 <title>Powersave support</title>
227!Pinclude/net/mac80211.h Powersave support
228 </chapter>
229
226 <chapter id="qos"> 230 <chapter id="qos">
227 <title>Multiple queues and QoS support</title> 231 <title>Multiple queues and QoS support</title>
228 <para>TBD</para> 232 <para>TBD</para>
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 5ddbe350487..ac98851f7a0 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -229,7 +229,9 @@ Who: Jan Engelhardt <jengelh@computergmbh.de>
229--------------------------- 229---------------------------
230 230
231What: b43 support for firmware revision < 410 231What: b43 support for firmware revision < 410
232When: July 2008 232When: The schedule was July 2008, but it was decided that we are going to keep the
233 code as long as there are no major maintanance headaches.
234 So it _could_ be removed _any_ time now, if it conflicts with something new.
233Why: The support code for the old firmware hurts code readability/maintainability 235Why: The support code for the old firmware hurts code readability/maintainability
234 and slightly hurts runtime performance. Bugfixes for the old firmware 236 and slightly hurts runtime performance. Bugfixes for the old firmware
235 are not provided by Broadcom anymore. 237 are not provided by Broadcom anymore.
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index 7a3bb1abb83..b132e4a3cf0 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -141,7 +141,8 @@ rx_ccid = 2
141 Default CCID for the receiver-sender half-connection; see tx_ccid. 141 Default CCID for the receiver-sender half-connection; see tx_ccid.
142 142
143seq_window = 100 143seq_window = 100
144 The initial sequence window (sec. 7.5.2). 144 The initial sequence window (sec. 7.5.2) of the sender. This influences
145 the local ackno validity and the remote seqno validity windows (7.5.1).
145 146
146tx_qlen = 5 147tx_qlen = 5
147 The size of the transmit buffer in packets. A value of 0 corresponds 148 The size of the transmit buffer in packets. A value of 0 corresponds
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c7712787933..ff3f219ee4d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -782,6 +782,12 @@ arp_ignore - INTEGER
782 The max value from conf/{all,interface}/arp_ignore is used 782 The max value from conf/{all,interface}/arp_ignore is used
783 when ARP request is received on the {interface} 783 when ARP request is received on the {interface}
784 784
785arp_notify - BOOLEAN
786 Define mode for notification of address and device changes.
787 0 - (default): do nothing
788 1 - Generate gratuitous arp replies when device is brought up
789 or hardware address changes.
790
785arp_accept - BOOLEAN 791arp_accept - BOOLEAN
786 Define behavior when gratuitous arp replies are received: 792 Define behavior when gratuitous arp replies are received:
787 0 - drop gratuitous arp frames 793 0 - drop gratuitous arp frames
diff --git a/Documentation/powerpc/dts-bindings/fsl/tsec.txt b/Documentation/powerpc/dts-bindings/fsl/tsec.txt
index 7fa4b27574b..edb7ae19e86 100644
--- a/Documentation/powerpc/dts-bindings/fsl/tsec.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/tsec.txt
@@ -56,6 +56,12 @@ Properties:
56 hardware. 56 hardware.
57 - fsl,magic-packet : If present, indicates that the hardware supports 57 - fsl,magic-packet : If present, indicates that the hardware supports
58 waking up via magic packet. 58 waking up via magic packet.
59 - bd-stash : If present, indicates that the hardware supports stashing
60 buffer descriptors in the L2.
61 - rx-stash-len : Denotes the number of bytes of a received buffer to stash
62 in the L2.
63 - rx-stash-idx : Denotes the index of the first byte from the received
64 buffer to stash in the L2.
59 65
60Example: 66Example:
61 ethernet@24000 { 67 ethernet@24000 {
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 0c24bd4d656..aeb7e5fb4a0 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -235,11 +235,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
235 return result; 235 return result;
236 } 236 }
237 237
238 result = 238 genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC);
239 genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC);
240 if (result)
241 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
242 "Failed to send a Genetlink message!\n"));
243 return 0; 239 return 0;
244} 240}
245 241
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index dc073e167ab..5608a1e5a3b 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4311,10 +4311,17 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
4311 dev->stats.rx_bytes += size; 4311 dev->stats.rx_bytes += size;
4312 4312
4313 netif_rx(skb); 4313 netif_rx(skb);
4314
4315 dev->last_rx = jiffies;
4316} 4314}
4317 4315
4316static const struct net_device_ops hdlcdev_ops = {
4317 .ndo_open = hdlcdev_open,
4318 .ndo_stop = hdlcdev_close,
4319 .ndo_change_mtu = hdlc_change_mtu,
4320 .ndo_start_xmit = hdlc_start_xmit,
4321 .ndo_do_ioctl = hdlcdev_ioctl,
4322 .ndo_tx_timeout = hdlcdev_tx_timeout,
4323};
4324
4318/** 4325/**
4319 * called by device driver when adding device instance 4326 * called by device driver when adding device instance
4320 * do generic HDLC initialization 4327 * do generic HDLC initialization
@@ -4341,11 +4348,8 @@ static int hdlcdev_init(MGSLPC_INFO *info)
4341 dev->irq = info->irq_level; 4348 dev->irq = info->irq_level;
4342 4349
4343 /* network layer callbacks and settings */ 4350 /* network layer callbacks and settings */
4344 dev->do_ioctl = hdlcdev_ioctl; 4351 dev->netdev_ops = &hdlcdev_ops;
4345 dev->open = hdlcdev_open; 4352 dev->watchdog_timeo = 10 * HZ;
4346 dev->stop = hdlcdev_close;
4347 dev->tx_timeout = hdlcdev_tx_timeout;
4348 dev->watchdog_timeo = 10*HZ;
4349 dev->tx_queue_len = 50; 4353 dev->tx_queue_len = 50;
4350 4354
4351 /* generic HDLC layer callbacks and settings */ 4355 /* generic HDLC layer callbacks and settings */
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index b8063d4cad3..0057a8f58cb 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -8007,10 +8007,17 @@ static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8007 dev->stats.rx_bytes += size; 8007 dev->stats.rx_bytes += size;
8008 8008
8009 netif_rx(skb); 8009 netif_rx(skb);
8010
8011 dev->last_rx = jiffies;
8012} 8010}
8013 8011
8012static const struct net_device_ops hdlcdev_ops = {
8013 .ndo_open = hdlcdev_open,
8014 .ndo_stop = hdlcdev_close,
8015 .ndo_change_mtu = hdlc_change_mtu,
8016 .ndo_start_xmit = hdlc_start_xmit,
8017 .ndo_do_ioctl = hdlcdev_ioctl,
8018 .ndo_tx_timeout = hdlcdev_tx_timeout,
8019};
8020
8014/** 8021/**
8015 * called by device driver when adding device instance 8022 * called by device driver when adding device instance
8016 * do generic HDLC initialization 8023 * do generic HDLC initialization
@@ -8038,11 +8045,8 @@ static int hdlcdev_init(struct mgsl_struct *info)
8038 dev->dma = info->dma_level; 8045 dev->dma = info->dma_level;
8039 8046
8040 /* network layer callbacks and settings */ 8047 /* network layer callbacks and settings */
8041 dev->do_ioctl = hdlcdev_ioctl; 8048 dev->netdev_ops = &hdlcdev_ops;
8042 dev->open = hdlcdev_open; 8049 dev->watchdog_timeo = 10 * HZ;
8043 dev->stop = hdlcdev_close;
8044 dev->tx_timeout = hdlcdev_tx_timeout;
8045 dev->watchdog_timeo = 10*HZ;
8046 dev->tx_queue_len = 50; 8050 dev->tx_queue_len = 50;
8047 8051
8048 /* generic HDLC layer callbacks and settings */ 8052 /* generic HDLC layer callbacks and settings */
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index f329f459817..efb3dc928a4 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1763,10 +1763,17 @@ static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
1763 dev->stats.rx_bytes += size; 1763 dev->stats.rx_bytes += size;
1764 1764
1765 netif_rx(skb); 1765 netif_rx(skb);
1766
1767 dev->last_rx = jiffies;
1768} 1766}
1769 1767
1768static const struct net_device_ops hdlcdev_ops = {
1769 .ndo_open = hdlcdev_open,
1770 .ndo_stop = hdlcdev_close,
1771 .ndo_change_mtu = hdlc_change_mtu,
1772 .ndo_start_xmit = hdlc_start_xmit,
1773 .ndo_do_ioctl = hdlcdev_ioctl,
1774 .ndo_tx_timeout = hdlcdev_tx_timeout,
1775};
1776
1770/** 1777/**
1771 * called by device driver when adding device instance 1778 * called by device driver when adding device instance
1772 * do generic HDLC initialization 1779 * do generic HDLC initialization
@@ -1794,11 +1801,8 @@ static int hdlcdev_init(struct slgt_info *info)
1794 dev->irq = info->irq_level; 1801 dev->irq = info->irq_level;
1795 1802
1796 /* network layer callbacks and settings */ 1803 /* network layer callbacks and settings */
1797 dev->do_ioctl = hdlcdev_ioctl; 1804 dev->netdev_ops = &hdlcdev_ops;
1798 dev->open = hdlcdev_open; 1805 dev->watchdog_timeo = 10 * HZ;
1799 dev->stop = hdlcdev_close;
1800 dev->tx_timeout = hdlcdev_tx_timeout;
1801 dev->watchdog_timeo = 10*HZ;
1802 dev->tx_queue_len = 50; 1806 dev->tx_queue_len = 50;
1803 1807
1804 /* generic HDLC layer callbacks and settings */ 1808 /* generic HDLC layer callbacks and settings */
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 7b0c5b2dd26..8eb6c89a980 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -1907,10 +1907,17 @@ static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size)
1907 dev->stats.rx_bytes += size; 1907 dev->stats.rx_bytes += size;
1908 1908
1909 netif_rx(skb); 1909 netif_rx(skb);
1910
1911 dev->last_rx = jiffies;
1912} 1910}
1913 1911
1912static const struct net_device_ops hdlcdev_ops = {
1913 .ndo_open = hdlcdev_open,
1914 .ndo_stop = hdlcdev_close,
1915 .ndo_change_mtu = hdlc_change_mtu,
1916 .ndo_start_xmit = hdlc_start_xmit,
1917 .ndo_do_ioctl = hdlcdev_ioctl,
1918 .ndo_tx_timeout = hdlcdev_tx_timeout,
1919};
1920
1914/** 1921/**
1915 * called by device driver when adding device instance 1922 * called by device driver when adding device instance
1916 * do generic HDLC initialization 1923 * do generic HDLC initialization
@@ -1938,11 +1945,8 @@ static int hdlcdev_init(SLMP_INFO *info)
1938 dev->irq = info->irq_level; 1945 dev->irq = info->irq_level;
1939 1946
1940 /* network layer callbacks and settings */ 1947 /* network layer callbacks and settings */
1941 dev->do_ioctl = hdlcdev_ioctl; 1948 dev->netdev_ops = &hdlcdev_ops;
1942 dev->open = hdlcdev_open; 1949 dev->watchdog_timeo = 10 * HZ;
1943 dev->stop = hdlcdev_close;
1944 dev->tx_timeout = hdlcdev_tx_timeout;
1945 dev->watchdog_timeo = 10*HZ;
1946 dev->tx_queue_len = 50; 1950 dev->tx_queue_len = 50;
1947 1951
1948 /* generic HDLC layer callbacks and settings */ 1952 /* generic HDLC layer callbacks and settings */
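The four synclink hunks above apply the same mechanical conversion that recurs throughout this patch (3c501, 3c505, 3c507, 3c509 and others below): the individual callback fields on struct net_device are replaced by one static const struct net_device_ops. A minimal sketch of that pattern, assuming a hypothetical foo driver whose foo_* handlers are defined elsewhere (placeholders, not functions from this patch):

#include <linux/netdevice.h>

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open       = foo_open,        /* was dev->open */
	.ndo_stop       = foo_close,       /* was dev->stop */
	.ndo_start_xmit = foo_start_xmit,  /* was dev->hard_start_xmit */
	.ndo_do_ioctl   = foo_ioctl,       /* was dev->do_ioctl */
	.ndo_tx_timeout = foo_tx_timeout,  /* was dev->tx_timeout */
};

static void foo_setup(struct net_device *dev)
{
	/* One assignment replaces the old per-field hookups. */
	dev->netdev_ops     = &foo_netdev_ops;
	dev->watchdog_timeo = 10 * HZ;
}
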
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index b6fe7e7a2c2..c769ef269fb 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * cn_queue.c 2 * cn_queue.c
3 * 3 *
4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 9 * the Free Software Foundation; either version 2 of the License, or
@@ -31,6 +31,48 @@
31#include <linux/connector.h> 31#include <linux/connector.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33 33
34
35/*
36 * This job is sent to the kevent workqueue.
37 * While no event is once sent to any callback, the connector workqueue
38 * is not created to avoid a useless waiting kernel task.
39 * Once the first event is received, we create this dedicated workqueue which
40 * is necessary because the flow of data can be high and we don't want
41 * to encumber keventd with that.
42 */
43static void cn_queue_create(struct work_struct *work)
44{
45 struct cn_queue_dev *dev;
46
47 dev = container_of(work, struct cn_queue_dev, wq_creation);
48
49 dev->cn_queue = create_singlethread_workqueue(dev->name);
50 /* If we fail, we will use keventd for all following connector jobs */
51 WARN_ON(!dev->cn_queue);
52}
53
54/*
55 * Queue a data sent to a callback.
56 * If the connector workqueue is already created, we queue the job on it.
57 * Otherwise, we queue the job to kevent and queue the connector workqueue
58 * creation too.
59 */
60int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
61{
62 struct cn_queue_dev *pdev = cbq->pdev;
63
64 if (likely(pdev->cn_queue))
65 return queue_work(pdev->cn_queue, work);
66
67 /* Don't create the connector workqueue twice */
68 if (atomic_inc_return(&pdev->wq_requested) == 1)
69 schedule_work(&pdev->wq_creation);
70 else
71 atomic_dec(&pdev->wq_requested);
72
73 return schedule_work(work);
74}
75
34void cn_queue_wrapper(struct work_struct *work) 76void cn_queue_wrapper(struct work_struct *work)
35{ 77{
36 struct cn_callback_entry *cbq = 78 struct cn_callback_entry *cbq =
@@ -58,14 +100,17 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc
58 snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); 100 snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
59 memcpy(&cbq->id.id, id, sizeof(struct cb_id)); 101 memcpy(&cbq->id.id, id, sizeof(struct cb_id));
60 cbq->data.callback = callback; 102 cbq->data.callback = callback;
61 103
62 INIT_WORK(&cbq->work, &cn_queue_wrapper); 104 INIT_WORK(&cbq->work, &cn_queue_wrapper);
63 return cbq; 105 return cbq;
64} 106}
65 107
66static void cn_queue_free_callback(struct cn_callback_entry *cbq) 108static void cn_queue_free_callback(struct cn_callback_entry *cbq)
67{ 109{
68 flush_workqueue(cbq->pdev->cn_queue); 110 /* The first jobs have been sent to kevent, flush them too */
111 flush_scheduled_work();
112 if (cbq->pdev->cn_queue)
113 flush_workqueue(cbq->pdev->cn_queue);
69 114
70 kfree(cbq); 115 kfree(cbq);
71} 116}
@@ -143,14 +188,11 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
143 atomic_set(&dev->refcnt, 0); 188 atomic_set(&dev->refcnt, 0);
144 INIT_LIST_HEAD(&dev->queue_list); 189 INIT_LIST_HEAD(&dev->queue_list);
145 spin_lock_init(&dev->queue_lock); 190 spin_lock_init(&dev->queue_lock);
191 init_waitqueue_head(&dev->wq_created);
146 192
147 dev->nls = nls; 193 dev->nls = nls;
148 194
149 dev->cn_queue = create_singlethread_workqueue(dev->name); 195 INIT_WORK(&dev->wq_creation, cn_queue_create);
150 if (!dev->cn_queue) {
151 kfree(dev);
152 return NULL;
153 }
154 196
155 return dev; 197 return dev;
156} 198}
@@ -158,9 +200,25 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
158void cn_queue_free_dev(struct cn_queue_dev *dev) 200void cn_queue_free_dev(struct cn_queue_dev *dev)
159{ 201{
160 struct cn_callback_entry *cbq, *n; 202 struct cn_callback_entry *cbq, *n;
203 long timeout;
204 DEFINE_WAIT(wait);
205
206 /* Flush the first pending jobs queued on kevent */
207 flush_scheduled_work();
208
209 /* If the connector workqueue creation is still pending, wait for it */
210 prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
211 if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
212 timeout = schedule_timeout(HZ * 2);
213 if (!timeout && !dev->cn_queue)
214 WARN_ON(1);
215 }
216 finish_wait(&dev->wq_created, &wait);
161 217
162 flush_workqueue(dev->cn_queue); 218 if (dev->cn_queue) {
163 destroy_workqueue(dev->cn_queue); 219 flush_workqueue(dev->cn_queue);
220 destroy_workqueue(dev->cn_queue);
221 }
164 222
165 spin_lock_bh(&dev->queue_lock); 223 spin_lock_bh(&dev->queue_lock);
166 list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) 224 list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index bf4830082a1..fd336c5a905 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * connector.c 2 * connector.c
3 * 3 *
4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 9 * the Free Software Foundation; either version 2 of the License, or
@@ -145,14 +145,13 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
145 __cbq->data.ddata = data; 145 __cbq->data.ddata = data;
146 __cbq->data.destruct_data = destruct_data; 146 __cbq->data.destruct_data = destruct_data;
147 147
148 if (queue_work(dev->cbdev->cn_queue, 148 if (queue_cn_work(__cbq, &__cbq->work))
149 &__cbq->work))
150 err = 0; 149 err = 0;
151 else 150 else
152 err = -EINVAL; 151 err = -EINVAL;
153 } else { 152 } else {
154 struct cn_callback_data *d; 153 struct cn_callback_data *d;
155 154
156 err = -ENOMEM; 155 err = -ENOMEM;
157 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); 156 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
158 if (__new_cbq) { 157 if (__new_cbq) {
@@ -163,10 +162,12 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
163 d->destruct_data = destruct_data; 162 d->destruct_data = destruct_data;
164 d->free = __new_cbq; 163 d->free = __new_cbq;
165 164
165 __new_cbq->pdev = __cbq->pdev;
166
166 INIT_WORK(&__new_cbq->work, 167 INIT_WORK(&__new_cbq->work,
167 &cn_queue_wrapper); 168 &cn_queue_wrapper);
168 169
169 if (queue_work(dev->cbdev->cn_queue, 170 if (queue_cn_work(__new_cbq,
170 &__new_cbq->work)) 171 &__new_cbq->work))
171 err = 0; 172 err = 0;
172 else { 173 else {
@@ -237,7 +238,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
237 238
238 req = (struct cn_notify_req *)ctl->data; 239 req = (struct cn_notify_req *)ctl->data;
239 for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { 240 for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
240 if (id->idx >= req->first && 241 if (id->idx >= req->first &&
241 id->idx < req->first + req->range) { 242 id->idx < req->first + req->range) {
242 idx_found = 1; 243 idx_found = 1;
243 break; 244 break;
@@ -245,7 +246,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
245 } 246 }
246 247
247 for (i = 0; i < ctl->val_notify_num; ++i, ++req) { 248 for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
248 if (id->val >= req->first && 249 if (id->val >= req->first &&
249 id->val < req->first + req->range) { 250 id->val < req->first + req->range) {
250 val_found = 1; 251 val_found = 1;
251 break; 252 break;
@@ -459,7 +460,7 @@ static int __devinit cn_init(void)
459 netlink_kernel_release(dev->nls); 460 netlink_kernel_release(dev->nls);
460 return -EINVAL; 461 return -EINVAL;
461 } 462 }
462 463
463 cn_already_initialized = 1; 464 cn_already_initialized = 1;
464 465
465 err = cn_add_callback(&dev->id, "connector", &cn_callback); 466 err = cn_add_callback(&dev->id, "connector", &cn_callback);
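The connector change above defers creation of the dedicated workqueue until the first event arrives, using keventd both to run early jobs and to create the workqueue itself. The include/linux/connector.h side of the change is not reproduced in this section; judging from the cn_queue.c code above, the workqueue-related fields of struct cn_queue_dev presumably end up looking roughly like this (a reconstruction for illustration, not the actual header hunk; cn_queue already existed, the other three are new):

struct cn_queue_dev {
	/* ... existing fields: queue_list, queue_lock, refcnt, name, nls ... */

	struct workqueue_struct *cn_queue;	/* NULL until cn_queue_create() has run */
	struct work_struct wq_creation;		/* queued to keventd on the first event */
	wait_queue_head_t wq_created;		/* cn_queue_free_dev() waits on this */
	atomic_t wq_requested;			/* guards against requesting creation twice */
};
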
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 4dcf08b3fd8..11efd3528ce 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -701,6 +701,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
701 u32 stag_idx; 701 u32 stag_idx;
702 u32 wptr; 702 u32 wptr;
703 703
704 if (rdev_p->flags)
705 return -EIO;
706
704 stag_state = stag_state > 0; 707 stag_state = stag_state > 0;
705 stag_idx = (*stag) >> 8; 708 stag_idx = (*stag) >> 8;
706 709
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 656fe47bc84..9ed65b05517 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -108,6 +108,8 @@ struct cxio_rdev {
108 struct gen_pool *pbl_pool; 108 struct gen_pool *pbl_pool;
109 struct gen_pool *rqt_pool; 109 struct gen_pool *rqt_pool;
110 struct list_head entry; 110 struct list_head entry;
111 u32 flags;
112#define CXIO_ERROR_FATAL 1
111}; 113};
112 114
113static inline int cxio_num_stags(struct cxio_rdev *rdev_p) 115static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 4489c89d671..37a4fc264a0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,13 +51,15 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
51 51
52static void open_rnic_dev(struct t3cdev *); 52static void open_rnic_dev(struct t3cdev *);
53static void close_rnic_dev(struct t3cdev *); 53static void close_rnic_dev(struct t3cdev *);
54static void iwch_err_handler(struct t3cdev *, u32, u32);
54 55
55struct cxgb3_client t3c_client = { 56struct cxgb3_client t3c_client = {
56 .name = "iw_cxgb3", 57 .name = "iw_cxgb3",
57 .add = open_rnic_dev, 58 .add = open_rnic_dev,
58 .remove = close_rnic_dev, 59 .remove = close_rnic_dev,
59 .handlers = t3c_handlers, 60 .handlers = t3c_handlers,
60 .redirect = iwch_ep_redirect 61 .redirect = iwch_ep_redirect,
62 .err_handler = iwch_err_handler
61}; 63};
62 64
63static LIST_HEAD(dev_list); 65static LIST_HEAD(dev_list);
@@ -160,6 +162,17 @@ static void close_rnic_dev(struct t3cdev *tdev)
160 mutex_unlock(&dev_mutex); 162 mutex_unlock(&dev_mutex);
161} 163}
162 164
165static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
166{
167 struct cxio_rdev *rdev = tdev->ulp;
168
169 if (status == OFFLOAD_STATUS_DOWN)
170 rdev->flags = CXIO_ERROR_FATAL;
171
172 return;
173
174}
175
163static int __init iwch_init_module(void) 176static int __init iwch_init_module(void)
164{ 177{
165 int err; 178 int err;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 5d139db1b77..53df9de2342 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2541,7 +2541,7 @@ static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic
2541{ 2541{
2542 struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); 2542 struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
2543 2543
2544 netif_rx_schedule(&nesvnic->napi); 2544 napi_schedule(&nesvnic->napi);
2545} 2545}
2546 2546
2547 2547
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 57a47cf7e51..f5484ad1279 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -111,7 +111,7 @@ static int nes_netdev_poll(struct napi_struct *napi, int budget)
111 nes_nic_ce_handler(nesdev, nescq); 111 nes_nic_ce_handler(nesdev, nescq);
112 112
113 if (nescq->cqes_pending == 0) { 113 if (nescq->cqes_pending == 0) {
114 netif_rx_complete(napi); 114 napi_complete(napi);
115 /* clear out completed cqes and arm */ 115 /* clear out completed cqes and arm */
116 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | 116 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
117 nescq->cq_number | (nescq->cqe_allocs_pending << 16)); 117 nescq->cq_number | (nescq->cqe_allocs_pending << 16));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a1925810be3..da608273983 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -446,11 +446,11 @@ poll_more:
446 if (dev->features & NETIF_F_LRO) 446 if (dev->features & NETIF_F_LRO)
447 lro_flush_all(&priv->lro.lro_mgr); 447 lro_flush_all(&priv->lro.lro_mgr);
448 448
449 netif_rx_complete(napi); 449 napi_complete(napi);
450 if (unlikely(ib_req_notify_cq(priv->recv_cq, 450 if (unlikely(ib_req_notify_cq(priv->recv_cq,
451 IB_CQ_NEXT_COMP | 451 IB_CQ_NEXT_COMP |
452 IB_CQ_REPORT_MISSED_EVENTS)) && 452 IB_CQ_REPORT_MISSED_EVENTS)) &&
453 netif_rx_reschedule(napi)) 453 napi_reschedule(napi))
454 goto poll_more; 454 goto poll_more;
455 } 455 }
456 456
@@ -462,7 +462,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
462 struct net_device *dev = dev_ptr; 462 struct net_device *dev = dev_ptr;
463 struct ipoib_dev_priv *priv = netdev_priv(dev); 463 struct ipoib_dev_priv *priv = netdev_priv(dev);
464 464
465 netif_rx_schedule(&priv->napi); 465 napi_schedule(&priv->napi);
466} 466}
467 467
468static void drain_tx_cq(struct net_device *dev) 468static void drain_tx_cq(struct net_device *dev)
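The nes and ipoib hunks above are part of a tree-wide rename of the NAPI entry points: netif_rx_schedule(), netif_rx_complete() and netif_rx_reschedule() become napi_schedule(), napi_complete() and napi_reschedule(), each taking only the napi_struct. A minimal sketch of the resulting poll/interrupt pattern, assuming a hypothetical foo driver (struct foo_priv and the foo_* helpers are placeholders, not code from this patch):

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int done = foo_clean_rx(priv, budget);	/* placeholder rx handler */

	if (done < budget) {
		napi_complete(napi);		/* was netif_rx_complete(napi) */
		foo_enable_rx_irq(priv);	/* placeholder */
	}
	return done;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	foo_disable_rx_irq(priv);		/* placeholder */
	napi_schedule(&priv->napi);		/* was netif_rx_schedule(&priv->napi) */
	return IRQ_HANDLED;
}
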
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3d1318a3e68..1c5344aa57c 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -197,6 +197,17 @@ out:
197 return ERR_PTR(err); 197 return ERR_PTR(err);
198} 198}
199 199
200static const struct net_device_ops el_netdev_ops = {
201 .ndo_open = el_open,
202 .ndo_stop = el1_close,
203 .ndo_start_xmit = el_start_xmit,
204 .ndo_tx_timeout = el_timeout,
205 .ndo_set_multicast_list = set_multicast_list,
206 .ndo_change_mtu = eth_change_mtu,
207 .ndo_set_mac_address = eth_mac_addr,
208 .ndo_validate_addr = eth_validate_addr,
209};
210
200/** 211/**
201 * el1_probe1: 212 * el1_probe1:
202 * @dev: The device structure to use 213 * @dev: The device structure to use
@@ -305,12 +316,8 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
305 * The EL1-specific entries in the device structure. 316 * The EL1-specific entries in the device structure.
306 */ 317 */
307 318
308 dev->open = &el_open; 319 dev->netdev_ops = &el_netdev_ops;
309 dev->hard_start_xmit = &el_start_xmit;
310 dev->tx_timeout = &el_timeout;
311 dev->watchdog_timeo = HZ; 320 dev->watchdog_timeo = HZ;
312 dev->stop = &el1_close;
313 dev->set_multicast_list = &set_multicast_list;
314 dev->ethtool_ops = &netdev_ethtool_ops; 321 dev->ethtool_ops = &netdev_ethtool_ops;
315 return 0; 322 return 0;
316} 323}
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index a8107f992fb..2de1c9cd7bd 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1354,6 +1354,17 @@ static int __init elp_autodetect(struct net_device *dev)
1354 return 0; /* Because of this, the layer above will return -ENODEV */ 1354 return 0; /* Because of this, the layer above will return -ENODEV */
1355} 1355}
1356 1356
1357static const struct net_device_ops elp_netdev_ops = {
1358 .ndo_open = elp_open,
1359 .ndo_stop = elp_close,
1360 .ndo_get_stats = elp_get_stats,
1361 .ndo_start_xmit = elp_start_xmit,
1362 .ndo_tx_timeout = elp_timeout,
1363 .ndo_set_multicast_list = elp_set_mc_list,
1364 .ndo_change_mtu = eth_change_mtu,
1365 .ndo_set_mac_address = eth_mac_addr,
1366 .ndo_validate_addr = eth_validate_addr,
1367};
1357 1368
1358/****************************************************** 1369/******************************************************
1359 * 1370 *
@@ -1558,13 +1569,8 @@ static int __init elplus_setup(struct net_device *dev)
1558 printk(KERN_ERR "%s: adapter configuration failed\n", dev->name); 1569 printk(KERN_ERR "%s: adapter configuration failed\n", dev->name);
1559 } 1570 }
1560 1571
1561 dev->open = elp_open; /* local */ 1572 dev->netdev_ops = &elp_netdev_ops;
1562 dev->stop = elp_close; /* local */
1563 dev->get_stats = elp_get_stats; /* local */
1564 dev->hard_start_xmit = elp_start_xmit; /* local */
1565 dev->tx_timeout = elp_timeout; /* local */
1566 dev->watchdog_timeo = 10*HZ; 1573 dev->watchdog_timeo = 10*HZ;
1567 dev->set_multicast_list = elp_set_mc_list; /* local */
1568 dev->ethtool_ops = &netdev_ethtool_ops; /* local */ 1574 dev->ethtool_ops = &netdev_ethtool_ops; /* local */
1569 1575
1570 dev->mem_start = dev->mem_end = 0; 1576 dev->mem_start = dev->mem_end = 0;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 423e65d0ba7..fbbaf826def 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -352,6 +352,16 @@ out:
352 return ERR_PTR(err); 352 return ERR_PTR(err);
353} 353}
354 354
355static const struct net_device_ops netdev_ops = {
356 .ndo_open = el16_open,
357 .ndo_stop = el16_close,
358 .ndo_start_xmit = el16_send_packet,
359 .ndo_tx_timeout = el16_tx_timeout,
360 .ndo_change_mtu = eth_change_mtu,
361 .ndo_set_mac_address = eth_mac_addr,
362 .ndo_validate_addr = eth_validate_addr,
363};
364
355static int __init el16_probe1(struct net_device *dev, int ioaddr) 365static int __init el16_probe1(struct net_device *dev, int ioaddr)
356{ 366{
357 static unsigned char init_ID_done, version_printed; 367 static unsigned char init_ID_done, version_printed;
@@ -449,10 +459,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
449 goto out1; 459 goto out1;
450 } 460 }
451 461
452 dev->open = el16_open; 462 dev->netdev_ops = &netdev_ops;
453 dev->stop = el16_close;
454 dev->hard_start_xmit = el16_send_packet;
455 dev->tx_timeout = el16_tx_timeout;
456 dev->watchdog_timeo = TX_TIMEOUT; 463 dev->watchdog_timeo = TX_TIMEOUT;
457 dev->ethtool_ops = &netdev_ethtool_ops; 464 dev->ethtool_ops = &netdev_ethtool_ops;
458 dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ 465 dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 8c694213035..fbb37192199 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -537,6 +537,21 @@ static struct mca_driver el3_mca_driver = {
537static int mca_registered; 537static int mca_registered;
538#endif /* CONFIG_MCA */ 538#endif /* CONFIG_MCA */
539 539
540static const struct net_device_ops netdev_ops = {
541 .ndo_open = el3_open,
542 .ndo_stop = el3_close,
543 .ndo_start_xmit = el3_start_xmit,
544 .ndo_get_stats = el3_get_stats,
545 .ndo_set_multicast_list = set_multicast_list,
546 .ndo_tx_timeout = el3_tx_timeout,
547 .ndo_change_mtu = eth_change_mtu,
548 .ndo_set_mac_address = eth_mac_addr,
549 .ndo_validate_addr = eth_validate_addr,
550#ifdef CONFIG_NET_POLL_CONTROLLER
551 .ndo_poll_controller = el3_poll_controller,
552#endif
553};
554
540static int __devinit el3_common_init(struct net_device *dev) 555static int __devinit el3_common_init(struct net_device *dev)
541{ 556{
542 struct el3_private *lp = netdev_priv(dev); 557 struct el3_private *lp = netdev_priv(dev);
@@ -553,16 +568,8 @@ static int __devinit el3_common_init(struct net_device *dev)
553 } 568 }
554 569
555 /* The EL3-specific entries in the device structure. */ 570 /* The EL3-specific entries in the device structure. */
556 dev->open = &el3_open; 571 dev->netdev_ops = &netdev_ops;
557 dev->hard_start_xmit = &el3_start_xmit;
558 dev->stop = &el3_close;
559 dev->get_stats = &el3_get_stats;
560 dev->set_multicast_list = &set_multicast_list;
561 dev->tx_timeout = el3_tx_timeout;
562 dev->watchdog_timeo = TX_TIMEOUT; 572 dev->watchdog_timeo = TX_TIMEOUT;
563#ifdef CONFIG_NET_POLL_CONTROLLER
564 dev->poll_controller = el3_poll_controller;
565#endif
566 SET_ETHTOOL_OPS(dev, &ethtool_ops); 573 SET_ETHTOOL_OPS(dev, &ethtool_ops);
567 574
568 err = register_netdev(dev); 575 err = register_netdev(dev);
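
Where the old code set dev->poll_controller under CONFIG_NET_POLL_CONTROLLER, the 3c509 conversion above (and 3c59x below) guards the corresponding designated initializer instead, so netpoll support is kept without adding the member in other configurations. A fragment continuing the foo_* sketch above; the interrupt handler is a stub:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;             /* stub; a real handler does the work */
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll pumps the device by hand with its interrupt line disabled */
static void foo_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        foo_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static const struct net_device_ops foo_ops = {
        /* .ndo_open / .ndo_stop / .ndo_start_xmit as in the earlier sketch */
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = foo_poll_controller,
#endif
};
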
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 39ac12233aa..167bf23066e 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -563,6 +563,20 @@ no_pnp:
563 return NULL; 563 return NULL;
564} 564}
565 565
566
567static const struct net_device_ops netdev_ops = {
568 .ndo_open = corkscrew_open,
569 .ndo_stop = corkscrew_close,
570 .ndo_start_xmit = corkscrew_start_xmit,
571 .ndo_tx_timeout = corkscrew_timeout,
572 .ndo_get_stats = corkscrew_get_stats,
573 .ndo_set_multicast_list = set_rx_mode,
574 .ndo_change_mtu = eth_change_mtu,
575 .ndo_set_mac_address = eth_mac_addr,
576 .ndo_validate_addr = eth_validate_addr,
577};
578
579
566static int corkscrew_setup(struct net_device *dev, int ioaddr, 580static int corkscrew_setup(struct net_device *dev, int ioaddr,
567 struct pnp_dev *idev, int card_number) 581 struct pnp_dev *idev, int card_number)
568{ 582{
@@ -681,13 +695,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
681 vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; 695 vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0;
682 696
683 /* The 3c51x-specific entries in the device structure. */ 697 /* The 3c51x-specific entries in the device structure. */
684 dev->open = &corkscrew_open; 698 dev->netdev_ops = &netdev_ops;
685 dev->hard_start_xmit = &corkscrew_start_xmit;
686 dev->tx_timeout = &corkscrew_timeout;
687 dev->watchdog_timeo = (400 * HZ) / 1000; 699 dev->watchdog_timeo = (400 * HZ) / 1000;
688 dev->stop = &corkscrew_close;
689 dev->get_stats = &corkscrew_get_stats;
690 dev->set_multicast_list = &set_rx_mode;
691 dev->ethtool_ops = &netdev_ethtool_ops; 700 dev->ethtool_ops = &netdev_ethtool_ops;
692 701
693 return register_netdev(dev); 702 return register_netdev(dev);
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index ff41e1ff560..8f734d74b51 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -403,6 +403,20 @@ static int elmc_getinfo(char *buf, int slot, void *d)
403 return len; 403 return len;
404} /* elmc_getinfo() */ 404} /* elmc_getinfo() */
405 405
406static const struct net_device_ops netdev_ops = {
407 .ndo_open = elmc_open,
408 .ndo_stop = elmc_close,
409 .ndo_get_stats = elmc_get_stats,
410 .ndo_start_xmit = elmc_send_packet,
411 .ndo_tx_timeout = elmc_timeout,
412#ifdef ELMC_MULTICAST
413 .ndo_set_multicast_list = set_multicast_list,
414#endif
415 .ndo_change_mtu = eth_change_mtu,
416 .ndo_set_mac_address = eth_mac_addr,
417 .ndo_validate_addr = eth_validate_addr,
418};
419
406/*****************************************************************/ 420/*****************************************************************/
407 421
408static int __init do_elmc_probe(struct net_device *dev) 422static int __init do_elmc_probe(struct net_device *dev)
@@ -544,17 +558,8 @@ static int __init do_elmc_probe(struct net_device *dev)
544 printk(KERN_INFO "%s: hardware address %pM\n", 558 printk(KERN_INFO "%s: hardware address %pM\n",
545 dev->name, dev->dev_addr); 559 dev->name, dev->dev_addr);
546 560
547 dev->open = &elmc_open; 561 dev->netdev_ops = &netdev_ops;
548 dev->stop = &elmc_close;
549 dev->get_stats = &elmc_get_stats;
550 dev->hard_start_xmit = &elmc_send_packet;
551 dev->tx_timeout = &elmc_timeout;
552 dev->watchdog_timeo = HZ; 562 dev->watchdog_timeo = HZ;
553#ifdef ELMC_MULTICAST
554 dev->set_multicast_list = &set_multicast_list;
555#else
556 dev->set_multicast_list = NULL;
557#endif
558 dev->ethtool_ops = &netdev_ethtool_ops; 563 dev->ethtool_ops = &netdev_ethtool_ops;
559 564
560 /* note that we haven't actually requested the IRQ from the kernel. 565 /* note that we haven't actually requested the IRQ from the kernel.
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 2df3af3b9b2..b61073c42bf 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -288,6 +288,18 @@ struct net_device *__init mc32_probe(int unit)
288 return ERR_PTR(-ENODEV); 288 return ERR_PTR(-ENODEV);
289} 289}
290 290
291static const struct net_device_ops netdev_ops = {
292 .ndo_open = mc32_open,
293 .ndo_stop = mc32_close,
294 .ndo_start_xmit = mc32_send_packet,
295 .ndo_get_stats = mc32_get_stats,
296 .ndo_set_multicast_list = mc32_set_multicast_list,
297 .ndo_tx_timeout = mc32_timeout,
298 .ndo_change_mtu = eth_change_mtu,
299 .ndo_set_mac_address = eth_mac_addr,
300 .ndo_validate_addr = eth_validate_addr,
301};
302
291/** 303/**
292 * mc32_probe1 - Check a given slot for a board and test the card 304 * mc32_probe1 - Check a given slot for a board and test the card
293 * @dev: Device structure to fill in 305 * @dev: Device structure to fill in
@@ -518,12 +530,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
518 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", 530 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
519 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); 531 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
520 532
521 dev->open = mc32_open; 533 dev->netdev_ops = &netdev_ops;
522 dev->stop = mc32_close;
523 dev->hard_start_xmit = mc32_send_packet;
524 dev->get_stats = mc32_get_stats;
525 dev->set_multicast_list = mc32_set_multicast_list;
526 dev->tx_timeout = mc32_timeout;
527 dev->watchdog_timeo = HZ*5; /* Board does all the work */ 534 dev->watchdog_timeo = HZ*5; /* Board does all the work */
528 dev->ethtool_ops = &netdev_ethtool_ops; 535 dev->ethtool_ops = &netdev_ethtool_ops;
529 536
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index cdbbb6226fc..b2563d384cf 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -992,6 +992,42 @@ out:
992 return rc; 992 return rc;
993} 993}
994 994
995static const struct net_device_ops boomrang_netdev_ops = {
996 .ndo_open = vortex_open,
997 .ndo_stop = vortex_close,
998 .ndo_start_xmit = boomerang_start_xmit,
999 .ndo_tx_timeout = vortex_tx_timeout,
1000 .ndo_get_stats = vortex_get_stats,
1001#ifdef CONFIG_PCI
1002 .ndo_do_ioctl = vortex_ioctl,
1003#endif
1004 .ndo_set_multicast_list = set_rx_mode,
1005 .ndo_change_mtu = eth_change_mtu,
1006 .ndo_set_mac_address = eth_mac_addr,
1007 .ndo_validate_addr = eth_validate_addr,
1008#ifdef CONFIG_NET_POLL_CONTROLLER
1009 .ndo_poll_controller = poll_vortex,
1010#endif
1011};
1012
1013static const struct net_device_ops vortex_netdev_ops = {
1014 .ndo_open = vortex_open,
1015 .ndo_stop = vortex_close,
1016 .ndo_start_xmit = vortex_start_xmit,
1017 .ndo_tx_timeout = vortex_tx_timeout,
1018 .ndo_get_stats = vortex_get_stats,
1019#ifdef CONFIG_PCI
1020 .ndo_do_ioctl = vortex_ioctl,
1021#endif
1022 .ndo_set_multicast_list = set_rx_mode,
1023 .ndo_change_mtu = eth_change_mtu,
1024 .ndo_set_mac_address = eth_mac_addr,
1025 .ndo_validate_addr = eth_validate_addr,
1026#ifdef CONFIG_NET_POLL_CONTROLLER
1027 .ndo_poll_controller = poll_vortex,
1028#endif
1029};
1030
995/* 1031/*
996 * Start up the PCI/EISA device which is described by *gendev. 1032 * Start up the PCI/EISA device which is described by *gendev.
997 * Return 0 on success. 1033 * Return 0 on success.
@@ -1366,18 +1402,16 @@ static int __devinit vortex_probe1(struct device *gendev,
1366 } 1402 }
1367 1403
1368 /* The 3c59x-specific entries in the device structure. */ 1404 /* The 3c59x-specific entries in the device structure. */
1369 dev->open = vortex_open;
1370 if (vp->full_bus_master_tx) { 1405 if (vp->full_bus_master_tx) {
1371 dev->hard_start_xmit = boomerang_start_xmit; 1406 dev->netdev_ops = &boomrang_netdev_ops;
1372 /* Actually, it still should work with iommu. */ 1407 /* Actually, it still should work with iommu. */
1373 if (card_idx < MAX_UNITS && 1408 if (card_idx < MAX_UNITS &&
1374 ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || 1409 ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
1375 hw_checksums[card_idx] == 1)) { 1410 hw_checksums[card_idx] == 1)) {
1376 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 1411 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1377 } 1412 }
1378 } else { 1413 } else
1379 dev->hard_start_xmit = vortex_start_xmit; 1414 dev->netdev_ops = &vortex_netdev_ops;
1380 }
1381 1415
1382 if (print_info) { 1416 if (print_info) {
1383 printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n", 1417 printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
@@ -1386,18 +1420,9 @@ static int __devinit vortex_probe1(struct device *gendev,
1386 (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); 1420 (dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
1387 } 1421 }
1388 1422
1389 dev->stop = vortex_close;
1390 dev->get_stats = vortex_get_stats;
1391#ifdef CONFIG_PCI
1392 dev->do_ioctl = vortex_ioctl;
1393#endif
1394 dev->ethtool_ops = &vortex_ethtool_ops; 1423 dev->ethtool_ops = &vortex_ethtool_ops;
1395 dev->set_multicast_list = set_rx_mode;
1396 dev->tx_timeout = vortex_tx_timeout;
1397 dev->watchdog_timeo = (watchdog * HZ) / 1000; 1424 dev->watchdog_timeo = (watchdog * HZ) / 1000;
1398#ifdef CONFIG_NET_POLL_CONTROLLER 1425
1399 dev->poll_controller = poll_vortex;
1400#endif
1401 if (pdev) { 1426 if (pdev) {
1402 vp->pm_state_valid = 1; 1427 vp->pm_state_valid = 1;
1403 pci_save_state(VORTEX_PCI(vp)); 1428 pci_save_state(VORTEX_PCI(vp));
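
3c59x is the one driver here that used to choose hard_start_xmit at probe time (boomerang DMA path versus vortex PIO path). Since the ops table is const, that choice now selects between two complete tables that differ only in .ndo_start_xmit. A rough sketch of the idea with invented foo_* names:

#include <linux/netdevice.h>

static int foo_open(struct net_device *dev)  { return 0; }
static int foo_close(struct net_device *dev) { return 0; }

static int foo_xmit_pio(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int foo_xmit_dma(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops foo_pio_ops = {
        .ndo_open       = foo_open,
        .ndo_stop       = foo_close,
        .ndo_start_xmit = foo_xmit_pio,
};

static const struct net_device_ops foo_dma_ops = {
        .ndo_open       = foo_open,
        .ndo_stop       = foo_close,
        .ndo_start_xmit = foo_xmit_dma,
};

static void foo_pick_ops(struct net_device *dev, int full_bus_master)
{
        /* the choice is made by pointer, not by patching one member */
        dev->netdev_ops = full_bus_master ? &foo_dma_ops : &foo_pio_ops;
}
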
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 4e19ae3ce6b..35517b06ec3 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -604,7 +604,7 @@ rx_next:
604 604
605 spin_lock_irqsave(&cp->lock, flags); 605 spin_lock_irqsave(&cp->lock, flags);
606 cpw16_f(IntrMask, cp_intr_mask); 606 cpw16_f(IntrMask, cp_intr_mask);
607 __netif_rx_complete(napi); 607 __napi_complete(napi);
608 spin_unlock_irqrestore(&cp->lock, flags); 608 spin_unlock_irqrestore(&cp->lock, flags);
609 } 609 }
610 610
@@ -641,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
641 } 641 }
642 642
643 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) 643 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
644 if (netif_rx_schedule_prep(&cp->napi)) { 644 if (napi_schedule_prep(&cp->napi)) {
645 cpw16_f(IntrMask, cp_norx_intr_mask); 645 cpw16_f(IntrMask, cp_norx_intr_mask);
646 __netif_rx_schedule(&cp->napi); 646 __napi_schedule(&cp->napi);
647 } 647 }
648 648
649 if (status & (TxOK | TxErr | TxEmpty | SWInt)) 649 if (status & (TxOK | TxErr | TxEmpty | SWInt))
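
The second theme of the patch starts with 8139cp: the NAPI helpers lose their misleading netif_rx_ prefix. netif_rx_schedule_prep(), __netif_rx_schedule(), netif_rx_complete() and netif_rx_reschedule() become napi_schedule_prep(), __napi_schedule(), napi_complete() and napi_reschedule(); they operate on a struct napi_struct, not on a net_device. A sketch of the usual interrupt/poll pairing with the new names; foo_priv and the masking comments are placeholders:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
        struct napi_struct napi;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct foo_priv *fp = netdev_priv(dev);

        if (napi_schedule_prep(&fp->napi)) {
                /* mask further rx interrupts here ... */
                __napi_schedule(&fp->napi);
        }
        return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... receive up to "budget" frames, counting them in work_done ... */

        if (work_done < budget) {
                napi_complete(napi);
                /* ... unmask rx interrupts here ... */
        }
        return work_done;
}
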
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a5b24202d56..5341da604e8 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2128,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
2128 */ 2128 */
2129 spin_lock_irqsave(&tp->lock, flags); 2129 spin_lock_irqsave(&tp->lock, flags);
2130 RTL_W16_F(IntrMask, rtl8139_intr_mask); 2130 RTL_W16_F(IntrMask, rtl8139_intr_mask);
2131 __netif_rx_complete(napi); 2131 __napi_complete(napi);
2132 spin_unlock_irqrestore(&tp->lock, flags); 2132 spin_unlock_irqrestore(&tp->lock, flags);
2133 } 2133 }
2134 spin_unlock(&tp->rx_lock); 2134 spin_unlock(&tp->rx_lock);
@@ -2178,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
2178 /* Receive packets are processed by poll routine. 2178 /* Receive packets are processed by poll routine.
2179 If not running start it now. */ 2179 If not running start it now. */
2180 if (status & RxAckBits){ 2180 if (status & RxAckBits){
2181 if (netif_rx_schedule_prep(&tp->napi)) { 2181 if (napi_schedule_prep(&tp->napi)) {
2182 RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); 2182 RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
2183 __netif_rx_schedule(&tp->napi); 2183 __napi_schedule(&tp->napi);
2184 } 2184 }
2185 } 2185 }
2186 2186
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index b273596368e..cca94b9c08a 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1122,6 +1122,17 @@ static void print_eth(unsigned char *add, char *str)
1122static int io = 0x300; 1122static int io = 0x300;
1123static int irq = 10; 1123static int irq = 10;
1124 1124
1125static const struct net_device_ops i596_netdev_ops = {
1126 .ndo_open = i596_open,
1127 .ndo_stop = i596_close,
1128 .ndo_start_xmit = i596_start_xmit,
1129 .ndo_set_multicast_list = set_multicast_list,
1130 .ndo_tx_timeout = i596_tx_timeout,
1131 .ndo_change_mtu = eth_change_mtu,
1132 .ndo_set_mac_address = eth_mac_addr,
1133 .ndo_validate_addr = eth_validate_addr,
1134};
1135
1125struct net_device * __init i82596_probe(int unit) 1136struct net_device * __init i82596_probe(int unit)
1126{ 1137{
1127 struct net_device *dev; 1138 struct net_device *dev;
@@ -1232,11 +1243,7 @@ found:
1232 DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); 1243 DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
1233 1244
1234 /* The 82596-specific entries in the device structure. */ 1245 /* The 82596-specific entries in the device structure. */
1235 dev->open = i596_open; 1246 dev->netdev_ops = &i596_netdev_ops;
1236 dev->stop = i596_close;
1237 dev->hard_start_xmit = i596_start_xmit;
1238 dev->set_multicast_list = set_multicast_list;
1239 dev->tx_timeout = i596_tx_timeout;
1240 dev->watchdog_timeo = TX_TIMEOUT; 1247 dev->watchdog_timeo = TX_TIMEOUT;
1241 1248
1242 dev->ml_priv = (void *)(dev->mem_start); 1249 dev->ml_priv = (void *)(dev->mem_start);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6bdfd47d679..62bc0223a8e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1829,10 +1829,10 @@ config 68360_ENET
1829 1829
1830config FEC 1830config FEC
1831 bool "FEC ethernet controller (of ColdFire CPUs)" 1831 bool "FEC ethernet controller (of ColdFire CPUs)"
1832 depends on M523x || M527x || M5272 || M528x || M520x || M532x 1832 depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27
1833 help 1833 help
1834 Say Y here if you want to use the built-in 10/100 Fast ethernet 1834 Say Y here if you want to use the built-in 10/100 Fast ethernet
1835 controller on some Motorola ColdFire processors. 1835 controller on some Motorola ColdFire and Freescale i.MX processors.
1836 1836
1837config FEC2 1837config FEC2
1838 bool "Second FEC ethernet controller (on some ColdFire CPUs)" 1838 bool "Second FEC ethernet controller (on some ColdFire CPUs)"
@@ -2022,7 +2022,6 @@ config IGB
2022config IGB_LRO 2022config IGB_LRO
2023 bool "Use software LRO" 2023 bool "Use software LRO"
2024 depends on IGB && INET 2024 depends on IGB && INET
2025 select INET_LRO
2026 ---help--- 2025 ---help---
2027 Say Y here if you want to use large receive offload. 2026 Say Y here if you want to use large receive offload.
2028 2027
@@ -2273,9 +2272,17 @@ config GELIC_WIRELESS_OLD_PSK_INTERFACE
2273 2272
2274 If unsure, say N. 2273 If unsure, say N.
2275 2274
2275config FSL_PQ_MDIO
2276 tristate "Freescale PQ MDIO"
2277 depends on FSL_SOC
2278 select PHYLIB
2279 help
2280 This driver supports the MDIO bus used by the gianfar and UCC drivers.
2281
2276config GIANFAR 2282config GIANFAR
2277 tristate "Gianfar Ethernet" 2283 tristate "Gianfar Ethernet"
2278 depends on FSL_SOC 2284 depends on FSL_SOC
2285 select FSL_PQ_MDIO
2279 select PHYLIB 2286 select PHYLIB
2280 select CRC32 2287 select CRC32
2281 help 2288 help
@@ -2285,6 +2292,7 @@ config GIANFAR
2285config UCC_GETH 2292config UCC_GETH
2286 tristate "Freescale QE Gigabit Ethernet" 2293 tristate "Freescale QE Gigabit Ethernet"
2287 depends on QUICC_ENGINE 2294 depends on QUICC_ENGINE
2295 select FSL_PQ_MDIO
2288 select PHYLIB 2296 select PHYLIB
2289 help 2297 help
2290 This driver supports the Gigabit Ethernet mode of the QUICC Engine, 2298 This driver supports the Gigabit Ethernet mode of the QUICC Engine,
@@ -2408,7 +2416,6 @@ config CHELSIO_T3
2408 tristate "Chelsio Communications T3 10Gb Ethernet support" 2416 tristate "Chelsio Communications T3 10Gb Ethernet support"
2409 depends on CHELSIO_T3_DEPENDS 2417 depends on CHELSIO_T3_DEPENDS
2410 select FW_LOADER 2418 select FW_LOADER
2411 select INET_LRO
2412 help 2419 help
2413 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet 2420 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
2414 adapters. 2421 adapters.
@@ -2444,7 +2451,6 @@ config ENIC
2444config IXGBE 2451config IXGBE
2445 tristate "Intel(R) 10GbE PCI Express adapters support" 2452 tristate "Intel(R) 10GbE PCI Express adapters support"
2446 depends on PCI && INET 2453 depends on PCI && INET
2447 select INET_LRO
2448 ---help--- 2454 ---help---
2449 This driver supports Intel(R) 10GbE PCI Express family of 2455 This driver supports Intel(R) 10GbE PCI Express family of
2450 adapters. For more information on how to identify your adapter, go 2456 adapters. For more information on how to identify your adapter, go
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a3c5c002f22..ad87ba72cf1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -24,11 +24,12 @@ obj-$(CONFIG_JME) += jme.o
24 24
25gianfar_driver-objs := gianfar.o \ 25gianfar_driver-objs := gianfar.o \
26 gianfar_ethtool.o \ 26 gianfar_ethtool.o \
27 gianfar_mii.o \
28 gianfar_sysfs.o 27 gianfar_sysfs.o
29 28
30obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o 29obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
31ucc_geth_driver-objs := ucc_geth.o ucc_geth_mii.o ucc_geth_ethtool.o 30ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
31
32obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
32 33
33# 34#
34# link order important here 35# link order important here
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 7709992bb6b..cb9c95d3ed0 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -831,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
831 if (rx_pkt_limit > 0) { 831 if (rx_pkt_limit > 0) {
832 /* Receive descriptor is empty now */ 832 /* Receive descriptor is empty now */
833 spin_lock_irqsave(&lp->lock, flags); 833 spin_lock_irqsave(&lp->lock, flags);
834 __netif_rx_complete(napi); 834 __napi_complete(napi);
835 writel(VAL0|RINTEN0, mmio + INTEN0); 835 writel(VAL0|RINTEN0, mmio + INTEN0);
836 writel(VAL2 | RDMD0, mmio + CMD0); 836 writel(VAL2 | RDMD0, mmio + CMD0);
837 spin_unlock_irqrestore(&lp->lock, flags); 837 spin_unlock_irqrestore(&lp->lock, flags);
@@ -1170,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1170 1170
1171 /* Check if Receive Interrupt has occurred. */ 1171 /* Check if Receive Interrupt has occurred. */
1172 if (intr0 & RINT0) { 1172 if (intr0 & RINT0) {
1173 if (netif_rx_schedule_prep(&lp->napi)) { 1173 if (napi_schedule_prep(&lp->napi)) {
1174 /* Disable receive interupts */ 1174 /* Disable receive interupts */
1175 writel(RINTEN0, mmio + INTEN0); 1175 writel(RINTEN0, mmio + INTEN0);
1176 /* Schedule a polling routine */ 1176 /* Schedule a polling routine */
1177 __netif_rx_schedule(&lp->napi); 1177 __napi_schedule(&lp->napi);
1178 } else if (intren0 & RINTEN0) { 1178 } else if (intren0 & RINTEN0) {
1179 printk("************Driver bug! \ 1179 printk("************Driver bug! \
1180 interrupt while in poll\n"); 1180 interrupt while in poll\n");
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 3ff9affb1a9..646dfc5f50c 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -102,7 +102,7 @@ static void rx(struct net_device *dev, int bufnum,
102 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); 102 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
103 if (skb == NULL) { 103 if (skb == NULL) {
104 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 104 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
105 lp->stats.rx_dropped++; 105 dev->stats.rx_dropped++;
106 return; 106 return;
107 } 107 }
108 skb_put(skb, length + ARC_HDR_SIZE); 108 skb_put(skb, length + ARC_HDR_SIZE);
@@ -122,7 +122,7 @@ static void rx(struct net_device *dev, int bufnum,
122 122
123 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); 123 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
124 124
125 skb->protocol = __constant_htons(ETH_P_ARCNET); 125 skb->protocol = cpu_to_be16(ETH_P_ARCNET);
126; 126;
127 netif_rx(skb); 127 netif_rx(skb);
128} 128}
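
Besides the statistics change, the arc-rawmode hunk above swaps __constant_htons() for cpu_to_be16(). For a compile-time constant such as ETH_P_ARCNET the two yield the same big-endian value; cpu_to_be16() is simply the preferred spelling and also works for non-constant arguments. Illustrative fragment, with a hypothetical foo_* helper:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>

static void foo_set_arcnet_proto(struct sk_buff *skb)
{
        /* same big-endian constant as __constant_htons(ETH_P_ARCNET) */
        skb->protocol = cpu_to_be16(ETH_P_ARCNET);
}
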
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 6b53e5ed125..a80d4a30a46 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -95,17 +95,16 @@ EXPORT_SYMBOL(arcnet_unregister_proto);
95EXPORT_SYMBOL(arcnet_debug); 95EXPORT_SYMBOL(arcnet_debug);
96EXPORT_SYMBOL(alloc_arcdev); 96EXPORT_SYMBOL(alloc_arcdev);
97EXPORT_SYMBOL(arcnet_interrupt); 97EXPORT_SYMBOL(arcnet_interrupt);
98EXPORT_SYMBOL(arcnet_open);
99EXPORT_SYMBOL(arcnet_close);
100EXPORT_SYMBOL(arcnet_send_packet);
101EXPORT_SYMBOL(arcnet_timeout);
98 102
99/* Internal function prototypes */ 103/* Internal function prototypes */
100static int arcnet_open(struct net_device *dev);
101static int arcnet_close(struct net_device *dev);
102static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
103static void arcnet_timeout(struct net_device *dev);
104static int arcnet_header(struct sk_buff *skb, struct net_device *dev, 104static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
105 unsigned short type, const void *daddr, 105 unsigned short type, const void *daddr,
106 const void *saddr, unsigned len); 106 const void *saddr, unsigned len);
107static int arcnet_rebuild_header(struct sk_buff *skb); 107static int arcnet_rebuild_header(struct sk_buff *skb);
108static struct net_device_stats *arcnet_get_stats(struct net_device *dev);
109static int go_tx(struct net_device *dev); 108static int go_tx(struct net_device *dev);
110 109
111static int debug = ARCNET_DEBUG; 110static int debug = ARCNET_DEBUG;
@@ -322,11 +321,18 @@ static const struct header_ops arcnet_header_ops = {
322 .rebuild = arcnet_rebuild_header, 321 .rebuild = arcnet_rebuild_header,
323}; 322};
324 323
324static const struct net_device_ops arcnet_netdev_ops = {
325 .ndo_open = arcnet_open,
326 .ndo_stop = arcnet_close,
327 .ndo_start_xmit = arcnet_send_packet,
328 .ndo_tx_timeout = arcnet_timeout,
329};
325 330
326/* Setup a struct device for ARCnet. */ 331/* Setup a struct device for ARCnet. */
327static void arcdev_setup(struct net_device *dev) 332static void arcdev_setup(struct net_device *dev)
328{ 333{
329 dev->type = ARPHRD_ARCNET; 334 dev->type = ARPHRD_ARCNET;
335 dev->netdev_ops = &arcnet_netdev_ops;
330 dev->header_ops = &arcnet_header_ops; 336 dev->header_ops = &arcnet_header_ops;
331 dev->hard_header_len = sizeof(struct archdr); 337 dev->hard_header_len = sizeof(struct archdr);
332 dev->mtu = choose_mtu(); 338 dev->mtu = choose_mtu();
@@ -339,18 +345,9 @@ static void arcdev_setup(struct net_device *dev)
339 /* New-style flags. */ 345 /* New-style flags. */
340 dev->flags = IFF_BROADCAST; 346 dev->flags = IFF_BROADCAST;
341 347
342 /*
343 * Put in this stuff here, so we don't have to export the symbols to
344 * the chipset drivers.
345 */
346 dev->open = arcnet_open;
347 dev->stop = arcnet_close;
348 dev->hard_start_xmit = arcnet_send_packet;
349 dev->tx_timeout = arcnet_timeout;
350 dev->get_stats = arcnet_get_stats;
351} 348}
352 349
353struct net_device *alloc_arcdev(char *name) 350struct net_device *alloc_arcdev(const char *name)
354{ 351{
355 struct net_device *dev; 352 struct net_device *dev;
356 353
@@ -372,7 +369,7 @@ struct net_device *alloc_arcdev(char *name)
372 * that "should" only need to be set once at boot, so that there is 369 * that "should" only need to be set once at boot, so that there is
373 * non-reboot way to recover if something goes wrong. 370 * non-reboot way to recover if something goes wrong.
374 */ 371 */
375static int arcnet_open(struct net_device *dev) 372int arcnet_open(struct net_device *dev)
376{ 373{
377 struct arcnet_local *lp = netdev_priv(dev); 374 struct arcnet_local *lp = netdev_priv(dev);
378 int count, newmtu, error; 375 int count, newmtu, error;
@@ -472,7 +469,7 @@ static int arcnet_open(struct net_device *dev)
472 469
473 470
474/* The inverse routine to arcnet_open - shuts down the card. */ 471/* The inverse routine to arcnet_open - shuts down the card. */
475static int arcnet_close(struct net_device *dev) 472int arcnet_close(struct net_device *dev)
476{ 473{
477 struct arcnet_local *lp = netdev_priv(dev); 474 struct arcnet_local *lp = netdev_priv(dev);
478 475
@@ -583,8 +580,8 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
583 } else { 580 } else {
584 BUGMSG(D_NORMAL, 581 BUGMSG(D_NORMAL,
585 "I don't understand ethernet protocol %Xh addresses!\n", type); 582 "I don't understand ethernet protocol %Xh addresses!\n", type);
586 lp->stats.tx_errors++; 583 dev->stats.tx_errors++;
587 lp->stats.tx_aborted_errors++; 584 dev->stats.tx_aborted_errors++;
588 } 585 }
589 586
590 /* if we couldn't resolve the address... give up. */ 587 /* if we couldn't resolve the address... give up. */
@@ -601,7 +598,7 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
601 598
602 599
603/* Called by the kernel in order to transmit a packet. */ 600/* Called by the kernel in order to transmit a packet. */
604static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) 601int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
605{ 602{
606 struct arcnet_local *lp = netdev_priv(dev); 603 struct arcnet_local *lp = netdev_priv(dev);
607 struct archdr *pkt; 604 struct archdr *pkt;
@@ -645,7 +642,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
645 !proto->ack_tx) { 642 !proto->ack_tx) {
646 /* done right away and we don't want to acknowledge 643 /* done right away and we don't want to acknowledge
647 the package later - forget about it now */ 644 the package later - forget about it now */
648 lp->stats.tx_bytes += skb->len; 645 dev->stats.tx_bytes += skb->len;
649 freeskb = 1; 646 freeskb = 1;
650 } else { 647 } else {
651 /* do it the 'split' way */ 648 /* do it the 'split' way */
@@ -709,7 +706,7 @@ static int go_tx(struct net_device *dev)
709 /* start sending */ 706 /* start sending */
710 ACOMMAND(TXcmd | (lp->cur_tx << 3)); 707 ACOMMAND(TXcmd | (lp->cur_tx << 3));
711 708
712 lp->stats.tx_packets++; 709 dev->stats.tx_packets++;
713 lp->lasttrans_dest = lp->lastload_dest; 710 lp->lasttrans_dest = lp->lastload_dest;
714 lp->lastload_dest = 0; 711 lp->lastload_dest = 0;
715 lp->excnak_pending = 0; 712 lp->excnak_pending = 0;
@@ -720,7 +717,7 @@ static int go_tx(struct net_device *dev)
720 717
721 718
722/* Called by the kernel when transmit times out */ 719/* Called by the kernel when transmit times out */
723static void arcnet_timeout(struct net_device *dev) 720void arcnet_timeout(struct net_device *dev)
724{ 721{
725 unsigned long flags; 722 unsigned long flags;
726 struct arcnet_local *lp = netdev_priv(dev); 723 struct arcnet_local *lp = netdev_priv(dev);
@@ -732,11 +729,11 @@ static void arcnet_timeout(struct net_device *dev)
732 msg = " - missed IRQ?"; 729 msg = " - missed IRQ?";
733 } else { 730 } else {
734 msg = ""; 731 msg = "";
735 lp->stats.tx_aborted_errors++; 732 dev->stats.tx_aborted_errors++;
736 lp->timed_out = 1; 733 lp->timed_out = 1;
737 ACOMMAND(NOTXcmd | (lp->cur_tx << 3)); 734 ACOMMAND(NOTXcmd | (lp->cur_tx << 3));
738 } 735 }
739 lp->stats.tx_errors++; 736 dev->stats.tx_errors++;
740 737
741 /* make sure we didn't miss a TX or a EXC NAK IRQ */ 738 /* make sure we didn't miss a TX or a EXC NAK IRQ */
742 AINTMASK(0); 739 AINTMASK(0);
@@ -865,8 +862,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
865 "transmit was not acknowledged! " 862 "transmit was not acknowledged! "
866 "(status=%Xh, dest=%02Xh)\n", 863 "(status=%Xh, dest=%02Xh)\n",
867 status, lp->lasttrans_dest); 864 status, lp->lasttrans_dest);
868 lp->stats.tx_errors++; 865 dev->stats.tx_errors++;
869 lp->stats.tx_carrier_errors++; 866 dev->stats.tx_carrier_errors++;
870 } else { 867 } else {
871 BUGMSG(D_DURING, 868 BUGMSG(D_DURING,
872 "broadcast was not acknowledged; that's normal " 869 "broadcast was not acknowledged; that's normal "
@@ -905,7 +902,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
905 if (txbuf != -1) { 902 if (txbuf != -1) {
906 if (lp->outgoing.proto->continue_tx(dev, txbuf)) { 903 if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
907 /* that was the last segment */ 904 /* that was the last segment */
908 lp->stats.tx_bytes += lp->outgoing.skb->len; 905 dev->stats.tx_bytes += lp->outgoing.skb->len;
909 if(!lp->outgoing.proto->ack_tx) 906 if(!lp->outgoing.proto->ack_tx)
910 { 907 {
911 dev_kfree_skb_irq(lp->outgoing.skb); 908 dev_kfree_skb_irq(lp->outgoing.skb);
@@ -930,7 +927,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
930 } 927 }
931 if (status & lp->intmask & RECONflag) { 928 if (status & lp->intmask & RECONflag) {
932 ACOMMAND(CFLAGScmd | CONFIGclear); 929 ACOMMAND(CFLAGScmd | CONFIGclear);
933 lp->stats.tx_carrier_errors++; 930 dev->stats.tx_carrier_errors++;
934 931
935 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", 932 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
936 status); 933 status);
@@ -1038,8 +1035,8 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
1038 "(%d+4 bytes)\n", 1035 "(%d+4 bytes)\n",
1039 bufnum, pkt.hard.source, pkt.hard.dest, length); 1036 bufnum, pkt.hard.source, pkt.hard.dest, length);
1040 1037
1041 lp->stats.rx_packets++; 1038 dev->stats.rx_packets++;
1042 lp->stats.rx_bytes += length + ARC_HDR_SIZE; 1039 dev->stats.rx_bytes += length + ARC_HDR_SIZE;
1043 1040
1044 /* call the right receiver for the protocol */ 1041 /* call the right receiver for the protocol */
1045 if (arc_proto_map[soft->proto]->is_ip) { 1042 if (arc_proto_map[soft->proto]->is_ip) {
@@ -1067,18 +1064,6 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
1067} 1064}
1068 1065
1069 1066
1070
1071/*
1072 * Get the current statistics. This may be called with the card open or
1073 * closed.
1074 */
1075static struct net_device_stats *arcnet_get_stats(struct net_device *dev)
1076{
1077 struct arcnet_local *lp = netdev_priv(dev);
1078 return &lp->stats;
1079}
1080
1081
1082static void null_rx(struct net_device *dev, int bufnum, 1067static void null_rx(struct net_device *dev, int bufnum,
1083 struct archdr *pkthdr, int length) 1068 struct archdr *pkthdr, int length)
1084{ 1069{
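
The arcnet.c conversion above drops the driver's private statistics copy and its get_stats method: counters are bumped directly in dev->stats, which the core reports by default when no .ndo_get_stats is provided. The open/close/xmit/timeout handlers also stop being static so the com20020 ops table below can reuse them. A sketch of the statistics side only; foo_rx_one is an invented helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_rx_one(struct net_device *dev, struct sk_buff *skb)
{
        if (!skb) {
                dev->stats.rx_dropped++;        /* was lp->stats.rx_dropped++ */
                return;
        }
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        netif_rx(skb);
}
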
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 30580bbe252..083e21094b2 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -119,7 +119,7 @@ static void rx(struct net_device *dev, int bufnum,
119 skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC); 119 skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
120 if (skb == NULL) { 120 if (skb == NULL) {
121 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 121 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
122 lp->stats.rx_dropped++; 122 dev->stats.rx_dropped++;
123 return; 123 return;
124 } 124 }
125 skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); 125 skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
@@ -148,7 +148,7 @@ static void rx(struct net_device *dev, int bufnum,
148 148
149 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); 149 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
150 150
151 skb->protocol = __constant_htons(ETH_P_ARCNET); 151 skb->protocol = cpu_to_be16(ETH_P_ARCNET);
152; 152;
153 netif_rx(skb); 153 netif_rx(skb);
154} 154}
@@ -282,7 +282,7 @@ static int ack_tx(struct net_device *dev, int acked)
282 BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", 282 BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
283 *((int*)&ackpkt->soft.cap.cookie[0])); 283 *((int*)&ackpkt->soft.cap.cookie[0]));
284 284
285 ackskb->protocol = __constant_htons(ETH_P_ARCNET); 285 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
286 286
287 BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); 287 BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
288 netif_rx(ackskb); 288 netif_rx(ackskb);
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index ea53a940272..db08fc24047 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -151,6 +151,8 @@ static int __init com20020_init(void)
151 if (node && node != 0xff) 151 if (node && node != 0xff)
152 dev->dev_addr[0] = node; 152 dev->dev_addr[0] = node;
153 153
154 dev->netdev_ops = &com20020_netdev_ops;
155
154 lp = netdev_priv(dev); 156 lp = netdev_priv(dev);
155 lp->backplane = backplane; 157 lp->backplane = backplane;
156 lp->clockp = clockp & 7; 158 lp->clockp = clockp & 7;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 8b51f632581..dbf4de39754 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -72,6 +72,9 @@ static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_de
72 dev = alloc_arcdev(device); 72 dev = alloc_arcdev(device);
73 if (!dev) 73 if (!dev)
74 return -ENOMEM; 74 return -ENOMEM;
75
76 dev->netdev_ops = &com20020_netdev_ops;
77
75 lp = netdev_priv(dev); 78 lp = netdev_priv(dev);
76 79
77 pci_set_drvdata(pdev, dev); 80 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 103688358fb..651275a5f3d 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -149,6 +149,14 @@ int com20020_check(struct net_device *dev)
149 return 0; 149 return 0;
150} 150}
151 151
152const struct net_device_ops com20020_netdev_ops = {
153 .ndo_open = arcnet_open,
154 .ndo_stop = arcnet_close,
155 .ndo_start_xmit = arcnet_send_packet,
156 .ndo_tx_timeout = arcnet_timeout,
157 .ndo_set_multicast_list = com20020_set_mc_list,
158};
159
152/* Set up the struct net_device associated with this card. Called after 160/* Set up the struct net_device associated with this card. Called after
153 * probing succeeds. 161 * probing succeeds.
154 */ 162 */
@@ -170,8 +178,6 @@ int com20020_found(struct net_device *dev, int shared)
170 lp->hw.copy_from_card = com20020_copy_from_card; 178 lp->hw.copy_from_card = com20020_copy_from_card;
171 lp->hw.close = com20020_close; 179 lp->hw.close = com20020_close;
172 180
173 dev->set_multicast_list = com20020_set_mc_list;
174
175 if (!dev->dev_addr[0]) 181 if (!dev->dev_addr[0])
176 dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */ 182 dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */
177 183
@@ -342,6 +348,7 @@ static void com20020_set_mc_list(struct net_device *dev)
342 defined(CONFIG_ARCNET_COM20020_CS_MODULE) 348 defined(CONFIG_ARCNET_COM20020_CS_MODULE)
343EXPORT_SYMBOL(com20020_check); 349EXPORT_SYMBOL(com20020_check);
344EXPORT_SYMBOL(com20020_found); 350EXPORT_SYMBOL(com20020_found);
351EXPORT_SYMBOL(com20020_netdev_ops);
345#endif 352#endif
346 353
347MODULE_LICENSE("GPL"); 354MODULE_LICENSE("GPL");
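
com20020.c defines a single exported com20020_netdev_ops built from the now-exported arcnet_open/arcnet_close/arcnet_send_packet/arcnet_timeout plus its own multicast hook, and the ISA and PCI front ends above simply point dev->netdev_ops at it. A sketch of that core-plus-front-end split, with invented foo_* symbols:

#include <linux/module.h>
#include <linux/netdevice.h>

/* --- shared core module (e.g. foo_core.c) --- */
int foo_open(struct net_device *dev)    { return 0; }
EXPORT_SYMBOL(foo_open);

int foo_close(struct net_device *dev)   { return 0; }
EXPORT_SYMBOL(foo_close);

const struct net_device_ops foo_netdev_ops = {
        .ndo_open       = foo_open,
        .ndo_stop       = foo_close,
};
EXPORT_SYMBOL(foo_netdev_ops);

/* --- bus-specific front end (e.g. foo_pci.c) --- */
static int foo_pci_attach(struct net_device *dev)
{
        /* set before the rest of the per-device setup, as in the hunks above */
        dev->netdev_ops = &foo_netdev_ops;
        return 0;
}
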
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index 49d39a9cb69..06f8fa2f8f2 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -88,7 +88,6 @@ MODULE_LICENSE("GPL");
88 */ 88 */
89static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) 89static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
90{ 90{
91 struct arcnet_local *lp = netdev_priv(dev);
92 struct archdr *pkt = (struct archdr *) skb->data; 91 struct archdr *pkt = (struct archdr *) skb->data;
93 struct arc_rfc1051 *soft = &pkt->soft.rfc1051; 92 struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
94 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; 93 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
@@ -112,8 +111,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
112 return htons(ETH_P_ARP); 111 return htons(ETH_P_ARP);
113 112
114 default: 113 default:
115 lp->stats.rx_errors++; 114 dev->stats.rx_errors++;
116 lp->stats.rx_crc_errors++; 115 dev->stats.rx_crc_errors++;
117 return 0; 116 return 0;
118 } 117 }
119 118
@@ -140,7 +139,7 @@ static void rx(struct net_device *dev, int bufnum,
140 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); 139 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
141 if (skb == NULL) { 140 if (skb == NULL) {
142 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 141 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
143 lp->stats.rx_dropped++; 142 dev->stats.rx_dropped++;
144 return; 143 return;
145 } 144 }
146 skb_put(skb, length + ARC_HDR_SIZE); 145 skb_put(skb, length + ARC_HDR_SIZE);
@@ -168,7 +167,6 @@ static void rx(struct net_device *dev, int bufnum,
168static int build_header(struct sk_buff *skb, struct net_device *dev, 167static int build_header(struct sk_buff *skb, struct net_device *dev,
169 unsigned short type, uint8_t daddr) 168 unsigned short type, uint8_t daddr)
170{ 169{
171 struct arcnet_local *lp = netdev_priv(dev);
172 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; 170 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
173 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); 171 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
174 struct arc_rfc1051 *soft = &pkt->soft.rfc1051; 172 struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
@@ -184,8 +182,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
184 default: 182 default:
185 BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n", 183 BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n",
186 type, type); 184 type, type);
187 lp->stats.tx_errors++; 185 dev->stats.tx_errors++;
188 lp->stats.tx_aborted_errors++; 186 dev->stats.tx_aborted_errors++;
189 return 0; 187 return 0;
190 } 188 }
191 189
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index 2303d3a1f4b..745530651c4 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -92,7 +92,6 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
92{ 92{
93 struct archdr *pkt = (struct archdr *) skb->data; 93 struct archdr *pkt = (struct archdr *) skb->data;
94 struct arc_rfc1201 *soft = &pkt->soft.rfc1201; 94 struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
95 struct arcnet_local *lp = netdev_priv(dev);
96 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; 95 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
97 96
98 /* Pull off the arcnet header. */ 97 /* Pull off the arcnet header. */
@@ -121,8 +120,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
121 case ARC_P_NOVELL_EC: 120 case ARC_P_NOVELL_EC:
122 return htons(ETH_P_802_3); 121 return htons(ETH_P_802_3);
123 default: 122 default:
124 lp->stats.rx_errors++; 123 dev->stats.rx_errors++;
125 lp->stats.rx_crc_errors++; 124 dev->stats.rx_crc_errors++;
126 return 0; 125 return 0;
127 } 126 }
128 127
@@ -172,8 +171,8 @@ static void rx(struct net_device *dev, int bufnum,
172 in->sequence, soft->split_flag, soft->sequence); 171 in->sequence, soft->split_flag, soft->sequence);
173 lp->rfc1201.aborted_seq = soft->sequence; 172 lp->rfc1201.aborted_seq = soft->sequence;
174 dev_kfree_skb_irq(in->skb); 173 dev_kfree_skb_irq(in->skb);
175 lp->stats.rx_errors++; 174 dev->stats.rx_errors++;
176 lp->stats.rx_missed_errors++; 175 dev->stats.rx_missed_errors++;
177 in->skb = NULL; 176 in->skb = NULL;
178 } 177 }
179 in->sequence = soft->sequence; 178 in->sequence = soft->sequence;
@@ -181,7 +180,7 @@ static void rx(struct net_device *dev, int bufnum,
181 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); 180 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
182 if (skb == NULL) { 181 if (skb == NULL) {
183 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 182 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
184 lp->stats.rx_dropped++; 183 dev->stats.rx_dropped++;
185 return; 184 return;
186 } 185 }
187 skb_put(skb, length + ARC_HDR_SIZE); 186 skb_put(skb, length + ARC_HDR_SIZE);
@@ -213,7 +212,7 @@ static void rx(struct net_device *dev, int bufnum,
213 BUGMSG(D_EXTRA, 212 BUGMSG(D_EXTRA,
214 "ARP source address was 00h, set to %02Xh.\n", 213 "ARP source address was 00h, set to %02Xh.\n",
215 saddr); 214 saddr);
216 lp->stats.rx_crc_errors++; 215 dev->stats.rx_crc_errors++;
217 *cptr = saddr; 216 *cptr = saddr;
218 } else { 217 } else {
219 BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n", 218 BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n",
@@ -222,8 +221,8 @@ static void rx(struct net_device *dev, int bufnum,
222 } else { 221 } else {
223 BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n", 222 BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n",
224 arp->ar_hln, arp->ar_pln); 223 arp->ar_hln, arp->ar_pln);
225 lp->stats.rx_errors++; 224 dev->stats.rx_errors++;
226 lp->stats.rx_crc_errors++; 225 dev->stats.rx_crc_errors++;
227 } 226 }
228 } 227 }
229 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); 228 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
@@ -257,8 +256,8 @@ static void rx(struct net_device *dev, int bufnum,
257 soft->split_flag); 256 soft->split_flag);
258 dev_kfree_skb_irq(in->skb); 257 dev_kfree_skb_irq(in->skb);
259 in->skb = NULL; 258 in->skb = NULL;
260 lp->stats.rx_errors++; 259 dev->stats.rx_errors++;
261 lp->stats.rx_missed_errors++; 260 dev->stats.rx_missed_errors++;
262 in->lastpacket = in->numpackets = 0; 261 in->lastpacket = in->numpackets = 0;
263 } 262 }
264 if (soft->split_flag & 1) { /* first packet in split */ 263 if (soft->split_flag & 1) { /* first packet in split */
@@ -269,8 +268,8 @@ static void rx(struct net_device *dev, int bufnum,
269 "(splitflag=%d, seq=%d)\n", 268 "(splitflag=%d, seq=%d)\n",
270 in->sequence, soft->split_flag, 269 in->sequence, soft->split_flag,
271 soft->sequence); 270 soft->sequence);
272 lp->stats.rx_errors++; 271 dev->stats.rx_errors++;
273 lp->stats.rx_missed_errors++; 272 dev->stats.rx_missed_errors++;
274 dev_kfree_skb_irq(in->skb); 273 dev_kfree_skb_irq(in->skb);
275 } 274 }
276 in->sequence = soft->sequence; 275 in->sequence = soft->sequence;
@@ -281,8 +280,8 @@ static void rx(struct net_device *dev, int bufnum,
281 BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n", 280 BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n",
282 soft->split_flag); 281 soft->split_flag);
283 lp->rfc1201.aborted_seq = soft->sequence; 282 lp->rfc1201.aborted_seq = soft->sequence;
284 lp->stats.rx_errors++; 283 dev->stats.rx_errors++;
285 lp->stats.rx_length_errors++; 284 dev->stats.rx_length_errors++;
286 return; 285 return;
287 } 286 }
288 in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE, 287 in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE,
@@ -290,7 +289,7 @@ static void rx(struct net_device *dev, int bufnum,
290 if (skb == NULL) { 289 if (skb == NULL) {
291 BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n"); 290 BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n");
292 lp->rfc1201.aborted_seq = soft->sequence; 291 lp->rfc1201.aborted_seq = soft->sequence;
293 lp->stats.rx_dropped++; 292 dev->stats.rx_dropped++;
294 return; 293 return;
295 } 294 }
296 skb->dev = dev; 295 skb->dev = dev;
@@ -314,8 +313,8 @@ static void rx(struct net_device *dev, int bufnum,
314 "first! (splitflag=%d, seq=%d, aborted=%d)\n", 313 "first! (splitflag=%d, seq=%d, aborted=%d)\n",
315 soft->split_flag, soft->sequence, 314 soft->split_flag, soft->sequence,
316 lp->rfc1201.aborted_seq); 315 lp->rfc1201.aborted_seq);
317 lp->stats.rx_errors++; 316 dev->stats.rx_errors++;
318 lp->stats.rx_missed_errors++; 317 dev->stats.rx_missed_errors++;
319 } 318 }
320 return; 319 return;
321 } 320 }
@@ -325,8 +324,8 @@ static void rx(struct net_device *dev, int bufnum,
325 if (packetnum <= in->lastpacket - 1) { 324 if (packetnum <= in->lastpacket - 1) {
326 BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n", 325 BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n",
327 soft->split_flag); 326 soft->split_flag);
328 lp->stats.rx_errors++; 327 dev->stats.rx_errors++;
329 lp->stats.rx_frame_errors++; 328 dev->stats.rx_frame_errors++;
330 return; 329 return;
331 } 330 }
332 /* "bad" duplicate, kill reassembly */ 331 /* "bad" duplicate, kill reassembly */
@@ -336,8 +335,8 @@ static void rx(struct net_device *dev, int bufnum,
336 lp->rfc1201.aborted_seq = soft->sequence; 335 lp->rfc1201.aborted_seq = soft->sequence;
337 dev_kfree_skb_irq(in->skb); 336 dev_kfree_skb_irq(in->skb);
338 in->skb = NULL; 337 in->skb = NULL;
339 lp->stats.rx_errors++; 338 dev->stats.rx_errors++;
340 lp->stats.rx_missed_errors++; 339 dev->stats.rx_missed_errors++;
341 in->lastpacket = in->numpackets = 0; 340 in->lastpacket = in->numpackets = 0;
342 return; 341 return;
343 } 342 }
@@ -404,8 +403,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
404 default: 403 default:
405 BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n", 404 BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n",
406 type, type); 405 type, type);
407 lp->stats.tx_errors++; 406 dev->stats.tx_errors++;
408 lp->stats.tx_aborted_errors++; 407 dev->stats.tx_aborted_errors++;
409 return 0; 408 return 0;
410 } 409 }
411 410
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 3ec20cc18b0..cc7708775da 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -298,7 +298,7 @@ poll_some_more:
298 int more = 0; 298 int more = 0;
299 299
300 spin_lock_irq(&ep->rx_lock); 300 spin_lock_irq(&ep->rx_lock);
301 __netif_rx_complete(napi); 301 __napi_complete(napi);
302 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); 302 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
303 if (ep93xx_have_more_rx(ep)) { 303 if (ep93xx_have_more_rx(ep)) {
304 wrl(ep, REG_INTEN, REG_INTEN_TX); 304 wrl(ep, REG_INTEN, REG_INTEN_TX);
@@ -307,7 +307,7 @@ poll_some_more:
307 } 307 }
308 spin_unlock_irq(&ep->rx_lock); 308 spin_unlock_irq(&ep->rx_lock);
309 309
310 if (more && netif_rx_reschedule(napi)) 310 if (more && napi_reschedule(napi))
311 goto poll_some_more; 311 goto poll_some_more;
312 } 312 }
313 313
@@ -415,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
415 415
416 if (status & REG_INTSTS_RX) { 416 if (status & REG_INTSTS_RX) {
417 spin_lock(&ep->rx_lock); 417 spin_lock(&ep->rx_lock);
418 if (likely(netif_rx_schedule_prep(&ep->napi))) { 418 if (likely(napi_schedule_prep(&ep->napi))) {
419 wrl(ep, REG_INTEN, REG_INTEN_TX); 419 wrl(ep, REG_INTEN, REG_INTEN_TX);
420 __netif_rx_schedule(&ep->napi); 420 __napi_schedule(&ep->napi);
421 } 421 }
422 spin_unlock(&ep->rx_lock); 422 spin_unlock(&ep->rx_lock);
423 } 423 }
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 5fce1d5c1a1..5fe17d5eaa5 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -473,7 +473,7 @@ static void eth_rx_irq(void *pdev)
473 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); 473 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
474#endif 474#endif
475 qmgr_disable_irq(port->plat->rxq); 475 qmgr_disable_irq(port->plat->rxq);
476 netif_rx_schedule(&port->napi); 476 napi_schedule(&port->napi);
477} 477}
478 478
479static int eth_poll(struct napi_struct *napi, int budget) 479static int eth_poll(struct napi_struct *napi, int budget)
@@ -498,16 +498,16 @@ static int eth_poll(struct napi_struct *napi, int budget)
498 498
499 if ((n = queue_get_desc(rxq, port, 0)) < 0) { 499 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
500#if DEBUG_RX 500#if DEBUG_RX
501 printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n", 501 printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
502 dev->name); 502 dev->name);
503#endif 503#endif
504 netif_rx_complete(napi); 504 napi_complete(napi);
505 qmgr_enable_irq(rxq); 505 qmgr_enable_irq(rxq);
506 if (!qmgr_stat_empty(rxq) && 506 if (!qmgr_stat_empty(rxq) &&
507 netif_rx_reschedule(napi)) { 507 napi_reschedule(napi)) {
508#if DEBUG_RX 508#if DEBUG_RX
509 printk(KERN_DEBUG "%s: eth_poll" 509 printk(KERN_DEBUG "%s: eth_poll"
510 " netif_rx_reschedule successed\n", 510 " napi_reschedule successed\n",
511 dev->name); 511 dev->name);
512#endif 512#endif
513 qmgr_disable_irq(rxq); 513 qmgr_disable_irq(rxq);
@@ -1036,7 +1036,7 @@ static int eth_open(struct net_device *dev)
1036 } 1036 }
1037 ports_open++; 1037 ports_open++;
1038 /* we may already have RX data, enables IRQ */ 1038 /* we may already have RX data, enables IRQ */
1039 netif_rx_schedule(&port->napi); 1039 napi_schedule(&port->napi);
1040 return 0; 1040 return 0;
1041} 1041}
1042 1042
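
The ep93xx and ixp4xx hunks also show the renamed reschedule helper: after napi_complete() and re-enabling the rx interrupt, the poll routine rechecks for work and, if napi_reschedule() wins the race, keeps polling. A rough sketch; foo_rx(), foo_rx_pending() and the irq enable/disable calls are placeholders:

#include <linux/netdevice.h>

static void foo_enable_rx_irq(void)  { }
static void foo_disable_rx_irq(void) { }
static int  foo_rx_pending(void)     { return 0; }
static int  foo_rx(struct napi_struct *napi, int budget) { return 0; }

static int foo_poll(struct napi_struct *napi, int budget)
{
        int work_done = foo_rx(napi, budget);

        if (work_done < budget) {
                napi_complete(napi);
                foo_enable_rx_irq();
                /* a frame may have arrived between the last check and the
                 * irq enable; napi_reschedule() re-claims the poll only if
                 * napi_complete() above had really finished it */
                if (foo_rx_pending() && napi_reschedule(napi))
                        foo_disable_rx_irq();
        }
        return work_done;
}
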
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 1cf2f949c0b..b39210cf4fb 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1059,7 +1059,7 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
1059{ 1059{
1060 strlcpy(info->driver, MODULENAME, sizeof(info->driver)); 1060 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1061 strlcpy(info->version, MODULEVERSION, sizeof(info->version)); 1061 strlcpy(info->version, MODULEVERSION, sizeof(info->version));
1062 strlcpy(info->bus_info, ndev->dev.parent->bus_id, 1062 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
1063 sizeof(info->bus_info)); 1063 sizeof(info->bus_info));
1064} 1064}
1065 1065
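
ks8695net above and au1000 below also pick up the dev_name() accessor; struct device's bus_id field was on its way out at this point, so direct ->bus_id reads become dev_name(...) calls. Minimal sketch, with a hypothetical foo_get_drvinfo:

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/device.h>
#include <linux/string.h>

static void foo_get_drvinfo(struct net_device *ndev,
                            struct ethtool_drvinfo *info)
{
        strlcpy(info->bus_info, dev_name(ndev->dev.parent),
                sizeof(info->bus_info));
}
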
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index bb9094d4cbc..c758884728a 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
1326 AT_WRITE_REG(hw, REG_IMR, 1326 AT_WRITE_REG(hw, REG_IMR,
1327 IMR_NORMAL_MASK & ~ISR_RX_EVENT); 1327 IMR_NORMAL_MASK & ~ISR_RX_EVENT);
1328 AT_WRITE_FLUSH(hw); 1328 AT_WRITE_FLUSH(hw);
1329 if (likely(netif_rx_schedule_prep( 1329 if (likely(napi_schedule_prep(
1330 &adapter->napi))) 1330 &adapter->napi)))
1331 __netif_rx_schedule(&adapter->napi); 1331 __napi_schedule(&adapter->napi);
1332 } 1332 }
1333 } while (--max_ints > 0); 1333 } while (--max_ints > 0);
1334 /* re-enable Interrupt*/ 1334 /* re-enable Interrupt*/
@@ -1514,7 +1514,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
1514 /* If no Tx and not enough Rx work done, exit the polling mode */ 1514 /* If no Tx and not enough Rx work done, exit the polling mode */
1515 if (work_done < budget) { 1515 if (work_done < budget) {
1516quit_polling: 1516quit_polling:
1517 netif_rx_complete(napi); 1517 napi_complete(napi);
1518 imr_data = AT_READ_REG(&adapter->hw, REG_IMR); 1518 imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
1519 AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); 1519 AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
1520 /* test debug */ 1520 /* test debug */
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 9c875bb3f76..4274e4ac963 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -81,24 +81,6 @@ MODULE_AUTHOR(DRV_AUTHOR);
81MODULE_DESCRIPTION(DRV_DESC); 81MODULE_DESCRIPTION(DRV_DESC);
82MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
83 83
84// prototypes
85static void hard_stop(struct net_device *);
86static void enable_rx_tx(struct net_device *dev);
87static struct net_device * au1000_probe(int port_num);
88static int au1000_init(struct net_device *);
89static int au1000_open(struct net_device *);
90static int au1000_close(struct net_device *);
91static int au1000_tx(struct sk_buff *, struct net_device *);
92static int au1000_rx(struct net_device *);
93static irqreturn_t au1000_interrupt(int, void *);
94static void au1000_tx_timeout(struct net_device *);
95static void set_rx_mode(struct net_device *);
96static int au1000_ioctl(struct net_device *, struct ifreq *, int);
97static int au1000_mdio_read(struct net_device *, int, int);
98static void au1000_mdio_write(struct net_device *, int, int, u16);
99static void au1000_adjust_link(struct net_device *);
100static void enable_mac(struct net_device *, int);
101
102/* 84/*
103 * Theory of operation 85 * Theory of operation
104 * 86 *
@@ -188,6 +170,26 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
188# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet 170# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
189#endif 171#endif
190 172
173static void enable_mac(struct net_device *dev, int force_reset)
174{
175 unsigned long flags;
176 struct au1000_private *aup = netdev_priv(dev);
177
178 spin_lock_irqsave(&aup->lock, flags);
179
180 if(force_reset || (!aup->mac_enabled)) {
181 *aup->enable = MAC_EN_CLOCK_ENABLE;
182 au_sync_delay(2);
183 *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
184 | MAC_EN_CLOCK_ENABLE);
185 au_sync_delay(2);
186
187 aup->mac_enabled = 1;
188 }
189
190 spin_unlock_irqrestore(&aup->lock, flags);
191}
192
191/* 193/*
192 * MII operations 194 * MII operations
193 */ 195 */
@@ -281,6 +283,107 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
281 return 0; 283 return 0;
282} 284}
283 285
286static void hard_stop(struct net_device *dev)
287{
288 struct au1000_private *aup = netdev_priv(dev);
289
290 if (au1000_debug > 4)
291 printk(KERN_INFO "%s: hard stop\n", dev->name);
292
293 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
294 au_sync_delay(10);
295}
296
297static void enable_rx_tx(struct net_device *dev)
298{
299 struct au1000_private *aup = netdev_priv(dev);
300
301 if (au1000_debug > 4)
302 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
303
304 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
305 au_sync_delay(10);
306}
307
308static void
309au1000_adjust_link(struct net_device *dev)
310{
311 struct au1000_private *aup = netdev_priv(dev);
312 struct phy_device *phydev = aup->phy_dev;
313 unsigned long flags;
314
315 int status_change = 0;
316
317 BUG_ON(!aup->phy_dev);
318
319 spin_lock_irqsave(&aup->lock, flags);
320
321 if (phydev->link && (aup->old_speed != phydev->speed)) {
322 // speed changed
323
324 switch(phydev->speed) {
325 case SPEED_10:
326 case SPEED_100:
327 break;
328 default:
329 printk(KERN_WARNING
330 "%s: Speed (%d) is not 10/100 ???\n",
331 dev->name, phydev->speed);
332 break;
333 }
334
335 aup->old_speed = phydev->speed;
336
337 status_change = 1;
338 }
339
340 if (phydev->link && (aup->old_duplex != phydev->duplex)) {
341 // duplex mode changed
342
343 /* switching duplex mode requires to disable rx and tx! */
344 hard_stop(dev);
345
346 if (DUPLEX_FULL == phydev->duplex)
347 aup->mac->control = ((aup->mac->control
348 | MAC_FULL_DUPLEX)
349 & ~MAC_DISABLE_RX_OWN);
350 else
351 aup->mac->control = ((aup->mac->control
352 & ~MAC_FULL_DUPLEX)
353 | MAC_DISABLE_RX_OWN);
354 au_sync_delay(1);
355
356 enable_rx_tx(dev);
357 aup->old_duplex = phydev->duplex;
358
359 status_change = 1;
360 }
361
362 if(phydev->link != aup->old_link) {
363 // link state changed
364
365 if (!phydev->link) {
366 /* link went down */
367 aup->old_speed = 0;
368 aup->old_duplex = -1;
369 }
370
371 aup->old_link = phydev->link;
372 status_change = 1;
373 }
374
375 spin_unlock_irqrestore(&aup->lock, flags);
376
377 if (status_change) {
378 if (phydev->link)
379 printk(KERN_INFO "%s: link up (%d/%s)\n",
380 dev->name, phydev->speed,
381 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
382 else
383 printk(KERN_INFO "%s: link down\n", dev->name);
384 }
385}
386
284static int mii_probe (struct net_device *dev) 387static int mii_probe (struct net_device *dev)
285{ 388{
286 struct au1000_private *const aup = netdev_priv(dev); 389 struct au1000_private *const aup = netdev_priv(dev);
@@ -355,8 +458,8 @@ static int mii_probe (struct net_device *dev)
355 /* now we are supposed to have a proper phydev, to attach to... */ 458 /* now we are supposed to have a proper phydev, to attach to... */
356 BUG_ON(phydev->attached_dev); 459 BUG_ON(phydev->attached_dev);
357 460
358 phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0, 461 phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link,
359 PHY_INTERFACE_MODE_MII); 462 0, PHY_INTERFACE_MODE_MII);
360 463
361 if (IS_ERR(phydev)) { 464 if (IS_ERR(phydev)) {
362 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 465 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
@@ -381,8 +484,8 @@ static int mii_probe (struct net_device *dev)
381 aup->phy_dev = phydev; 484 aup->phy_dev = phydev;
382 485
383 printk(KERN_INFO "%s: attached PHY driver [%s] " 486 printk(KERN_INFO "%s: attached PHY driver [%s] "
384 "(mii_bus:phy_addr=%s, irq=%d)\n", 487 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
385 dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); 488 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
386 489
387 return 0; 490 return 0;
388} 491}
@@ -412,48 +515,6 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
412 aup->pDBfree = pDB; 515 aup->pDBfree = pDB;
413} 516}
414 517
415static void enable_rx_tx(struct net_device *dev)
416{
417 struct au1000_private *aup = netdev_priv(dev);
418
419 if (au1000_debug > 4)
420 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
421
422 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
423 au_sync_delay(10);
424}
425
426static void hard_stop(struct net_device *dev)
427{
428 struct au1000_private *aup = netdev_priv(dev);
429
430 if (au1000_debug > 4)
431 printk(KERN_INFO "%s: hard stop\n", dev->name);
432
433 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
434 au_sync_delay(10);
435}
436
437static void enable_mac(struct net_device *dev, int force_reset)
438{
439 unsigned long flags;
440 struct au1000_private *aup = netdev_priv(dev);
441
442 spin_lock_irqsave(&aup->lock, flags);
443
444 if(force_reset || (!aup->mac_enabled)) {
445 *aup->enable = MAC_EN_CLOCK_ENABLE;
446 au_sync_delay(2);
447 *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
448 | MAC_EN_CLOCK_ENABLE);
449 au_sync_delay(2);
450
451 aup->mac_enabled = 1;
452 }
453
454 spin_unlock_irqrestore(&aup->lock, flags);
455}
456
457static void reset_mac_unlocked(struct net_device *dev) 518static void reset_mac_unlocked(struct net_device *dev)
458{ 519{
459 struct au1000_private *const aup = netdev_priv(dev); 520 struct au1000_private *const aup = netdev_priv(dev);
@@ -542,30 +603,6 @@ static struct {
542static int num_ifs; 603static int num_ifs;
543 604
544/* 605/*
545 * Setup the base address and interrupt of the Au1xxx ethernet macs
546 * based on cpu type and whether the interface is enabled in sys_pinfunc
547 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
548 */
549static int __init au1000_init_module(void)
550{
551 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
552 struct net_device *dev;
553 int i, found_one = 0;
554
555 num_ifs = NUM_ETH_INTERFACES - ni;
556
557 for(i = 0; i < num_ifs; i++) {
558 dev = au1000_probe(i);
559 iflist[i].dev = dev;
560 if (dev)
561 found_one++;
562 }
563 if (!found_one)
564 return -ENODEV;
565 return 0;
566}
567
568/*
569 * ethtool operations 606 * ethtool operations
570 */ 607 */
571 608
@@ -611,199 +648,6 @@ static const struct ethtool_ops au1000_ethtool_ops = {
611 .get_link = ethtool_op_get_link, 648 .get_link = ethtool_op_get_link,
612}; 649};
613 650
614static struct net_device * au1000_probe(int port_num)
615{
616 static unsigned version_printed = 0;
617 struct au1000_private *aup = NULL;
618 struct net_device *dev = NULL;
619 db_dest_t *pDB, *pDBfree;
620 char ethaddr[6];
621 int irq, i, err;
622 u32 base, macen;
623
624 if (port_num >= NUM_ETH_INTERFACES)
625 return NULL;
626
627 base = CPHYSADDR(iflist[port_num].base_addr );
628 macen = CPHYSADDR(iflist[port_num].macen_addr);
629 irq = iflist[port_num].irq;
630
631 if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
632 !request_mem_region(macen, 4, "Au1x00 ENET"))
633 return NULL;
634
635 if (version_printed++ == 0)
636 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
637
638 dev = alloc_etherdev(sizeof(struct au1000_private));
639 if (!dev) {
640 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
641 return NULL;
642 }
643
644 if ((err = register_netdev(dev)) != 0) {
645 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
646 DRV_NAME, err);
647 free_netdev(dev);
648 return NULL;
649 }
650
651 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
652 dev->name, base, irq);
653
654 aup = netdev_priv(dev);
655
656 spin_lock_init(&aup->lock);
657
658 /* Allocate the data buffers */
659 /* Snooping works fine with eth on all au1xxx */
660 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
661 (NUM_TX_BUFFS + NUM_RX_BUFFS),
662 &aup->dma_addr, 0);
663 if (!aup->vaddr) {
664 free_netdev(dev);
665 release_mem_region( base, MAC_IOSIZE);
666 release_mem_region(macen, 4);
667 return NULL;
668 }
669
670 /* aup->mac is the base address of the MAC's registers */
671 aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
672
673 /* Setup some variables for quick register address access */
674 aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
675 aup->mac_id = port_num;
676 au_macs[port_num] = aup;
677
678 if (port_num == 0) {
679 if (prom_get_ethernet_addr(ethaddr) == 0)
680 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
681 else {
682 printk(KERN_INFO "%s: No MAC address found\n",
683 dev->name);
684 /* Use the hard coded MAC addresses */
685 }
686
687 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
688 } else if (port_num == 1)
689 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
690
691 /*
692 * Assign to the Ethernet ports two consecutive MAC addresses
693 * to match those that are printed on their stickers
694 */
695 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
696 dev->dev_addr[5] += port_num;
697
698 *aup->enable = 0;
699 aup->mac_enabled = 0;
700
701 aup->mii_bus = mdiobus_alloc();
702 if (aup->mii_bus == NULL)
703 goto err_out;
704
705 aup->mii_bus->priv = dev;
706 aup->mii_bus->read = au1000_mdiobus_read;
707 aup->mii_bus->write = au1000_mdiobus_write;
708 aup->mii_bus->reset = au1000_mdiobus_reset;
709 aup->mii_bus->name = "au1000_eth_mii";
710 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
711 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
712 for(i = 0; i < PHY_MAX_ADDR; ++i)
713 aup->mii_bus->irq[i] = PHY_POLL;
714
715 /* if known, set corresponding PHY IRQs */
716#if defined(AU1XXX_PHY_STATIC_CONFIG)
717# if defined(AU1XXX_PHY0_IRQ)
718 if (AU1XXX_PHY0_BUSID == aup->mac_id)
719 aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
720# endif
721# if defined(AU1XXX_PHY1_IRQ)
722 if (AU1XXX_PHY1_BUSID == aup->mac_id)
723 aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
724# endif
725#endif
726 mdiobus_register(aup->mii_bus);
727
728 if (mii_probe(dev) != 0) {
729 goto err_out;
730 }
731
732 pDBfree = NULL;
733 /* setup the data buffer descriptors and attach a buffer to each one */
734 pDB = aup->db;
735 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
736 pDB->pnext = pDBfree;
737 pDBfree = pDB;
738 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
739 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
740 pDB++;
741 }
742 aup->pDBfree = pDBfree;
743
744 for (i = 0; i < NUM_RX_DMA; i++) {
745 pDB = GetFreeDB(aup);
746 if (!pDB) {
747 goto err_out;
748 }
749 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
750 aup->rx_db_inuse[i] = pDB;
751 }
752 for (i = 0; i < NUM_TX_DMA; i++) {
753 pDB = GetFreeDB(aup);
754 if (!pDB) {
755 goto err_out;
756 }
757 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
758 aup->tx_dma_ring[i]->len = 0;
759 aup->tx_db_inuse[i] = pDB;
760 }
761
762 dev->base_addr = base;
763 dev->irq = irq;
764 dev->open = au1000_open;
765 dev->hard_start_xmit = au1000_tx;
766 dev->stop = au1000_close;
767 dev->set_multicast_list = &set_rx_mode;
768 dev->do_ioctl = &au1000_ioctl;
769 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
770 dev->tx_timeout = au1000_tx_timeout;
771 dev->watchdog_timeo = ETH_TX_TIMEOUT;
772
773 /*
774 * The boot code uses the ethernet controller, so reset it to start
775 * fresh. au1000_init() expects that the device is in reset state.
776 */
777 reset_mac(dev);
778
779 return dev;
780
781err_out:
782 if (aup->mii_bus != NULL) {
783 mdiobus_unregister(aup->mii_bus);
784 mdiobus_free(aup->mii_bus);
785 }
786
787 /* here we should have a valid dev plus aup-> register addresses
788 * so we can reset the mac properly.*/
789 reset_mac(dev);
790
791 for (i = 0; i < NUM_RX_DMA; i++) {
792 if (aup->rx_db_inuse[i])
793 ReleaseDB(aup, aup->rx_db_inuse[i]);
794 }
795 for (i = 0; i < NUM_TX_DMA; i++) {
796 if (aup->tx_db_inuse[i])
797 ReleaseDB(aup, aup->tx_db_inuse[i]);
798 }
799 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
800 (void *)aup->vaddr, aup->dma_addr);
801 unregister_netdev(dev);
802 free_netdev(dev);
803 release_mem_region( base, MAC_IOSIZE);
804 release_mem_region(macen, 4);
805 return NULL;
806}
807 651
808/* 652/*
809 * Initialize the interface. 653 * Initialize the interface.
@@ -864,83 +708,170 @@ static int au1000_init(struct net_device *dev)
864 return 0; 708 return 0;
865} 709}
866 710
867static void 711static inline void update_rx_stats(struct net_device *dev, u32 status)
868au1000_adjust_link(struct net_device *dev)
869{ 712{
870 struct au1000_private *aup = netdev_priv(dev); 713 struct au1000_private *aup = netdev_priv(dev);
871 struct phy_device *phydev = aup->phy_dev; 714 struct net_device_stats *ps = &dev->stats;
872 unsigned long flags;
873 715
874 int status_change = 0; 716 ps->rx_packets++;
717 if (status & RX_MCAST_FRAME)
718 ps->multicast++;
875 719
876 BUG_ON(!aup->phy_dev); 720 if (status & RX_ERROR) {
721 ps->rx_errors++;
722 if (status & RX_MISSED_FRAME)
723 ps->rx_missed_errors++;
724 if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
725 ps->rx_length_errors++;
726 if (status & RX_CRC_ERROR)
727 ps->rx_crc_errors++;
728 if (status & RX_COLL)
729 ps->collisions++;
730 }
731 else
732 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
877 733
878 spin_lock_irqsave(&aup->lock, flags); 734}
879 735
880 if (phydev->link && (aup->old_speed != phydev->speed)) { 736/*
881 // speed changed 737 * Au1000 receive routine.
738 */
739static int au1000_rx(struct net_device *dev)
740{
741 struct au1000_private *aup = netdev_priv(dev);
742 struct sk_buff *skb;
743 volatile rx_dma_t *prxd;
744 u32 buff_stat, status;
745 db_dest_t *pDB;
746 u32 frmlen;
882 747
883 switch(phydev->speed) { 748 if (au1000_debug > 5)
884 case SPEED_10: 749 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
885 case SPEED_100:
886 break;
887 default:
888 printk(KERN_WARNING
889 "%s: Speed (%d) is not 10/100 ???\n",
890 dev->name, phydev->speed);
891 break;
892 }
893 750
894 aup->old_speed = phydev->speed; 751 prxd = aup->rx_dma_ring[aup->rx_head];
752 buff_stat = prxd->buff_stat;
753 while (buff_stat & RX_T_DONE) {
754 status = prxd->status;
755 pDB = aup->rx_db_inuse[aup->rx_head];
756 update_rx_stats(dev, status);
757 if (!(status & RX_ERROR)) {
895 758
896 status_change = 1; 759 /* good frame */
760 frmlen = (status & RX_FRAME_LEN_MASK);
761 frmlen -= 4; /* Remove FCS */
762 skb = dev_alloc_skb(frmlen + 2);
763 if (skb == NULL) {
764 printk(KERN_ERR
765 "%s: Memory squeeze, dropping packet.\n",
766 dev->name);
767 dev->stats.rx_dropped++;
768 continue;
769 }
770 skb_reserve(skb, 2); /* 16 byte IP header align */
771 skb_copy_to_linear_data(skb,
772 (unsigned char *)pDB->vaddr, frmlen);
773 skb_put(skb, frmlen);
774 skb->protocol = eth_type_trans(skb, dev);
775 netif_rx(skb); /* pass the packet to upper layers */
776 }
777 else {
778 if (au1000_debug > 4) {
779 if (status & RX_MISSED_FRAME)
780 printk("rx miss\n");
781 if (status & RX_WDOG_TIMER)
782 printk("rx wdog\n");
783 if (status & RX_RUNT)
784 printk("rx runt\n");
785 if (status & RX_OVERLEN)
786 printk("rx overlen\n");
787 if (status & RX_COLL)
788 printk("rx coll\n");
789 if (status & RX_MII_ERROR)
790 printk("rx mii error\n");
791 if (status & RX_CRC_ERROR)
792 printk("rx crc error\n");
793 if (status & RX_LEN_ERROR)
794 printk("rx len error\n");
795 if (status & RX_U_CNTRL_FRAME)
796 printk("rx u control frame\n");
797 if (status & RX_MISSED_FRAME)
798 printk("rx miss\n");
799 }
800 }
801 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
802 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
803 au_sync();
804
805 /* next descriptor */
806 prxd = aup->rx_dma_ring[aup->rx_head];
807 buff_stat = prxd->buff_stat;
897 } 808 }
809 return 0;
810}
898 811
899 if (phydev->link && (aup->old_duplex != phydev->duplex)) { 812static void update_tx_stats(struct net_device *dev, u32 status)
900 // duplex mode changed 813{
814 struct au1000_private *aup = netdev_priv(dev);
815 struct net_device_stats *ps = &dev->stats;
901 816
902 /* switching duplex mode requires to disable rx and tx! */ 817 if (status & TX_FRAME_ABORTED) {
903 hard_stop(dev); 818 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
819 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
820 /* any other tx errors are only valid
821 * in half duplex mode */
822 ps->tx_errors++;
823 ps->tx_aborted_errors++;
824 }
825 }
826 else {
827 ps->tx_errors++;
828 ps->tx_aborted_errors++;
829 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
830 ps->tx_carrier_errors++;
831 }
832 }
833}
904 834
905 if (DUPLEX_FULL == phydev->duplex) 835/*
906 aup->mac->control = ((aup->mac->control 836 * Called from the interrupt service routine to acknowledge
907 | MAC_FULL_DUPLEX) 837 * the TX DONE bits. This is a must if the irq is setup as
908 & ~MAC_DISABLE_RX_OWN); 838 * edge triggered.
909 else 839 */
910 aup->mac->control = ((aup->mac->control 840static void au1000_tx_ack(struct net_device *dev)
911 & ~MAC_FULL_DUPLEX) 841{
912 | MAC_DISABLE_RX_OWN); 842 struct au1000_private *aup = netdev_priv(dev);
913 au_sync_delay(1); 843 volatile tx_dma_t *ptxd;
914 844
915 enable_rx_tx(dev); 845 ptxd = aup->tx_dma_ring[aup->tx_tail];
916 aup->old_duplex = phydev->duplex;
917 846
918 status_change = 1; 847 while (ptxd->buff_stat & TX_T_DONE) {
919 } 848 update_tx_stats(dev, ptxd->status);
849 ptxd->buff_stat &= ~TX_T_DONE;
850 ptxd->len = 0;
851 au_sync();
920 852
921 if(phydev->link != aup->old_link) { 853 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
922 // link state changed 854 ptxd = aup->tx_dma_ring[aup->tx_tail];
923 855
924 if (!phydev->link) { 856 if (aup->tx_full) {
925 /* link went down */ 857 aup->tx_full = 0;
926 aup->old_speed = 0; 858 netif_wake_queue(dev);
927 aup->old_duplex = -1;
928 } 859 }
929
930 aup->old_link = phydev->link;
931 status_change = 1;
932 } 860 }
861}
933 862
934 spin_unlock_irqrestore(&aup->lock, flags); 863/*
864 * Au1000 interrupt service routine.
865 */
866static irqreturn_t au1000_interrupt(int irq, void *dev_id)
867{
868 struct net_device *dev = dev_id;
935 869
936 if (status_change) { 870 /* Handle RX interrupts first to minimize chance of overrun */
937 if (phydev->link) 871
938 printk(KERN_INFO "%s: link up (%d/%s)\n", 872 au1000_rx(dev);
939 dev->name, phydev->speed, 873 au1000_tx_ack(dev);
940 DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); 874 return IRQ_RETVAL(1);
941 else
942 printk(KERN_INFO "%s: link down\n", dev->name);
943 }
944} 875}
945 876
946static int au1000_open(struct net_device *dev) 877static int au1000_open(struct net_device *dev)
@@ -1003,88 +934,6 @@ static int au1000_close(struct net_device *dev)
1003 return 0; 934 return 0;
1004} 935}
1005 936
1006static void __exit au1000_cleanup_module(void)
1007{
1008 int i, j;
1009 struct net_device *dev;
1010 struct au1000_private *aup;
1011
1012 for (i = 0; i < num_ifs; i++) {
1013 dev = iflist[i].dev;
1014 if (dev) {
1015 aup = netdev_priv(dev);
1016 unregister_netdev(dev);
1017 mdiobus_unregister(aup->mii_bus);
1018 mdiobus_free(aup->mii_bus);
1019 for (j = 0; j < NUM_RX_DMA; j++)
1020 if (aup->rx_db_inuse[j])
1021 ReleaseDB(aup, aup->rx_db_inuse[j]);
1022 for (j = 0; j < NUM_TX_DMA; j++)
1023 if (aup->tx_db_inuse[j])
1024 ReleaseDB(aup, aup->tx_db_inuse[j]);
1025 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1026 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1027 (void *)aup->vaddr, aup->dma_addr);
1028 release_mem_region(dev->base_addr, MAC_IOSIZE);
1029 release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
1030 free_netdev(dev);
1031 }
1032 }
1033}
1034
1035static void update_tx_stats(struct net_device *dev, u32 status)
1036{
1037 struct au1000_private *aup = netdev_priv(dev);
1038 struct net_device_stats *ps = &dev->stats;
1039
1040 if (status & TX_FRAME_ABORTED) {
1041 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
1042 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1043 /* any other tx errors are only valid
1044 * in half duplex mode */
1045 ps->tx_errors++;
1046 ps->tx_aborted_errors++;
1047 }
1048 }
1049 else {
1050 ps->tx_errors++;
1051 ps->tx_aborted_errors++;
1052 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1053 ps->tx_carrier_errors++;
1054 }
1055 }
1056}
1057
1058
1059/*
1060 * Called from the interrupt service routine to acknowledge
1061 * the TX DONE bits. This is a must if the irq is setup as
1062 * edge triggered.
1063 */
1064static void au1000_tx_ack(struct net_device *dev)
1065{
1066 struct au1000_private *aup = netdev_priv(dev);
1067 volatile tx_dma_t *ptxd;
1068
1069 ptxd = aup->tx_dma_ring[aup->tx_tail];
1070
1071 while (ptxd->buff_stat & TX_T_DONE) {
1072 update_tx_stats(dev, ptxd->status);
1073 ptxd->buff_stat &= ~TX_T_DONE;
1074 ptxd->len = 0;
1075 au_sync();
1076
1077 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1078 ptxd = aup->tx_dma_ring[aup->tx_tail];
1079
1080 if (aup->tx_full) {
1081 aup->tx_full = 0;
1082 netif_wake_queue(dev);
1083 }
1084 }
1085}
1086
1087
1088/* 937/*
1089 * Au1000 transmit routine. 938 * Au1000 transmit routine.
1090 */ 939 */
@@ -1142,123 +991,6 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1142 return 0; 991 return 0;
1143} 992}
1144 993
1145static inline void update_rx_stats(struct net_device *dev, u32 status)
1146{
1147 struct au1000_private *aup = netdev_priv(dev);
1148 struct net_device_stats *ps = &dev->stats;
1149
1150 ps->rx_packets++;
1151 if (status & RX_MCAST_FRAME)
1152 ps->multicast++;
1153
1154 if (status & RX_ERROR) {
1155 ps->rx_errors++;
1156 if (status & RX_MISSED_FRAME)
1157 ps->rx_missed_errors++;
1158 if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
1159 ps->rx_length_errors++;
1160 if (status & RX_CRC_ERROR)
1161 ps->rx_crc_errors++;
1162 if (status & RX_COLL)
1163 ps->collisions++;
1164 }
1165 else
1166 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1167
1168}
1169
1170/*
1171 * Au1000 receive routine.
1172 */
1173static int au1000_rx(struct net_device *dev)
1174{
1175 struct au1000_private *aup = netdev_priv(dev);
1176 struct sk_buff *skb;
1177 volatile rx_dma_t *prxd;
1178 u32 buff_stat, status;
1179 db_dest_t *pDB;
1180 u32 frmlen;
1181
1182 if (au1000_debug > 5)
1183 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1184
1185 prxd = aup->rx_dma_ring[aup->rx_head];
1186 buff_stat = prxd->buff_stat;
1187 while (buff_stat & RX_T_DONE) {
1188 status = prxd->status;
1189 pDB = aup->rx_db_inuse[aup->rx_head];
1190 update_rx_stats(dev, status);
1191 if (!(status & RX_ERROR)) {
1192
1193 /* good frame */
1194 frmlen = (status & RX_FRAME_LEN_MASK);
1195 frmlen -= 4; /* Remove FCS */
1196 skb = dev_alloc_skb(frmlen + 2);
1197 if (skb == NULL) {
1198 printk(KERN_ERR
1199 "%s: Memory squeeze, dropping packet.\n",
1200 dev->name);
1201 dev->stats.rx_dropped++;
1202 continue;
1203 }
1204 skb_reserve(skb, 2); /* 16 byte IP header align */
1205 skb_copy_to_linear_data(skb,
1206 (unsigned char *)pDB->vaddr, frmlen);
1207 skb_put(skb, frmlen);
1208 skb->protocol = eth_type_trans(skb, dev);
1209 netif_rx(skb); /* pass the packet to upper layers */
1210 }
1211 else {
1212 if (au1000_debug > 4) {
1213 if (status & RX_MISSED_FRAME)
1214 printk("rx miss\n");
1215 if (status & RX_WDOG_TIMER)
1216 printk("rx wdog\n");
1217 if (status & RX_RUNT)
1218 printk("rx runt\n");
1219 if (status & RX_OVERLEN)
1220 printk("rx overlen\n");
1221 if (status & RX_COLL)
1222 printk("rx coll\n");
1223 if (status & RX_MII_ERROR)
1224 printk("rx mii error\n");
1225 if (status & RX_CRC_ERROR)
1226 printk("rx crc error\n");
1227 if (status & RX_LEN_ERROR)
1228 printk("rx len error\n");
1229 if (status & RX_U_CNTRL_FRAME)
1230 printk("rx u control frame\n");
1231 if (status & RX_MISSED_FRAME)
1232 printk("rx miss\n");
1233 }
1234 }
1235 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
1236 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
1237 au_sync();
1238
1239 /* next descriptor */
1240 prxd = aup->rx_dma_ring[aup->rx_head];
1241 buff_stat = prxd->buff_stat;
1242 }
1243 return 0;
1244}
1245
1246
1247/*
1248 * Au1000 interrupt service routine.
1249 */
1250static irqreturn_t au1000_interrupt(int irq, void *dev_id)
1251{
1252 struct net_device *dev = dev_id;
1253
1254 /* Handle RX interrupts first to minimize chance of overrun */
1255
1256 au1000_rx(dev);
1257 au1000_tx_ack(dev);
1258 return IRQ_RETVAL(1);
1259}
1260
1261
1262/* 994/*
1263 * The Tx ring has been full longer than the watchdog timeout 995 * The Tx ring has been full longer than the watchdog timeout
1264 * value. The transmitter must be hung? 996 * value. The transmitter must be hung?
@@ -1315,5 +1047,252 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1315 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); 1047 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
1316} 1048}
1317 1049
1050static struct net_device * au1000_probe(int port_num)
1051{
1052 static unsigned version_printed = 0;
1053 struct au1000_private *aup = NULL;
1054 struct net_device *dev = NULL;
1055 db_dest_t *pDB, *pDBfree;
1056 char ethaddr[6];
1057 int irq, i, err;
1058 u32 base, macen;
1059
1060 if (port_num >= NUM_ETH_INTERFACES)
1061 return NULL;
1062
1063 base = CPHYSADDR(iflist[port_num].base_addr );
1064 macen = CPHYSADDR(iflist[port_num].macen_addr);
1065 irq = iflist[port_num].irq;
1066
1067 if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
1068 !request_mem_region(macen, 4, "Au1x00 ENET"))
1069 return NULL;
1070
1071 if (version_printed++ == 0)
1072 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1073
1074 dev = alloc_etherdev(sizeof(struct au1000_private));
1075 if (!dev) {
1076 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
1077 return NULL;
1078 }
1079
1080 if ((err = register_netdev(dev)) != 0) {
1081 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
1082 DRV_NAME, err);
1083 free_netdev(dev);
1084 return NULL;
1085 }
1086
1087 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
1088 dev->name, base, irq);
1089
1090 aup = netdev_priv(dev);
1091
1092 spin_lock_init(&aup->lock);
1093
1094 /* Allocate the data buffers */
1095 /* Snooping works fine with eth on all au1xxx */
1096 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1097 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1098 &aup->dma_addr, 0);
1099 if (!aup->vaddr) {
1100 free_netdev(dev);
1101 release_mem_region( base, MAC_IOSIZE);
1102 release_mem_region(macen, 4);
1103 return NULL;
1104 }
1105
1106 /* aup->mac is the base address of the MAC's registers */
1107 aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
1108
1109 /* Setup some variables for quick register address access */
1110 aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
1111 aup->mac_id = port_num;
1112 au_macs[port_num] = aup;
1113
1114 if (port_num == 0) {
1115 if (prom_get_ethernet_addr(ethaddr) == 0)
1116 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1117 else {
1118 printk(KERN_INFO "%s: No MAC address found\n",
1119 dev->name);
1120 /* Use the hard coded MAC addresses */
1121 }
1122
1123 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1124 } else if (port_num == 1)
1125 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1126
1127 /*
1128 * Assign to the Ethernet ports two consecutive MAC addresses
1129 * to match those that are printed on their stickers
1130 */
1131 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1132 dev->dev_addr[5] += port_num;
1133
1134 *aup->enable = 0;
1135 aup->mac_enabled = 0;
1136
1137 aup->mii_bus = mdiobus_alloc();
1138 if (aup->mii_bus == NULL)
1139 goto err_out;
1140
1141 aup->mii_bus->priv = dev;
1142 aup->mii_bus->read = au1000_mdiobus_read;
1143 aup->mii_bus->write = au1000_mdiobus_write;
1144 aup->mii_bus->reset = au1000_mdiobus_reset;
1145 aup->mii_bus->name = "au1000_eth_mii";
1146 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
1147 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1148 for(i = 0; i < PHY_MAX_ADDR; ++i)
1149 aup->mii_bus->irq[i] = PHY_POLL;
1150
1151 /* if known, set corresponding PHY IRQs */
1152#if defined(AU1XXX_PHY_STATIC_CONFIG)
1153# if defined(AU1XXX_PHY0_IRQ)
1154 if (AU1XXX_PHY0_BUSID == aup->mac_id)
1155 aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
1156# endif
1157# if defined(AU1XXX_PHY1_IRQ)
1158 if (AU1XXX_PHY1_BUSID == aup->mac_id)
1159 aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
1160# endif
1161#endif
1162 mdiobus_register(aup->mii_bus);
1163
1164 if (mii_probe(dev) != 0) {
1165 goto err_out;
1166 }
1167
1168 pDBfree = NULL;
1169 /* setup the data buffer descriptors and attach a buffer to each one */
1170 pDB = aup->db;
1171 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1172 pDB->pnext = pDBfree;
1173 pDBfree = pDB;
1174 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1175 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1176 pDB++;
1177 }
1178 aup->pDBfree = pDBfree;
1179
1180 for (i = 0; i < NUM_RX_DMA; i++) {
1181 pDB = GetFreeDB(aup);
1182 if (!pDB) {
1183 goto err_out;
1184 }
1185 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1186 aup->rx_db_inuse[i] = pDB;
1187 }
1188 for (i = 0; i < NUM_TX_DMA; i++) {
1189 pDB = GetFreeDB(aup);
1190 if (!pDB) {
1191 goto err_out;
1192 }
1193 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1194 aup->tx_dma_ring[i]->len = 0;
1195 aup->tx_db_inuse[i] = pDB;
1196 }
1197
1198 dev->base_addr = base;
1199 dev->irq = irq;
1200 dev->open = au1000_open;
1201 dev->hard_start_xmit = au1000_tx;
1202 dev->stop = au1000_close;
1203 dev->set_multicast_list = &set_rx_mode;
1204 dev->do_ioctl = &au1000_ioctl;
1205 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1206 dev->tx_timeout = au1000_tx_timeout;
1207 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1208
1209 /*
1210 * The boot code uses the ethernet controller, so reset it to start
1211 * fresh. au1000_init() expects that the device is in reset state.
1212 */
1213 reset_mac(dev);
1214
1215 return dev;
1216
1217err_out:
1218 if (aup->mii_bus != NULL) {
1219 mdiobus_unregister(aup->mii_bus);
1220 mdiobus_free(aup->mii_bus);
1221 }
1222
1223 /* here we should have a valid dev plus aup-> register addresses
1224 * so we can reset the mac properly.*/
1225 reset_mac(dev);
1226
1227 for (i = 0; i < NUM_RX_DMA; i++) {
1228 if (aup->rx_db_inuse[i])
1229 ReleaseDB(aup, aup->rx_db_inuse[i]);
1230 }
1231 for (i = 0; i < NUM_TX_DMA; i++) {
1232 if (aup->tx_db_inuse[i])
1233 ReleaseDB(aup, aup->tx_db_inuse[i]);
1234 }
1235 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1236 (void *)aup->vaddr, aup->dma_addr);
1237 unregister_netdev(dev);
1238 free_netdev(dev);
1239 release_mem_region( base, MAC_IOSIZE);
1240 release_mem_region(macen, 4);
1241 return NULL;
1242}
1243
1244/*
1245 * Setup the base address and interrupt of the Au1xxx ethernet macs
1246 * based on cpu type and whether the interface is enabled in sys_pinfunc
1247 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1248 */
1249static int __init au1000_init_module(void)
1250{
1251 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1252 struct net_device *dev;
1253 int i, found_one = 0;
1254
1255 num_ifs = NUM_ETH_INTERFACES - ni;
1256
1257 for(i = 0; i < num_ifs; i++) {
1258 dev = au1000_probe(i);
1259 iflist[i].dev = dev;
1260 if (dev)
1261 found_one++;
1262 }
1263 if (!found_one)
1264 return -ENODEV;
1265 return 0;
1266}
1267
1268static void __exit au1000_cleanup_module(void)
1269{
1270 int i, j;
1271 struct net_device *dev;
1272 struct au1000_private *aup;
1273
1274 for (i = 0; i < num_ifs; i++) {
1275 dev = iflist[i].dev;
1276 if (dev) {
1277 aup = netdev_priv(dev);
1278 unregister_netdev(dev);
1279 mdiobus_unregister(aup->mii_bus);
1280 mdiobus_free(aup->mii_bus);
1281 for (j = 0; j < NUM_RX_DMA; j++)
1282 if (aup->rx_db_inuse[j])
1283 ReleaseDB(aup, aup->rx_db_inuse[j]);
1284 for (j = 0; j < NUM_TX_DMA; j++)
1285 if (aup->tx_db_inuse[j])
1286 ReleaseDB(aup, aup->tx_db_inuse[j]);
1287 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1288 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1289 (void *)aup->vaddr, aup->dma_addr);
1290 release_mem_region(dev->base_addr, MAC_IOSIZE);
1291 release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
1292 free_netdev(dev);
1293 }
1294 }
1295}
1296
1318module_init(au1000_init_module); 1297module_init(au1000_init_module);
1319module_exit(au1000_cleanup_module); 1298module_exit(au1000_cleanup_module);
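
The au1000_eth.c changes are almost entirely a reshuffle: the block of forward prototypes at the top of the file is dropped by moving each static function above its first user, and au1000_probe() together with the module init/exit routines ends up at the bottom of the file next to module_init()/module_exit(). The only behavioural change is the switch from the deprecated phydev->dev.bus_id field to dev_name(), both in the phy_connect() call and in the probe-time printk. A hedged sketch of that conversion (my_attach_phy and my_adjust_link are placeholders, not names from this patch):

/* Minimal sketch, assuming an already discovered phydev. */
static int my_attach_phy(struct net_device *dev, struct phy_device *phydev,
                         void (*my_adjust_link)(struct net_device *))
{
        /* dev_name(&phydev->dev) replaces the removed bus_id field */
        phydev = phy_connect(dev, dev_name(&phydev->dev), my_adjust_link,
                             0, PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev))
                return PTR_ERR(phydev);

        printk(KERN_INFO "%s: attached PHY [%s] at %s, irq %d\n", dev->name,
               phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
        return 0;
}
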
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c38512ebcea..2a51c757997 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -874,7 +874,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
874 } 874 }
875 875
876 if (work_done < budget) { 876 if (work_done < budget) {
877 netif_rx_complete(napi); 877 napi_complete(napi);
878 b44_enable_ints(bp); 878 b44_enable_ints(bp);
879 } 879 }
880 880
@@ -906,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
906 goto irq_ack; 906 goto irq_ack;
907 } 907 }
908 908
909 if (netif_rx_schedule_prep(&bp->napi)) { 909 if (napi_schedule_prep(&bp->napi)) {
910 /* NOTE: These writes are posted by the readback of 910 /* NOTE: These writes are posted by the readback of
911 * the ISTAT register below. 911 * the ISTAT register below.
912 */ 912 */
913 bp->istat = istat; 913 bp->istat = istat;
914 __b44_disable_ints(bp); 914 __b44_disable_ints(bp);
915 __netif_rx_schedule(&bp->napi); 915 __napi_schedule(&bp->napi);
916 } else { 916 } else {
917 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", 917 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
918 dev->name); 918 dev->name);
@@ -973,7 +973,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
973 ssb_dma_unmap_single(bp->sdev, mapping, len, 973 ssb_dma_unmap_single(bp->sdev, mapping, len,
974 DMA_TO_DEVICE); 974 DMA_TO_DEVICE);
975 975
976 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA); 976 bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
977 if (!bounce_skb) 977 if (!bounce_skb)
978 goto err_out; 978 goto err_out;
979 979
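
Besides the poll-side rename, b44 converts its interrupt handler to the napi_schedule_prep()/__napi_schedule() pair and allocates the GFP_DMA bounce buffer with __netdev_alloc_skb() so the skb is tied to the device from the start. The interrupt-side idiom, sketched with placeholder helpers (my_irq_pending(), my_disable_irqs()):

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
        struct my_adapter *adap = dev_id;

        if (!my_irq_pending(adap))
                return IRQ_NONE;

        if (napi_schedule_prep(&adap->napi)) {
                /* we own the poll now: mask device interrupts before
                 * handing the rest of the work to softirq context */
                my_disable_irqs(adap);
                __napi_schedule(&adap->napi);
        }
        return IRQ_HANDLED;
}
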
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 78e31aa861e..9afe8092dfc 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -415,11 +415,11 @@ static int mii_probe(struct net_device *dev)
415 } 415 }
416 416
417#if defined(CONFIG_BFIN_MAC_RMII) 417#if defined(CONFIG_BFIN_MAC_RMII)
418 phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, 418 phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
419 PHY_INTERFACE_MODE_RMII); 419 0, PHY_INTERFACE_MODE_RMII);
420#else 420#else
421 phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, 421 phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
422 PHY_INTERFACE_MODE_MII); 422 0, PHY_INTERFACE_MODE_MII);
423#endif 423#endif
424 424
425 if (IS_ERR(phydev)) { 425 if (IS_ERR(phydev)) {
@@ -447,7 +447,7 @@ static int mii_probe(struct net_device *dev)
447 printk(KERN_INFO "%s: attached PHY driver [%s] " 447 printk(KERN_INFO "%s: attached PHY driver [%s] "
448 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" 448 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
449 "@sclk=%dMHz)\n", 449 "@sclk=%dMHz)\n",
450 DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq, 450 DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
451 MDC_CLK, mdc_div, sclk/1000000); 451 MDC_CLK, mdc_div, sclk/1000000);
452 452
453 return 0; 453 return 0;
@@ -488,7 +488,7 @@ static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
488 strcpy(info->driver, DRV_NAME); 488 strcpy(info->driver, DRV_NAME);
489 strcpy(info->version, DRV_VERSION); 489 strcpy(info->version, DRV_VERSION);
490 strcpy(info->fw_version, "N/A"); 490 strcpy(info->fw_version, "N/A");
491 strcpy(info->bus_info, dev->dev.bus_id); 491 strcpy(info->bus_info, dev_name(&dev->dev));
492} 492}
493 493
494static struct ethtool_ops bfin_mac_ethtool_ops = { 494static struct ethtool_ops bfin_mac_ethtool_ops = {
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 8a546a33d58..1ab58375d06 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1240,7 +1240,7 @@ static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf
1240{ 1240{
1241 struct bmac_data *bp = netdev_priv(dev); 1241 struct bmac_data *bp = netdev_priv(dev);
1242 strcpy(info->driver, "bmac"); 1242 strcpy(info->driver, "bmac");
1243 strcpy(info->bus_info, bp->mdev->ofdev.dev.bus_id); 1243 strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
1244} 1244}
1245 1245
1246static const struct ethtool_ops bmac_ethtool_ops = { 1246static const struct ethtool_ops bmac_ethtool_ops = {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 6500b7c4739..8466d351a70 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1497,6 +1497,8 @@ static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1497 1497
1498static int 1498static int
1499bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) 1499bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1500__releases(&bp->phy_lock)
1501__acquires(&bp->phy_lock)
1500{ 1502{
1501 u32 speed_arg = 0, pause_adv; 1503 u32 speed_arg = 0, pause_adv;
1502 1504
@@ -1554,6 +1556,8 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1554 1556
1555static int 1557static int
1556bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) 1558bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1559__releases(&bp->phy_lock)
1560__acquires(&bp->phy_lock)
1557{ 1561{
1558 u32 adv, bmcr; 1562 u32 adv, bmcr;
1559 u32 new_adv = 0; 1563 u32 new_adv = 0;
@@ -1866,6 +1870,8 @@ bnx2_set_remote_link(struct bnx2 *bp)
1866 1870
1867static int 1871static int
1868bnx2_setup_copper_phy(struct bnx2 *bp) 1872bnx2_setup_copper_phy(struct bnx2 *bp)
1873__releases(&bp->phy_lock)
1874__acquires(&bp->phy_lock)
1869{ 1875{
1870 u32 bmcr; 1876 u32 bmcr;
1871 u32 new_bmcr; 1877 u32 new_bmcr;
@@ -1963,6 +1969,8 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
1963 1969
1964static int 1970static int
1965bnx2_setup_phy(struct bnx2 *bp, u8 port) 1971bnx2_setup_phy(struct bnx2 *bp, u8 port)
1972__releases(&bp->phy_lock)
1973__acquires(&bp->phy_lock)
1966{ 1974{
1967 if (bp->loopback == MAC_LOOPBACK) 1975 if (bp->loopback == MAC_LOOPBACK)
1968 return 0; 1976 return 0;
@@ -2176,6 +2184,8 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2176 2184
2177static int 2185static int
2178bnx2_init_phy(struct bnx2 *bp, int reset_phy) 2186bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2187__releases(&bp->phy_lock)
2188__acquires(&bp->phy_lock)
2179{ 2189{
2180 u32 val; 2190 u32 val;
2181 int rc = 0; 2191 int rc = 0;
@@ -3005,6 +3015,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3005 skb->ip_summed = CHECKSUM_UNNECESSARY; 3015 skb->ip_summed = CHECKSUM_UNNECESSARY;
3006 } 3016 }
3007 3017
3018 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3019
3008#ifdef BCM_VLAN 3020#ifdef BCM_VLAN
3009 if (hw_vlan) 3021 if (hw_vlan)
3010 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); 3022 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
@@ -3061,7 +3073,7 @@ bnx2_msi(int irq, void *dev_instance)
3061 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3073 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3062 return IRQ_HANDLED; 3074 return IRQ_HANDLED;
3063 3075
3064 netif_rx_schedule(&bnapi->napi); 3076 napi_schedule(&bnapi->napi);
3065 3077
3066 return IRQ_HANDLED; 3078 return IRQ_HANDLED;
3067} 3079}
@@ -3078,7 +3090,7 @@ bnx2_msi_1shot(int irq, void *dev_instance)
3078 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3090 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3079 return IRQ_HANDLED; 3091 return IRQ_HANDLED;
3080 3092
3081 netif_rx_schedule(&bnapi->napi); 3093 napi_schedule(&bnapi->napi);
3082 3094
3083 return IRQ_HANDLED; 3095 return IRQ_HANDLED;
3084} 3096}
@@ -3114,9 +3126,9 @@ bnx2_interrupt(int irq, void *dev_instance)
3114 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3126 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3115 return IRQ_HANDLED; 3127 return IRQ_HANDLED;
3116 3128
3117 if (netif_rx_schedule_prep(&bnapi->napi)) { 3129 if (napi_schedule_prep(&bnapi->napi)) {
3118 bnapi->last_status_idx = sblk->status_idx; 3130 bnapi->last_status_idx = sblk->status_idx;
3119 __netif_rx_schedule(&bnapi->napi); 3131 __napi_schedule(&bnapi->napi);
3120 } 3132 }
3121 3133
3122 return IRQ_HANDLED; 3134 return IRQ_HANDLED;
@@ -3226,7 +3238,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3226 rmb(); 3238 rmb();
3227 if (likely(!bnx2_has_fast_work(bnapi))) { 3239 if (likely(!bnx2_has_fast_work(bnapi))) {
3228 3240
3229 netif_rx_complete(napi); 3241 napi_complete(napi);
3230 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | 3242 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3231 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3243 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3232 bnapi->last_status_idx); 3244 bnapi->last_status_idx);
@@ -3259,7 +3271,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3259 3271
3260 rmb(); 3272 rmb();
3261 if (likely(!bnx2_has_work(bnapi))) { 3273 if (likely(!bnx2_has_work(bnapi))) {
3262 netif_rx_complete(napi); 3274 napi_complete(napi);
3263 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { 3275 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3264 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3276 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3265 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3277 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
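
Two independent cleanups in bnx2: the PHY setup helpers gain __releases()/__acquires() annotations so sparse knows they drop and retake bp->phy_lock internally rather than leaking it, and the receive path calls skb_record_rx_queue() so the stack remembers which RX ring a packet arrived on. The annotation pattern, as a sketch (my_slow_reconfigure() stands in for work that must run without the lock held):

/* Sketch: document that this helper temporarily drops the caller's lock. */
static int my_phy_setup(struct my_adapter *bp)
__releases(&bp->lock)
__acquires(&bp->lock)
{
        int rc;

        spin_unlock_bh(&bp->lock);
        rc = my_slow_reconfigure(bp);   /* may take a long time */
        spin_lock_bh(&bp->lock);        /* caller still expects the lock held */

        return rc;
}
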
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index d3e7775a9cc..88da14c141f 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1325,6 +1325,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1325 1325
1326 skb->protocol = eth_type_trans(skb, bp->dev); 1326 skb->protocol = eth_type_trans(skb, bp->dev);
1327 skb->ip_summed = CHECKSUM_UNNECESSARY; 1327 skb->ip_summed = CHECKSUM_UNNECESSARY;
1328 skb_record_rx_queue(skb, queue);
1328 1329
1329 { 1330 {
1330 struct iphdr *iph; 1331 struct iphdr *iph;
@@ -1654,7 +1655,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1654 prefetch(&fp->status_blk->c_status_block.status_block_index); 1655 prefetch(&fp->status_blk->c_status_block.status_block_index);
1655 prefetch(&fp->status_blk->u_status_block.status_block_index); 1656 prefetch(&fp->status_blk->u_status_block.status_block_index);
1656 1657
1657 netif_rx_schedule(&bnx2x_fp(bp, index, napi)); 1658 napi_schedule(&bnx2x_fp(bp, index, napi));
1658 1659
1659 return IRQ_HANDLED; 1660 return IRQ_HANDLED;
1660} 1661}
@@ -1693,7 +1694,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1693 prefetch(&fp->status_blk->c_status_block.status_block_index); 1694 prefetch(&fp->status_blk->c_status_block.status_block_index);
1694 prefetch(&fp->status_blk->u_status_block.status_block_index); 1695 prefetch(&fp->status_blk->u_status_block.status_block_index);
1695 1696
1696 netif_rx_schedule(&bnx2x_fp(bp, 0, napi)); 1697 napi_schedule(&bnx2x_fp(bp, 0, napi));
1697 1698
1698 status &= ~mask; 1699 status &= ~mask;
1699 } 1700 }
@@ -9374,7 +9375,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9374#ifdef BNX2X_STOP_ON_ERROR 9375#ifdef BNX2X_STOP_ON_ERROR
9375poll_panic: 9376poll_panic:
9376#endif 9377#endif
9377 netif_rx_complete(napi); 9378 napi_complete(napi);
9378 9379
9379 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 9380 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9380 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); 9381 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 8a83eb283c2..a306230381c 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -29,7 +29,7 @@
29 29
30// General definitions 30// General definitions
31#define BOND_ETH_P_LACPDU 0x8809 31#define BOND_ETH_P_LACPDU 0x8809
32#define PKT_TYPE_LACPDU __constant_htons(BOND_ETH_P_LACPDU) 32#define PKT_TYPE_LACPDU cpu_to_be16(BOND_ETH_P_LACPDU)
33#define AD_TIMER_INTERVAL 100 /*msec*/ 33#define AD_TIMER_INTERVAL 100 /*msec*/
34 34
35#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} 35#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 27fb7f5c21c..409b1407427 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -822,7 +822,7 @@ static int rlb_initialize(struct bonding *bond)
822 _unlock_rx_hashtbl(bond); 822 _unlock_rx_hashtbl(bond);
823 823
824 /*initialize packet type*/ 824 /*initialize packet type*/
825 pk_type->type = __constant_htons(ETH_P_ARP); 825 pk_type->type = cpu_to_be16(ETH_P_ARP);
826 pk_type->dev = NULL; 826 pk_type->dev = NULL;
827 pk_type->func = rlb_arp_recv; 827 pk_type->func = rlb_arp_recv;
828 828
@@ -892,7 +892,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
892 memset(&pkt, 0, size); 892 memset(&pkt, 0, size);
893 memcpy(pkt.mac_dst, mac_addr, ETH_ALEN); 893 memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
894 memcpy(pkt.mac_src, mac_addr, ETH_ALEN); 894 memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
895 pkt.type = __constant_htons(ETH_P_LOOP); 895 pkt.type = cpu_to_be16(ETH_P_LOOP);
896 896
897 for (i = 0; i < MAX_LP_BURST; i++) { 897 for (i = 0; i < MAX_LP_BURST; i++) {
898 struct sk_buff *skb; 898 struct sk_buff *skb;
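
The bonding changes swap the deprecated __constant_htons() for cpu_to_be16(), which works in both constant and runtime expressions and keeps the sparse __be16 annotation on the result. Registering a packet handler with the new spelling, following the rlb_initialize() hunk above (my_arp_rcv and my_arp_pt are placeholders):

static int my_arp_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev);

static struct packet_type my_arp_pt;

static void my_register_arp_handler(void)
{
        my_arp_pt.type = cpu_to_be16(ETH_P_ARP); /* was __constant_htons() */
        my_arp_pt.dev  = NULL;                   /* match any device */
        my_arp_pt.func = my_arp_rcv;
        dev_add_pack(&my_arp_pt);
}
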
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9fb388388fb..21bce2c0fde 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3369,7 +3369,7 @@ static int bond_info_seq_show(struct seq_file *seq, void *v)
3369 return 0; 3369 return 0;
3370} 3370}
3371 3371
3372static struct seq_operations bond_info_seq_ops = { 3372static const struct seq_operations bond_info_seq_ops = {
3373 .start = bond_info_seq_start, 3373 .start = bond_info_seq_start,
3374 .next = bond_info_seq_next, 3374 .next = bond_info_seq_next,
3375 .stop = bond_info_seq_stop, 3375 .stop = bond_info_seq_stop,
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index bbbc3bb08aa..0effefa1b88 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2506,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
2506 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2506 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2507#ifdef USE_NAPI 2507#ifdef USE_NAPI
2508 cas_mask_intr(cp); 2508 cas_mask_intr(cp);
2509 netif_rx_schedule(&cp->napi); 2509 napi_schedule(&cp->napi);
2510#else 2510#else
2511 cas_rx_ringN(cp, ring, 0); 2511 cas_rx_ringN(cp, ring, 0);
2512#endif 2512#endif
@@ -2557,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2557 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2557 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2558#ifdef USE_NAPI 2558#ifdef USE_NAPI
2559 cas_mask_intr(cp); 2559 cas_mask_intr(cp);
2560 netif_rx_schedule(&cp->napi); 2560 napi_schedule(&cp->napi);
2561#else 2561#else
2562 cas_rx_ringN(cp, 1, 0); 2562 cas_rx_ringN(cp, 1, 0);
2563#endif 2563#endif
@@ -2613,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id)
2613 if (status & INTR_RX_DONE) { 2613 if (status & INTR_RX_DONE) {
2614#ifdef USE_NAPI 2614#ifdef USE_NAPI
2615 cas_mask_intr(cp); 2615 cas_mask_intr(cp);
2616 netif_rx_schedule(&cp->napi); 2616 napi_schedule(&cp->napi);
2617#else 2617#else
2618 cas_rx_ringN(cp, 0, 0); 2618 cas_rx_ringN(cp, 0, 0);
2619#endif 2619#endif
@@ -2691,7 +2691,7 @@ rx_comp:
2691#endif 2691#endif
2692 spin_unlock_irqrestore(&cp->lock, flags); 2692 spin_unlock_irqrestore(&cp->lock, flags);
2693 if (enable_intr) { 2693 if (enable_intr) {
2694 netif_rx_complete(napi); 2694 napi_complete(napi);
2695 cas_unmask_intr(cp); 2695 cas_unmask_intr(cp);
2696 } 2696 }
2697 return credits; 2697 return credits;
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index d984b799576..840da83fb3c 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1612,7 +1612,7 @@ int t1_poll(struct napi_struct *napi, int budget)
1612 int work_done = process_responses(adapter, budget); 1612 int work_done = process_responses(adapter, budget);
1613 1613
1614 if (likely(work_done < budget)) { 1614 if (likely(work_done < budget)) {
1615 netif_rx_complete(napi); 1615 napi_complete(napi);
1616 writel(adapter->sge->respQ.cidx, 1616 writel(adapter->sge->respQ.cidx,
1617 adapter->regs + A_SG_SLEEPING); 1617 adapter->regs + A_SG_SLEEPING);
1618 } 1618 }
@@ -1630,7 +1630,7 @@ irqreturn_t t1_interrupt(int irq, void *data)
1630 1630
1631 if (napi_schedule_prep(&adapter->napi)) { 1631 if (napi_schedule_prep(&adapter->napi)) {
1632 if (process_pure_responses(adapter)) 1632 if (process_pure_responses(adapter))
1633 __netif_rx_schedule(&adapter->napi); 1633 __napi_schedule(&adapter->napi);
1634 else { 1634 else {
1635 /* no data, no NAPI needed */ 1635 /* no data, no NAPI needed */
1636 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1636 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index f66548751c3..3f476c7c073 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
428 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 428 printk(KERN_WARNING "%s: rx: polling, but no queue\n",
429 priv->dev->name); 429 priv->dev->name);
430 spin_unlock(&priv->rx_lock); 430 spin_unlock(&priv->rx_lock);
431 netif_rx_complete(napi); 431 napi_complete(napi);
432 return 0; 432 return 0;
433 } 433 }
434 434
@@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
514 if (processed == 0) { 514 if (processed == 0) {
515 /* we ran out of packets to read, 515 /* we ran out of packets to read,
516 * revert to interrupt-driven mode */ 516 * revert to interrupt-driven mode */
517 netif_rx_complete(napi); 517 napi_complete(napi);
518 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 518 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
519 return 0; 519 return 0;
520 } 520 }
@@ -536,7 +536,7 @@ fatal_error:
536 } 536 }
537 537
538 spin_unlock(&priv->rx_lock); 538 spin_unlock(&priv->rx_lock);
539 netif_rx_complete(napi); 539 napi_complete(napi);
540 netif_tx_stop_all_queues(priv->dev); 540 netif_tx_stop_all_queues(priv->dev);
541 napi_disable(&priv->napi); 541 napi_disable(&priv->napi);
542 542
@@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
802 802
803 if (status & MAC_INT_RX) { 803 if (status & MAC_INT_RX) {
804 queue = (status >> 8) & 7; 804 queue = (status >> 8) & 7;
805 if (netif_rx_schedule_prep(&priv->napi)) { 805 if (napi_schedule_prep(&priv->napi)) {
806 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); 806 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
807 __netif_rx_schedule(&priv->napi); 807 __napi_schedule(&priv->napi);
808 } 808 }
809 } 809 }
810 810
@@ -1161,7 +1161,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1161 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1161 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1162 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1162 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
1163 1163
1164 priv->phy = phy_connect(dev, cpmac_mii->phy_map[phy_id]->dev.bus_id, 1164 priv->phy = phy_connect(dev, dev_name(&cpmac_mii->phy_map[phy_id]->dev),
1165 &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1165 &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
1166 if (IS_ERR(priv->phy)) { 1166 if (IS_ERR(priv->phy)) {
1167 if (netif_msg_drv(priv)) 1167 if (netif_msg_drv(priv))
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index a89d8cc5120..fbe15699584 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,7 +42,6 @@
42#include <linux/cache.h> 42#include <linux/cache.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/bitops.h> 44#include <linux/bitops.h>
45#include <linux/inet_lro.h>
46#include "t3cdev.h" 45#include "t3cdev.h"
47#include <asm/io.h> 46#include <asm/io.h>
48 47
@@ -178,15 +177,11 @@ enum { /* per port SGE statistics */
178 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ 177 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
179 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ 178 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
180 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ 179 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
181 SGE_PSTAT_LRO_AGGR, /* # of page chunks added to LRO sessions */
182 SGE_PSTAT_LRO_FLUSHED, /* # of flushed LRO sessions */
183 SGE_PSTAT_LRO_NO_DESC, /* # of overflown LRO sessions */
184 180
185 SGE_PSTAT_MAX /* must be last */ 181 SGE_PSTAT_MAX /* must be last */
186}; 182};
187 183
188#define T3_MAX_LRO_SES 8 184struct napi_gro_fraginfo;
189#define T3_MAX_LRO_MAX_PKTS 64
190 185
191struct sge_qset { /* an SGE queue set */ 186struct sge_qset { /* an SGE queue set */
192 struct adapter *adap; 187 struct adapter *adap;
@@ -194,12 +189,8 @@ struct sge_qset { /* an SGE queue set */
194 struct sge_rspq rspq; 189 struct sge_rspq rspq;
195 struct sge_fl fl[SGE_RXQ_PER_SET]; 190 struct sge_fl fl[SGE_RXQ_PER_SET];
196 struct sge_txq txq[SGE_TXQ_PER_SET]; 191 struct sge_txq txq[SGE_TXQ_PER_SET];
197 struct net_lro_mgr lro_mgr; 192 struct napi_gro_fraginfo lro_frag_tbl;
198 struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
199 struct skb_frag_struct *lro_frag_tbl;
200 int lro_nfrags;
201 int lro_enabled; 193 int lro_enabled;
202 int lro_frag_len;
203 void *lro_va; 194 void *lro_va;
204 struct net_device *netdev; 195 struct net_device *netdev;
205 struct netdev_queue *tx_q; /* associated netdev TX queue */ 196 struct netdev_queue *tx_q; /* associated netdev TX queue */
@@ -230,6 +221,7 @@ struct adapter {
230 unsigned int slow_intr_mask; 221 unsigned int slow_intr_mask;
231 unsigned long irq_stats[IRQ_NUM_STATS]; 222 unsigned long irq_stats[IRQ_NUM_STATS];
232 223
224 int msix_nvectors;
233 struct { 225 struct {
234 unsigned short vec; 226 unsigned short vec;
235 char desc[22]; 227 char desc[22];
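
The cxgb3 portion of the series retires the driver-private inet_lro machinery: the net_lro_mgr/net_lro_desc state and the per-queue LRO counters leave struct sge_qset, a napi_gro_fraginfo scratch buffer takes their place, and (further down) the NETIF_F_LRO feature flag plus the ethtool set_flags hook give way to NETIF_F_GRO, which the stack manages itself. With GRO the receive completion path no longer builds aggregation sessions by hand; a driver simply hands each finished skb to the stack from NAPI context, roughly as in this sketch (which uses napi_gro_receive() rather than the frag-based napi_gro_frags() path the cxgb3 code actually takes):

/* Sketch only: GRO-aware receive completion inside a NAPI poll handler. */
static void my_rx_complete(struct napi_struct *napi, struct net_device *dev,
                           struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, dev);
        skb->ip_summed = CHECKSUM_UNNECESSARY;  /* hardware verified csum */

        /* GRO merges eligible TCP segments before they reach the stack;
         * anything it cannot merge is passed through unchanged. */
        napi_gro_receive(napi, skb);
}
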
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 0089746b8d0..f2c7cc3e263 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -338,7 +338,7 @@ static void free_irq_resources(struct adapter *adapter)
338 338
339 free_irq(adapter->msix_info[0].vec, adapter); 339 free_irq(adapter->msix_info[0].vec, adapter);
340 for_each_port(adapter, i) 340 for_each_port(adapter, i)
341 n += adap2pinfo(adapter, i)->nqsets; 341 n += adap2pinfo(adapter, i)->nqsets;
342 342
343 for (i = 0; i < n; ++i) 343 for (i = 0; i < n; ++i)
344 free_irq(adapter->msix_info[i + 1].vec, 344 free_irq(adapter->msix_info[i + 1].vec,
@@ -508,19 +508,9 @@ static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
508{ 508{
509 struct port_info *pi = netdev_priv(dev); 509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter; 510 struct adapter *adapter = pi->adapter;
511 int i, lro_on = 1;
512 511
513 adapter->params.sge.qset[qset_idx].lro = !!val; 512 adapter->params.sge.qset[qset_idx].lro = !!val;
514 adapter->sge.qs[qset_idx].lro_enabled = !!val; 513 adapter->sge.qs[qset_idx].lro_enabled = !!val;
515
516 /* let ethtool report LRO on only if all queues are LRO enabled */
517 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518 lro_on &= adapter->params.sge.qset[i].lro;
519
520 if (lro_on)
521 dev->features |= NETIF_F_LRO;
522 else
523 dev->features &= ~NETIF_F_LRO;
524} 514}
525 515
526/** 516/**
@@ -1433,9 +1423,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1433 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); 1423 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1434 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); 1424 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1435 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); 1425 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1436 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR); 1426 *data++ = 0;
1437 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED); 1427 *data++ = 0;
1438 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC); 1428 *data++ = 0;
1439 *data++ = s->rx_cong_drops; 1429 *data++ = s->rx_cong_drops;
1440 1430
1441 *data++ = s->num_toggled; 1431 *data++ = s->num_toggled;
@@ -1826,28 +1816,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1826 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1816 memset(&wol->sopass, 0, sizeof(wol->sopass));
1827} 1817}
1828 1818
1829static int cxgb3_set_flags(struct net_device *dev, u32 data)
1830{
1831 struct port_info *pi = netdev_priv(dev);
1832 int i;
1833
1834 if (data & ETH_FLAG_LRO) {
1835 if (!(pi->rx_offload & T3_RX_CSUM))
1836 return -EINVAL;
1837
1838 pi->rx_offload |= T3_LRO;
1839 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1840 set_qset_lro(dev, i, 1);
1841
1842 } else {
1843 pi->rx_offload &= ~T3_LRO;
1844 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1845 set_qset_lro(dev, i, 0);
1846 }
1847
1848 return 0;
1849}
1850
1851static const struct ethtool_ops cxgb_ethtool_ops = { 1819static const struct ethtool_ops cxgb_ethtool_ops = {
1852 .get_settings = get_settings, 1820 .get_settings = get_settings,
1853 .set_settings = set_settings, 1821 .set_settings = set_settings,
@@ -1877,8 +1845,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
1877 .get_regs = get_regs, 1845 .get_regs = get_regs,
1878 .get_wol = get_wol, 1846 .get_wol = get_wol,
1879 .set_tso = ethtool_op_set_tso, 1847 .set_tso = ethtool_op_set_tso,
1880 .get_flags = ethtool_op_get_flags,
1881 .set_flags = cxgb3_set_flags,
1882}; 1848};
1883 1849
1884static int in_range(int val, int lo, int hi) 1850static int in_range(int val, int lo, int hi)
@@ -2576,6 +2542,12 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
2576{ 2542{
2577 int i, ret = 0; 2543 int i, ret = 0;
2578 2544
2545 if (is_offload(adapter) &&
2546 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2547 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2548 offload_close(&adapter->tdev);
2549 }
2550
2579 /* Stop all ports */ 2551 /* Stop all ports */
2580 for_each_port(adapter, i) { 2552 for_each_port(adapter, i) {
2581 struct net_device *netdev = adapter->port[i]; 2553 struct net_device *netdev = adapter->port[i];
@@ -2584,10 +2556,6 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
2584 cxgb_close(netdev); 2556 cxgb_close(netdev);
2585 } 2557 }
2586 2558
2587 if (is_offload(adapter) &&
2588 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2589 offload_close(&adapter->tdev);
2590
2591 /* Stop SGE timers */ 2559 /* Stop SGE timers */
2592 t3_stop_sge_timers(adapter); 2560 t3_stop_sge_timers(adapter);
2593 2561
@@ -2639,6 +2607,9 @@ static void t3_resume_ports(struct adapter *adapter)
2639 } 2607 }
2640 } 2608 }
2641 } 2609 }
2610
2611 if (is_offload(adapter) && !ofld_disable)
2612 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2642} 2613}
2643 2614
2644/* 2615/*
@@ -2752,7 +2723,7 @@ static void set_nqsets(struct adapter *adap)
2752 int i, j = 0; 2723 int i, j = 0;
2753 int num_cpus = num_online_cpus(); 2724 int num_cpus = num_online_cpus();
2754 int hwports = adap->params.nports; 2725 int hwports = adap->params.nports;
2755 int nqsets = SGE_QSETS; 2726 int nqsets = adap->msix_nvectors - 1;
2756 2727
2757 if (adap->params.rev > 0 && adap->flags & USING_MSIX) { 2728 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2758 if (hwports == 2 && 2729 if (hwports == 2 &&
@@ -2781,18 +2752,25 @@ static void set_nqsets(struct adapter *adap)
2781static int __devinit cxgb_enable_msix(struct adapter *adap) 2752static int __devinit cxgb_enable_msix(struct adapter *adap)
2782{ 2753{
2783 struct msix_entry entries[SGE_QSETS + 1]; 2754 struct msix_entry entries[SGE_QSETS + 1];
2755 int vectors;
2784 int i, err; 2756 int i, err;
2785 2757
2786 for (i = 0; i < ARRAY_SIZE(entries); ++i) 2758 vectors = ARRAY_SIZE(entries);
2759 for (i = 0; i < vectors; ++i)
2787 entries[i].entry = i; 2760 entries[i].entry = i;
2788 2761
2789 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries)); 2762 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2763 vectors = err;
2764
2765 if (!err && vectors < (adap->params.nports + 1))
2766 err = -1;
2767
2790 if (!err) { 2768 if (!err) {
2791 for (i = 0; i < ARRAY_SIZE(entries); ++i) 2769 for (i = 0; i < vectors; ++i)
2792 adap->msix_info[i].vec = entries[i].vector; 2770 adap->msix_info[i].vec = entries[i].vector;
2793 } else if (err > 0) 2771 adap->msix_nvectors = vectors;
2794 dev_info(&adap->pdev->dev, 2772 }
2795 "only %d MSI-X vectors left, not using MSI-X\n", err); 2773
2796 return err; 2774 return err;
2797} 2775}
2798 2776
@@ -2960,7 +2938,7 @@ static int __devinit init_one(struct pci_dev *pdev,
2960 netdev->mem_end = mmio_start + mmio_len - 1; 2938 netdev->mem_end = mmio_start + mmio_len - 1;
2961 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 2939 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2962 netdev->features |= NETIF_F_LLTX; 2940 netdev->features |= NETIF_F_LLTX;
2963 netdev->features |= NETIF_F_LRO; 2941 netdev->features |= NETIF_F_GRO;
2964 if (pci_using_dac) 2942 if (pci_using_dac)
2965 netdev->features |= NETIF_F_HIGHDMA; 2943 netdev->features |= NETIF_F_HIGHDMA;
2966 2944
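The cxgb_enable_msix() rework above drops the old all-or-nothing request in favour of retrying with however many vectors the PCI core reports as available. A minimal sketch of that negotiation pattern, using only the pre-pci_enable_msix_range() API of this era (the helper name and error policy are illustrative, not the driver's):

#include <linux/pci.h>

/*
 * Sketch: ask for `want` MSI-X vectors, fall back to what the kernel says is
 * available, and fail only if we end up below `need`.  The caller must have
 * filled entries[i].entry beforehand, as cxgb_enable_msix() does.
 */
static int enable_msix_best_effort(struct pci_dev *pdev,
                                   struct msix_entry *entries,
                                   int want, int need)
{
        int vectors = want, err;

        while ((err = pci_enable_msix(pdev, entries, vectors)) > 0)
                vectors = err;          /* positive return = vectors still available */

        if (err)
                return err;             /* negative return = hard failure */

        if (vectors < need) {
                pci_disable_msix(pdev);
                return -ENOSPC;         /* too few vectors to be useful */
        }

        return vectors;                 /* number of vectors actually granted */
}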
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 2d7f69aff1d..620d80be6aa 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -153,6 +153,18 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
153 mutex_unlock(&cxgb3_db_lock); 153 mutex_unlock(&cxgb3_db_lock);
154} 154}
155 155
156void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error)
157{
158 struct cxgb3_client *client;
159
160 mutex_lock(&cxgb3_db_lock);
161 list_for_each_entry(client, &client_list, client_list) {
162 if (client->err_handler)
163 client->err_handler(tdev, status, error);
164 }
165 mutex_unlock(&cxgb3_db_lock);
166}
167
156static struct net_device *get_iff_from_mac(struct adapter *adapter, 168static struct net_device *get_iff_from_mac(struct adapter *adapter,
157 const unsigned char *mac, 169 const unsigned char *mac,
158 unsigned int vlan) 170 unsigned int vlan)
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index d514e5019df..a8e8e5fcdf8 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -64,10 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
64void cxgb3_unregister_client(struct cxgb3_client *client); 64void cxgb3_unregister_client(struct cxgb3_client *client);
65void cxgb3_add_clients(struct t3cdev *tdev); 65void cxgb3_add_clients(struct t3cdev *tdev);
66void cxgb3_remove_clients(struct t3cdev *tdev); 66void cxgb3_remove_clients(struct t3cdev *tdev);
67void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error);
67 68
68typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, 69typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
69 struct sk_buff *skb, void *ctx); 70 struct sk_buff *skb, void *ctx);
70 71
72enum {
73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN
75};
76
71struct cxgb3_client { 77struct cxgb3_client {
72 char *name; 78 char *name;
73 void (*add) (struct t3cdev *); 79 void (*add) (struct t3cdev *);
@@ -76,6 +82,7 @@ struct cxgb3_client {
76 int (*redirect)(void *ctx, struct dst_entry *old, 82 int (*redirect)(void *ctx, struct dst_entry *old,
77 struct dst_entry *new, struct l2t_entry *l2t); 83 struct dst_entry *new, struct l2t_entry *l2t);
78 struct list_head client_list; 84 struct list_head client_list;
85 void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error);
79}; 86};
80 87
81/* 88/*
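With the new err_handler member and the OFFLOAD_STATUS_UP/DOWN codes, an offload client only has to fill in one extra hook to learn about adapter resets. A minimal sketch, with every my_* name hypothetical:

#include "cxgb3_offload.h"

static void my_add(struct t3cdev *tdev)
{
        /* set up per-device offload state */
}

static void my_err_handler(struct t3cdev *tdev, u32 status, u32 error)
{
        if (status == OFFLOAD_STATUS_DOWN) {
                /* adapter is about to reset: quiesce outstanding work */
        } else if (status == OFFLOAD_STATUS_UP) {
                /* adapter is back up: re-establish offload state */
        }
}

static struct cxgb3_client my_client = {
        .name        = "my_offload_client",
        .add         = my_add,
        .err_handler = my_err_handler,
        /* remaining members left NULL for brevity */
};

/* cxgb3_register_client(&my_client) at module init,
 * cxgb3_unregister_client(&my_client) at module exit. */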
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index d31791f6029..8205aa4ae94 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -585,8 +585,7 @@ static void t3_reset_qset(struct sge_qset *q)
585 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); 585 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
586 q->txq_stopped = 0; 586 q->txq_stopped = 0;
587 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ 587 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
588 kfree(q->lro_frag_tbl); 588 q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
589 q->lro_nfrags = q->lro_frag_len = 0;
590} 589}
591 590
592 591
@@ -1938,6 +1937,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1938 skb->ip_summed = CHECKSUM_UNNECESSARY; 1937 skb->ip_summed = CHECKSUM_UNNECESSARY;
1939 } else 1938 } else
1940 skb->ip_summed = CHECKSUM_NONE; 1939 skb->ip_summed = CHECKSUM_NONE;
1940 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
1941 1941
1942 if (unlikely(p->vlan_valid)) { 1942 if (unlikely(p->vlan_valid)) {
1943 struct vlan_group *grp = pi->vlan_grp; 1943 struct vlan_group *grp = pi->vlan_grp;
@@ -1945,10 +1945,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1945 qs->port_stats[SGE_PSTAT_VLANEX]++; 1945 qs->port_stats[SGE_PSTAT_VLANEX]++;
1946 if (likely(grp)) 1946 if (likely(grp))
1947 if (lro) 1947 if (lro)
1948 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb, 1948 vlan_gro_receive(&qs->napi, grp,
1949 grp, 1949 ntohs(p->vlan), skb);
1950 ntohs(p->vlan),
1951 p);
1952 else { 1950 else {
1953 if (unlikely(pi->iscsi_ipv4addr && 1951 if (unlikely(pi->iscsi_ipv4addr &&
1954 is_arp(skb))) { 1952 is_arp(skb))) {
@@ -1965,7 +1963,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1965 dev_kfree_skb_any(skb); 1963 dev_kfree_skb_any(skb);
1966 } else if (rq->polling) { 1964 } else if (rq->polling) {
1967 if (lro) 1965 if (lro)
1968 lro_receive_skb(&qs->lro_mgr, skb, p); 1966 napi_gro_receive(&qs->napi, skb);
1969 else { 1967 else {
1970 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) 1968 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
1971 cxgb3_arp_process(adap, skb); 1969 cxgb3_arp_process(adap, skb);
@@ -1981,59 +1979,6 @@ static inline int is_eth_tcp(u32 rss)
1981} 1979}
1982 1980
1983/** 1981/**
1984 * lro_frame_ok - check if an ingress packet is eligible for LRO
1985 * @p: the CPL header of the packet
1986 *
1987 * Returns true if a received packet is eligible for LRO.
1988 * The following conditions must be true:
1989 * - packet is TCP/IP Ethernet II (checked elsewhere)
1990 * - not an IP fragment
1991 * - no IP options
1992 * - TCP/IP checksums are correct
1993 * - the packet is for this host
1994 */
1995static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1996{
1997 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1998 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1999
2000 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
2001 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
2002}
2003
2004static int t3_get_lro_header(void **eh, void **iph, void **tcph,
2005 u64 *hdr_flags, void *priv)
2006{
2007 const struct cpl_rx_pkt *cpl = priv;
2008
2009 if (!lro_frame_ok(cpl))
2010 return -1;
2011
2012 *eh = (struct ethhdr *)(cpl + 1);
2013 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
2014 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
2015
2016 *hdr_flags = LRO_IPV4 | LRO_TCP;
2017 return 0;
2018}
2019
2020static int t3_get_skb_header(struct sk_buff *skb,
2021 void **iph, void **tcph, u64 *hdr_flags,
2022 void *priv)
2023{
2024 void *eh;
2025
2026 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
2027}
2028
2029static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
2030 void **iph, void **tcph, u64 *hdr_flags,
2031 void *priv)
2032{
2033 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
2034}
2035
2036/**
2037 * lro_add_page - add a page chunk to an LRO session 1982 * lro_add_page - add a page chunk to an LRO session
2038 * @adap: the adapter 1983 * @adap: the adapter
2039 * @qs: the associated queue set 1984 * @qs: the associated queue set
@@ -2049,8 +1994,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2049{ 1994{
2050 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 1995 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2051 struct cpl_rx_pkt *cpl; 1996 struct cpl_rx_pkt *cpl;
2052 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl; 1997 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
2053 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len; 1998 int nr_frags = qs->lro_frag_tbl.nr_frags;
1999 int frag_len = qs->lro_frag_tbl.len;
2054 int offset = 0; 2000 int offset = 0;
2055 2001
2056 if (!nr_frags) { 2002 if (!nr_frags) {
@@ -2069,13 +2015,13 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2069 rx_frag->page_offset = sd->pg_chunk.offset + offset; 2015 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2070 rx_frag->size = len; 2016 rx_frag->size = len;
2071 frag_len += len; 2017 frag_len += len;
2072 qs->lro_nfrags++; 2018 qs->lro_frag_tbl.nr_frags++;
2073 qs->lro_frag_len = frag_len; 2019 qs->lro_frag_tbl.len = frag_len;
2074 2020
2075 if (!complete) 2021 if (!complete)
2076 return; 2022 return;
2077 2023
2078 qs->lro_nfrags = qs->lro_frag_len = 0; 2024 qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
2079 cpl = qs->lro_va; 2025 cpl = qs->lro_va;
2080 2026
2081 if (unlikely(cpl->vlan_valid)) { 2027 if (unlikely(cpl->vlan_valid)) {
@@ -2084,36 +2030,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2084 struct vlan_group *grp = pi->vlan_grp; 2030 struct vlan_group *grp = pi->vlan_grp;
2085 2031
2086 if (likely(grp != NULL)) { 2032 if (likely(grp != NULL)) {
2087 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr, 2033 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
2088 qs->lro_frag_tbl, 2034 &qs->lro_frag_tbl);
2089 frag_len, frag_len, 2035 goto out;
2090 grp, ntohs(cpl->vlan),
2091 cpl, 0);
2092 return;
2093 } 2036 }
2094 } 2037 }
2095 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl, 2038 napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
2096 frag_len, frag_len, cpl, 0);
2097}
2098 2039
2099/** 2040out:
2100 * init_lro_mgr - initialize a LRO manager object 2041 qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
2101 * @lro_mgr: the LRO manager object
2102 */
2103static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2104{
2105 lro_mgr->dev = qs->netdev;
2106 lro_mgr->features = LRO_F_NAPI;
2107 lro_mgr->frag_align_pad = NET_IP_ALIGN;
2108 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2109 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2110 lro_mgr->max_desc = T3_MAX_LRO_SES;
2111 lro_mgr->lro_arr = qs->lro_desc;
2112 lro_mgr->get_frag_header = t3_get_frag_header;
2113 lro_mgr->get_skb_header = t3_get_skb_header;
2114 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2115 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2116 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2117} 2042}
2118 2043
2119/** 2044/**
@@ -2356,10 +2281,6 @@ next_fl:
2356 } 2281 }
2357 2282
2358 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); 2283 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2359 lro_flush_all(&qs->lro_mgr);
2360 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2361 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2362 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2363 2284
2364 if (sleeping) 2285 if (sleeping)
2365 check_ring_db(adap, qs, sleeping); 2286 check_ring_db(adap, qs, sleeping);
@@ -2906,7 +2827,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2906{ 2827{
2907 int i, avail, ret = -ENOMEM; 2828 int i, avail, ret = -ENOMEM;
2908 struct sge_qset *q = &adapter->sge.qs[id]; 2829 struct sge_qset *q = &adapter->sge.qs[id];
2909 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
2910 2830
2911 init_qset_cntxt(q, id); 2831 init_qset_cntxt(q, id);
2912 setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q); 2832 setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
@@ -2986,10 +2906,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2986 q->fl[0].order = FL0_PG_ORDER; 2906 q->fl[0].order = FL0_PG_ORDER;
2987 q->fl[1].order = FL1_PG_ORDER; 2907 q->fl[1].order = FL1_PG_ORDER;
2988 2908
2989 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2990 sizeof(struct skb_frag_struct),
2991 GFP_KERNEL);
2992 q->lro_nfrags = q->lro_frag_len = 0;
2993 spin_lock_irq(&adapter->sge.reg_lock); 2909 spin_lock_irq(&adapter->sge.reg_lock);
2994 2910
2995 /* FL threshold comparison uses < */ 2911 /* FL threshold comparison uses < */
@@ -3041,8 +2957,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3041 q->tx_q = netdevq; 2957 q->tx_q = netdevq;
3042 t3_update_qset_coalesce(q, p); 2958 t3_update_qset_coalesce(q, p);
3043 2959
3044 init_lro_mgr(q, lro_mgr);
3045
3046 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, 2960 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3047 GFP_KERNEL | __GFP_COMP); 2961 GFP_KERNEL | __GFP_COMP);
3048 if (!avail) { 2962 if (!avail) {
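The sge.c changes above swap the inet_lro entry points for their GRO counterparts. For complete skbs the receive path of this era reduces to the pattern below (a sketch only, not the cxgb3 code; the helper and its arguments are illustrative):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Deliver a checksum-verified frame through GRO, with or without a VLAN tag. */
static void gro_deliver(struct napi_struct *napi, struct vlan_group *grp,
                        struct sk_buff *skb, bool vlan_valid, u16 vlan_tci)
{
        skb->ip_summed = CHECKSUM_UNNECESSARY;  /* hardware already verified it */

        if (vlan_valid && grp)
                vlan_gro_receive(napi, grp, vlan_tci, skb);  /* was lro_vlan_hwaccel_receive_skb() */
        else
                napi_gro_receive(napi, skb);                 /* was lro_receive_skb() */
}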
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 7ce3053530f..861c867fca8 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1027,7 +1027,7 @@ static int __init dec_lance_probe(struct device *bdev, const int type)
1027 printk(version); 1027 printk(version);
1028 1028
1029 if (bdev) 1029 if (bdev)
1030 snprintf(name, sizeof(name), "%s", bdev->bus_id); 1030 snprintf(name, sizeof(name), "%s", dev_name(bdev));
1031 else { 1031 else {
1032 i = 0; 1032 i = 0;
1033 dev = root_lance_dev; 1033 dev = root_lance_dev;
@@ -1105,10 +1105,10 @@ static int __init dec_lance_probe(struct device *bdev, const int type)
1105 1105
1106 start = to_tc_dev(bdev)->resource.start; 1106 start = to_tc_dev(bdev)->resource.start;
1107 len = to_tc_dev(bdev)->resource.end - start + 1; 1107 len = to_tc_dev(bdev)->resource.end - start + 1;
1108 if (!request_mem_region(start, len, bdev->bus_id)) { 1108 if (!request_mem_region(start, len, dev_name(bdev))) {
1109 printk(KERN_ERR 1109 printk(KERN_ERR
1110 "%s: Unable to reserve MMIO resource\n", 1110 "%s: Unable to reserve MMIO resource\n",
1111 bdev->bus_id); 1111 dev_name(bdev));
1112 ret = -EBUSY; 1112 ret = -EBUSY;
1113 goto err_out_dev; 1113 goto err_out_dev;
1114 } 1114 }
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6445cedd586..4ec055dc717 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2937,7 +2937,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2937 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) 2937 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2938 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) 2938 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2939 { 2939 {
2940 struct sk_buff *newskb = __dev_alloc_skb(NEW_SKB_SIZE, GFP_NOIO); 2940 struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
2941 if (!newskb) 2941 if (!newskb)
2942 return -ENOMEM; 2942 return -ENOMEM;
2943 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2943 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index e4cef491dc7..55625dbbae5 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -606,8 +606,8 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
606 if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) 606 if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
607 return -ENXIO; 607 return -ENXIO;
608 608
609 printk ("%s: %s at 0x%04lx", 609 printk("%s: %s at 0x%04lx",
610 device->bus_id, depca_signature[lp->adapter], ioaddr); 610 dev_name(device), depca_signature[lp->adapter], ioaddr);
611 611
612 switch (lp->depca_bus) { 612 switch (lp->depca_bus) {
613#ifdef CONFIG_MCA 613#ifdef CONFIG_MCA
@@ -669,7 +669,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
669 669
670 spin_lock_init(&lp->lock); 670 spin_lock_init(&lp->lock);
671 sprintf(lp->adapter_name, "%s (%s)", 671 sprintf(lp->adapter_name, "%s (%s)",
672 depca_signature[lp->adapter], device->bus_id); 672 depca_signature[lp->adapter], dev_name(device));
673 status = -EBUSY; 673 status = -EBUSY;
674 674
675 /* Initialisation Block */ 675 /* Initialisation Block */
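The declance and depca hunks above are part of the tree-wide move away from the fixed-size bus_id field; the only change a driver needs is to go through the accessor. A minimal sketch:

#include <linux/kernel.h>
#include <linux/device.h>

/* dev_name() replaces direct reads of the old dev->bus_id array. */
static void announce(struct device *dev)
{
        pr_info("probing %s\n", dev_name(dev));
}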
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 86bb876fb12..861d2eeaa43 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1944,9 +1944,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
1944 if (stat_ack & stat_ack_rnr) 1944 if (stat_ack & stat_ack_rnr)
1945 nic->ru_running = RU_SUSPENDED; 1945 nic->ru_running = RU_SUSPENDED;
1946 1946
1947 if (likely(netif_rx_schedule_prep(&nic->napi))) { 1947 if (likely(napi_schedule_prep(&nic->napi))) {
1948 e100_disable_irq(nic); 1948 e100_disable_irq(nic);
1949 __netif_rx_schedule(&nic->napi); 1949 __napi_schedule(&nic->napi);
1950 } 1950 }
1951 1951
1952 return IRQ_HANDLED; 1952 return IRQ_HANDLED;
@@ -1962,7 +1962,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
1962 1962
1963 /* If budget not fully consumed, exit the polling mode */ 1963 /* If budget not fully consumed, exit the polling mode */
1964 if (work_done < budget) { 1964 if (work_done < budget) {
1965 netif_rx_complete(napi); 1965 napi_complete(napi);
1966 e100_enable_irq(nic); 1966 e100_enable_irq(nic);
1967 } 1967 }
1968 1968
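The e100 hunk above is one instance of the netif_rx_* to napi_* rename that recurs through the rest of this patch (e1000, e1000e, ehea); the semantics are unchanged. A stand-alone sketch of the renamed interrupt/poll pairing, with every my_* identifier hypothetical:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_nic {                         /* hypothetical driver private struct */
        struct napi_struct napi;
};

/* hypothetical stand-ins for the driver's IRQ mask/unmask and RX cleanup */
static void my_disable_irq(struct my_nic *nic) { }
static void my_enable_irq(struct my_nic *nic) { }
static int my_clean_rx(struct my_nic *nic, int budget) { return 0; }

static irqreturn_t my_intr(int irq, void *dev_id)
{
        struct my_nic *nic = dev_id;

        if (likely(napi_schedule_prep(&nic->napi))) {   /* was netif_rx_schedule_prep() */
                my_disable_irq(nic);
                __napi_schedule(&nic->napi);            /* was __netif_rx_schedule() */
        }
        return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_nic *nic = container_of(napi, struct my_nic, napi);
        int work_done = my_clean_rx(nic, budget);

        if (work_done < budget) {
                napi_complete(napi);                    /* was netif_rx_complete() */
                my_enable_irq(nic);
        }
        return work_done;
}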
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f5581de0475..e9a416f4016 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -182,7 +182,6 @@ struct e1000_tx_ring {
182 /* array of buffer information structs */ 182 /* array of buffer information structs */
183 struct e1000_buffer *buffer_info; 183 struct e1000_buffer *buffer_info;
184 184
185 spinlock_t tx_lock;
186 u16 tdh; 185 u16 tdh;
187 u16 tdt; 186 u16 tdt;
188 bool last_tx_tso; 187 bool last_tx_tso;
@@ -238,7 +237,6 @@ struct e1000_adapter {
238 u16 link_speed; 237 u16 link_speed;
239 u16 link_duplex; 238 u16 link_duplex;
240 spinlock_t stats_lock; 239 spinlock_t stats_lock;
241 spinlock_t tx_queue_lock;
242 unsigned int total_tx_bytes; 240 unsigned int total_tx_bytes;
243 unsigned int total_tx_packets; 241 unsigned int total_tx_packets;
244 unsigned int total_rx_bytes; 242 unsigned int total_rx_bytes;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6bd63cc67b3..ca7cd7e2bf2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1048,8 +1048,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1048 if (pci_using_dac) 1048 if (pci_using_dac)
1049 netdev->features |= NETIF_F_HIGHDMA; 1049 netdev->features |= NETIF_F_HIGHDMA;
1050 1050
1051 netdev->features |= NETIF_F_LLTX;
1052
1053 netdev->vlan_features |= NETIF_F_TSO; 1051 netdev->vlan_features |= NETIF_F_TSO;
1054 netdev->vlan_features |= NETIF_F_TSO6; 1052 netdev->vlan_features |= NETIF_F_TSO6;
1055 netdev->vlan_features |= NETIF_F_HW_CSUM; 1053 netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -1368,8 +1366,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1368 return -ENOMEM; 1366 return -ENOMEM;
1369 } 1367 }
1370 1368
1371 spin_lock_init(&adapter->tx_queue_lock);
1372
1373 /* Explicitly disable IRQ since the NIC can be in any state. */ 1369 /* Explicitly disable IRQ since the NIC can be in any state. */
1374 e1000_irq_disable(adapter); 1370 e1000_irq_disable(adapter);
1375 1371
@@ -1624,7 +1620,6 @@ setup_tx_desc_die:
1624 1620
1625 txdr->next_to_use = 0; 1621 txdr->next_to_use = 0;
1626 txdr->next_to_clean = 0; 1622 txdr->next_to_clean = 0;
1627 spin_lock_init(&txdr->tx_lock);
1628 1623
1629 return 0; 1624 return 0;
1630} 1625}
@@ -2865,11 +2860,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
2865 return false; 2860 return false;
2866 2861
2867 switch (skb->protocol) { 2862 switch (skb->protocol) {
2868 case __constant_htons(ETH_P_IP): 2863 case cpu_to_be16(ETH_P_IP):
2869 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2864 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2870 cmd_len |= E1000_TXD_CMD_TCP; 2865 cmd_len |= E1000_TXD_CMD_TCP;
2871 break; 2866 break;
2872 case __constant_htons(ETH_P_IPV6): 2867 case cpu_to_be16(ETH_P_IPV6):
2873 /* XXX not handling all IPV6 headers */ 2868 /* XXX not handling all IPV6 headers */
2874 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2869 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2875 cmd_len |= E1000_TXD_CMD_TCP; 2870 cmd_len |= E1000_TXD_CMD_TCP;
@@ -3185,7 +3180,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3185 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3180 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3186 unsigned int tx_flags = 0; 3181 unsigned int tx_flags = 0;
3187 unsigned int len = skb->len - skb->data_len; 3182 unsigned int len = skb->len - skb->data_len;
3188 unsigned long flags;
3189 unsigned int nr_frags; 3183 unsigned int nr_frags;
3190 unsigned int mss; 3184 unsigned int mss;
3191 int count = 0; 3185 int count = 0;
@@ -3290,22 +3284,15 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3290 (hw->mac_type == e1000_82573)) 3284 (hw->mac_type == e1000_82573))
3291 e1000_transfer_dhcp_info(adapter, skb); 3285 e1000_transfer_dhcp_info(adapter, skb);
3292 3286
3293 if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
3294 /* Collision - tell upper layer to requeue */
3295 return NETDEV_TX_LOCKED;
3296
3297 /* need: count + 2 desc gap to keep tail from touching 3287 /* need: count + 2 desc gap to keep tail from touching
3298 * head, otherwise try next time */ 3288 * head, otherwise try next time */
3299 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) { 3289 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3300 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3301 return NETDEV_TX_BUSY; 3290 return NETDEV_TX_BUSY;
3302 }
3303 3291
3304 if (unlikely(hw->mac_type == e1000_82547)) { 3292 if (unlikely(hw->mac_type == e1000_82547)) {
3305 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 3293 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3306 netif_stop_queue(netdev); 3294 netif_stop_queue(netdev);
3307 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 3295 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
3308 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3309 return NETDEV_TX_BUSY; 3296 return NETDEV_TX_BUSY;
3310 } 3297 }
3311 } 3298 }
@@ -3320,7 +3307,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3320 tso = e1000_tso(adapter, tx_ring, skb); 3307 tso = e1000_tso(adapter, tx_ring, skb);
3321 if (tso < 0) { 3308 if (tso < 0) {
3322 dev_kfree_skb_any(skb); 3309 dev_kfree_skb_any(skb);
3323 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3324 return NETDEV_TX_OK; 3310 return NETDEV_TX_OK;
3325 } 3311 }
3326 3312
@@ -3345,7 +3331,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3345 /* Make sure there is space in the ring for the next send. */ 3331 /* Make sure there is space in the ring for the next send. */
3346 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3332 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3347 3333
3348 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3349 return NETDEV_TX_OK; 3334 return NETDEV_TX_OK;
3350} 3335}
3351 3336
@@ -3687,12 +3672,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
3687 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3672 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3688 } 3673 }
3689 3674
3690 if (likely(netif_rx_schedule_prep(&adapter->napi))) { 3675 if (likely(napi_schedule_prep(&adapter->napi))) {
3691 adapter->total_tx_bytes = 0; 3676 adapter->total_tx_bytes = 0;
3692 adapter->total_tx_packets = 0; 3677 adapter->total_tx_packets = 0;
3693 adapter->total_rx_bytes = 0; 3678 adapter->total_rx_bytes = 0;
3694 adapter->total_rx_packets = 0; 3679 adapter->total_rx_packets = 0;
3695 __netif_rx_schedule(&adapter->napi); 3680 __napi_schedule(&adapter->napi);
3696 } else 3681 } else
3697 e1000_irq_enable(adapter); 3682 e1000_irq_enable(adapter);
3698 3683
@@ -3747,12 +3732,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
3747 ew32(IMC, ~0); 3732 ew32(IMC, ~0);
3748 E1000_WRITE_FLUSH(); 3733 E1000_WRITE_FLUSH();
3749 } 3734 }
3750 if (likely(netif_rx_schedule_prep(&adapter->napi))) { 3735 if (likely(napi_schedule_prep(&adapter->napi))) {
3751 adapter->total_tx_bytes = 0; 3736 adapter->total_tx_bytes = 0;
3752 adapter->total_tx_packets = 0; 3737 adapter->total_tx_packets = 0;
3753 adapter->total_rx_bytes = 0; 3738 adapter->total_rx_bytes = 0;
3754 adapter->total_rx_packets = 0; 3739 adapter->total_rx_packets = 0;
3755 __netif_rx_schedule(&adapter->napi); 3740 __napi_schedule(&adapter->napi);
3756 } else 3741 } else
3757 /* this really should not happen! if it does it is basically a 3742 /* this really should not happen! if it does it is basically a
3758 * bug, but not a hard error, so enable ints and continue */ 3743 * bug, but not a hard error, so enable ints and continue */
@@ -3773,15 +3758,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
3773 3758
3774 adapter = netdev_priv(poll_dev); 3759 adapter = netdev_priv(poll_dev);
3775 3760
3776 /* e1000_clean is called per-cpu. This lock protects 3761 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3777 * tx_ring[0] from being cleaned by multiple cpus
3778 * simultaneously. A failure obtaining the lock means
3779 * tx_ring[0] is currently being cleaned anyway. */
3780 if (spin_trylock(&adapter->tx_queue_lock)) {
3781 tx_cleaned = e1000_clean_tx_irq(adapter,
3782 &adapter->tx_ring[0]);
3783 spin_unlock(&adapter->tx_queue_lock);
3784 }
3785 3762
3786 adapter->clean_rx(adapter, &adapter->rx_ring[0], 3763 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3787 &work_done, budget); 3764 &work_done, budget);
@@ -3793,7 +3770,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
3793 if (work_done < budget) { 3770 if (work_done < budget) {
3794 if (likely(adapter->itr_setting & 3)) 3771 if (likely(adapter->itr_setting & 3))
3795 e1000_set_itr(adapter); 3772 e1000_set_itr(adapter);
3796 netif_rx_complete(napi); 3773 napi_complete(napi);
3797 e1000_irq_enable(adapter); 3774 e1000_irq_enable(adapter);
3798 } 3775 }
3799 3776
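Dropping NETIF_F_LLTX above means the core now takes the per-queue TX lock around the transmit hook, so the driver's own trylock and the NETDEV_TX_LOCKED return path disappear. The transmit routine roughly reduces to this shape (a sketch; the my_* helpers are placeholders, not e1000 functions):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_DESC_NEEDED  (MAX_SKB_FRAGS + 2)     /* illustrative descriptor headroom */

/* hypothetical helpers */
static int my_maybe_stop_tx(struct net_device *dev, int needed) { return 0; }
static void my_map_and_queue(struct net_device *dev, struct sk_buff *skb) { }

static int my_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        /* No trylock, no NETDEV_TX_LOCKED: the core already holds the
         * per-queue TX lock for non-LLTX drivers. */
        if (my_maybe_stop_tx(netdev, MY_DESC_NEEDED))
                return NETDEV_TX_BUSY;          /* ring full, core will retry */

        my_map_and_queue(netdev, skb);
        return NETDEV_TX_OK;
}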
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 0890162953e..565fd4e8f95 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -61,6 +61,7 @@
61static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); 61static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
62static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); 62static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
63static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); 63static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
64static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
64static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, 65static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
65 u16 words, u16 *data); 66 u16 words, u16 *data);
66static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); 67static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
@@ -250,7 +251,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
250 case e1000_media_type_internal_serdes: 251 case e1000_media_type_internal_serdes:
251 func->setup_physical_interface = 252 func->setup_physical_interface =
252 e1000_setup_fiber_serdes_link_82571; 253 e1000_setup_fiber_serdes_link_82571;
253 func->check_for_link = e1000e_check_for_serdes_link; 254 func->check_for_link = e1000_check_for_serdes_link_82571;
254 func->get_link_up_info = 255 func->get_link_up_info =
255 e1000e_get_speed_and_duplex_fiber_serdes; 256 e1000e_get_speed_and_duplex_fiber_serdes;
256 break; 257 break;
@@ -830,6 +831,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
830 hw->dev_spec.e82571.alt_mac_addr_is_present) 831 hw->dev_spec.e82571.alt_mac_addr_is_present)
831 e1000e_set_laa_state_82571(hw, true); 832 e1000e_set_laa_state_82571(hw, true);
832 833
834 /* Reinitialize the 82571 serdes link state machine */
835 if (hw->phy.media_type == e1000_media_type_internal_serdes)
836 hw->mac.serdes_link_state = e1000_serdes_link_down;
837
833 return 0; 838 return 0;
834} 839}
835 840
@@ -980,6 +985,18 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
980 reg |= E1000_PBA_ECC_CORR_EN; 985 reg |= E1000_PBA_ECC_CORR_EN;
981 ew32(PBA_ECC, reg); 986 ew32(PBA_ECC, reg);
982 } 987 }
988 /*
989 * Workaround for hardware errata.
990 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
991 */
992
993 if ((hw->mac.type == e1000_82571) ||
994 (hw->mac.type == e1000_82572)) {
995 reg = er32(CTRL_EXT);
996 reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
997 ew32(CTRL_EXT, reg);
998 }
999
983 1000
984 /* PCI-Ex Control Registers */ 1001 /* PCI-Ex Control Registers */
985 if (hw->mac.type == e1000_82574) { 1002 if (hw->mac.type == e1000_82574) {
@@ -1203,6 +1220,131 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
1203} 1220}
1204 1221
1205/** 1222/**
1223 * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
1224 * @hw: pointer to the HW structure
1225 *
1226 * Checks for link up on the hardware. If link is not up and we have
1227 * a signal, then we need to force link up.
1228 **/
 1229static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1230{
1231 struct e1000_mac_info *mac = &hw->mac;
1232 u32 rxcw;
1233 u32 ctrl;
1234 u32 status;
1235 s32 ret_val = 0;
1236
1237 ctrl = er32(CTRL);
1238 status = er32(STATUS);
1239 rxcw = er32(RXCW);
1240
1241 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
1242
1243 /* Receiver is synchronized with no invalid bits. */
1244 switch (mac->serdes_link_state) {
1245 case e1000_serdes_link_autoneg_complete:
1246 if (!(status & E1000_STATUS_LU)) {
1247 /*
1248 * We have lost link, retry autoneg before
1249 * reporting link failure
1250 */
1251 mac->serdes_link_state =
1252 e1000_serdes_link_autoneg_progress;
1253 hw_dbg(hw, "AN_UP -> AN_PROG\n");
1254 }
1255 break;
1256
1257 case e1000_serdes_link_forced_up:
1258 /*
1259 * If we are receiving /C/ ordered sets, re-enable
1260 * auto-negotiation in the TXCW register and disable
1261 * forced link in the Device Control register in an
1262 * attempt to auto-negotiate with our link partner.
1263 */
1264 if (rxcw & E1000_RXCW_C) {
1265 /* Enable autoneg, and unforce link up */
1266 ew32(TXCW, mac->txcw);
1267 ew32(CTRL,
1268 (ctrl & ~E1000_CTRL_SLU));
1269 mac->serdes_link_state =
1270 e1000_serdes_link_autoneg_progress;
1271 hw_dbg(hw, "FORCED_UP -> AN_PROG\n");
1272 }
1273 break;
1274
1275 case e1000_serdes_link_autoneg_progress:
1276 /*
1277 * If the LU bit is set in the STATUS register,
 1278 * autoneg has completed successfully. If not,
 1279 * try forcing the link because the far end may be
1280 * available but not capable of autonegotiation.
1281 */
1282 if (status & E1000_STATUS_LU) {
1283 mac->serdes_link_state =
1284 e1000_serdes_link_autoneg_complete;
1285 hw_dbg(hw, "AN_PROG -> AN_UP\n");
1286 } else {
1287 /*
1288 * Disable autoneg, force link up and
1289 * full duplex, and change state to forced
1290 */
1291 ew32(TXCW,
1292 (mac->txcw & ~E1000_TXCW_ANE));
1293 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
1294 ew32(CTRL, ctrl);
1295
1296 /* Configure Flow Control after link up. */
1297 ret_val =
1298 e1000e_config_fc_after_link_up(hw);
1299 if (ret_val) {
1300 hw_dbg(hw, "Error config flow control\n");
1301 break;
1302 }
1303 mac->serdes_link_state =
1304 e1000_serdes_link_forced_up;
1305 hw_dbg(hw, "AN_PROG -> FORCED_UP\n");
1306 }
1307 mac->serdes_has_link = true;
1308 break;
1309
1310 case e1000_serdes_link_down:
1311 default:
1312 /* The link was down but the receiver has now gained
 1313 * valid sync, so let's see if we can bring the link
1314 * up. */
1315 ew32(TXCW, mac->txcw);
1316 ew32(CTRL,
1317 (ctrl & ~E1000_CTRL_SLU));
1318 mac->serdes_link_state =
1319 e1000_serdes_link_autoneg_progress;
1320 hw_dbg(hw, "DOWN -> AN_PROG\n");
1321 break;
1322 }
1323 } else {
1324 if (!(rxcw & E1000_RXCW_SYNCH)) {
1325 mac->serdes_has_link = false;
1326 mac->serdes_link_state = e1000_serdes_link_down;
1327 hw_dbg(hw, "ANYSTATE -> DOWN\n");
1328 } else {
1329 /*
1330 * We have sync, and can tolerate one
1331 * invalid (IV) codeword before declaring
1332 * link down, so reread to look again
1333 */
1334 udelay(10);
1335 rxcw = er32(RXCW);
1336 if (rxcw & E1000_RXCW_IV) {
1337 mac->serdes_link_state = e1000_serdes_link_down;
1338 mac->serdes_has_link = false;
1339 hw_dbg(hw, "ANYSTATE -> DOWN\n");
1340 }
1341 }
1342 }
1343
1344 return ret_val;
1345}
1346
1347/**
1206 * e1000_valid_led_default_82571 - Verify a valid default LED config 1348 * e1000_valid_led_default_82571 - Verify a valid default LED config
1207 * @hw: pointer to the HW structure 1349 * @hw: pointer to the HW structure
1208 * @data: pointer to the NVM (EEPROM) 1350 * @data: pointer to the NVM (EEPROM)
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e6caf29d425..243aa499fe9 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -69,6 +69,7 @@
69#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ 69#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
70#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 70#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
72#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 73#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
73#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 74#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
74#define E1000_CTRL_EXT_EIAME 0x01000000 75#define E1000_CTRL_EXT_EIAME 0x01000000
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 37bcb190eef..28bf9a51346 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -195,8 +195,6 @@ struct e1000_adapter {
195 u16 link_duplex; 195 u16 link_duplex;
196 u16 eeprom_vers; 196 u16 eeprom_vers;
197 197
198 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
199
200 /* track device up/down/testing state */ 198 /* track device up/down/testing state */
201 unsigned long state; 199 unsigned long state;
202 200
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e48956d924b..2557aeef65e 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1589,7 +1589,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1589 *data = 0; 1589 *data = 0;
1590 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1590 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1591 int i = 0; 1591 int i = 0;
1592 hw->mac.serdes_has_link = 0; 1592 hw->mac.serdes_has_link = false;
1593 1593
1594 /* 1594 /*
1595 * On some blade server designs, link establishment 1595 * On some blade server designs, link establishment
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2d4ce0492df..5cb428c2811 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -459,6 +459,13 @@ enum e1000_smart_speed {
459 e1000_smart_speed_off 459 e1000_smart_speed_off
460}; 460};
461 461
462enum e1000_serdes_link_state {
463 e1000_serdes_link_down = 0,
464 e1000_serdes_link_autoneg_progress,
465 e1000_serdes_link_autoneg_complete,
466 e1000_serdes_link_forced_up
467};
468
462/* Receive Descriptor */ 469/* Receive Descriptor */
463struct e1000_rx_desc { 470struct e1000_rx_desc {
464 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 471 __le64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -787,6 +794,7 @@ struct e1000_mac_info {
787 bool in_ifs_mode; 794 bool in_ifs_mode;
788 bool serdes_has_link; 795 bool serdes_has_link;
789 bool tx_pkt_filtering; 796 bool tx_pkt_filtering;
797 enum e1000_serdes_link_state serdes_link_state;
790}; 798};
791 799
792struct e1000_phy_info { 800struct e1000_phy_info {
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 66741104ffd..ac2f34e1836 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -501,7 +501,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
501 ew32(TXCW, mac->txcw); 501 ew32(TXCW, mac->txcw);
502 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 502 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
503 503
504 mac->serdes_has_link = 1; 504 mac->serdes_has_link = true;
505 } 505 }
506 506
507 return 0; 507 return 0;
@@ -566,7 +566,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
566 ew32(TXCW, mac->txcw); 566 ew32(TXCW, mac->txcw);
567 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 567 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
568 568
569 mac->serdes_has_link = 1; 569 mac->serdes_has_link = true;
570 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { 570 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
571 /* 571 /*
572 * If we force link for non-auto-negotiation switch, check 572 * If we force link for non-auto-negotiation switch, check
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 91817d0afca..04e007dcf47 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -44,10 +44,11 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/smp.h> 45#include <linux/smp.h>
46#include <linux/pm_qos_params.h> 46#include <linux/pm_qos_params.h>
47#include <linux/aer.h>
47 48
48#include "e1000.h" 49#include "e1000.h"
49 50
50#define DRV_VERSION "0.3.3.3-k6" 51#define DRV_VERSION "0.3.3.4-k2"
51char e1000e_driver_name[] = "e1000e"; 52char e1000e_driver_name[] = "e1000e";
52const char e1000e_driver_version[] = DRV_VERSION; 53const char e1000e_driver_version[] = DRV_VERSION;
53 54
@@ -99,8 +100,8 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
99 skb->protocol = eth_type_trans(skb, netdev); 100 skb->protocol = eth_type_trans(skb, netdev);
100 101
101 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) 102 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
102 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 103 vlan_gro_receive(&adapter->napi, adapter->vlgrp,
103 le16_to_cpu(vlan)); 104 le16_to_cpu(vlan), skb);
104 else 105 else
105 napi_gro_receive(&adapter->napi, skb); 106 napi_gro_receive(&adapter->napi, skb);
106} 107}
@@ -1152,7 +1153,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
1152 * read ICR disables interrupts using IAM 1153 * read ICR disables interrupts using IAM
1153 */ 1154 */
1154 1155
1155 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1156 if (icr & E1000_ICR_LSC) {
1156 hw->mac.get_link_status = 1; 1157 hw->mac.get_link_status = 1;
1157 /* 1158 /*
1158 * ICH8 workaround-- Call gig speed drop workaround on cable 1159 * ICH8 workaround-- Call gig speed drop workaround on cable
@@ -1179,12 +1180,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
1179 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1180 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1180 } 1181 }
1181 1182
1182 if (netif_rx_schedule_prep(&adapter->napi)) { 1183 if (napi_schedule_prep(&adapter->napi)) {
1183 adapter->total_tx_bytes = 0; 1184 adapter->total_tx_bytes = 0;
1184 adapter->total_tx_packets = 0; 1185 adapter->total_tx_packets = 0;
1185 adapter->total_rx_bytes = 0; 1186 adapter->total_rx_bytes = 0;
1186 adapter->total_rx_packets = 0; 1187 adapter->total_rx_packets = 0;
1187 __netif_rx_schedule(&adapter->napi); 1188 __napi_schedule(&adapter->napi);
1188 } 1189 }
1189 1190
1190 return IRQ_HANDLED; 1191 return IRQ_HANDLED;
@@ -1218,7 +1219,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
1218 * IMC write 1219 * IMC write
1219 */ 1220 */
1220 1221
1221 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1222 if (icr & E1000_ICR_LSC) {
1222 hw->mac.get_link_status = 1; 1223 hw->mac.get_link_status = 1;
1223 /* 1224 /*
1224 * ICH8 workaround-- Call gig speed drop workaround on cable 1225 * ICH8 workaround-- Call gig speed drop workaround on cable
@@ -1246,12 +1247,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
1246 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1247 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1247 } 1248 }
1248 1249
1249 if (netif_rx_schedule_prep(&adapter->napi)) { 1250 if (napi_schedule_prep(&adapter->napi)) {
1250 adapter->total_tx_bytes = 0; 1251 adapter->total_tx_bytes = 0;
1251 adapter->total_tx_packets = 0; 1252 adapter->total_tx_packets = 0;
1252 adapter->total_rx_bytes = 0; 1253 adapter->total_rx_bytes = 0;
1253 adapter->total_rx_packets = 0; 1254 adapter->total_rx_packets = 0;
1254 __netif_rx_schedule(&adapter->napi); 1255 __napi_schedule(&adapter->napi);
1255 } 1256 }
1256 1257
1257 return IRQ_HANDLED; 1258 return IRQ_HANDLED;
@@ -1320,10 +1321,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1320 adapter->rx_ring->set_itr = 0; 1321 adapter->rx_ring->set_itr = 0;
1321 } 1322 }
1322 1323
1323 if (netif_rx_schedule_prep(&adapter->napi)) { 1324 if (napi_schedule_prep(&adapter->napi)) {
1324 adapter->total_rx_bytes = 0; 1325 adapter->total_rx_bytes = 0;
1325 adapter->total_rx_packets = 0; 1326 adapter->total_rx_packets = 0;
1326 __netif_rx_schedule(&adapter->napi); 1327 __napi_schedule(&adapter->napi);
1327 } 1328 }
1328 return IRQ_HANDLED; 1329 return IRQ_HANDLED;
1329} 1330}
@@ -1698,7 +1699,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1698 1699
1699 tx_ring->next_to_use = 0; 1700 tx_ring->next_to_use = 0;
1700 tx_ring->next_to_clean = 0; 1701 tx_ring->next_to_clean = 0;
1701 spin_lock_init(&adapter->tx_queue_lock);
1702 1702
1703 return 0; 1703 return 0;
1704err: 1704err:
@@ -2007,16 +2007,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
2007 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2007 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2008 goto clean_rx; 2008 goto clean_rx;
2009 2009
2010 /* 2010 tx_cleaned = e1000_clean_tx_irq(adapter);
2011 * e1000_clean is called per-cpu. This lock protects
2012 * tx_ring from being cleaned by multiple cpus
2013 * simultaneously. A failure obtaining the lock means
2014 * tx_ring is currently being cleaned anyway.
2015 */
2016 if (spin_trylock(&adapter->tx_queue_lock)) {
2017 tx_cleaned = e1000_clean_tx_irq(adapter);
2018 spin_unlock(&adapter->tx_queue_lock);
2019 }
2020 2011
2021clean_rx: 2012clean_rx:
2022 adapter->clean_rx(adapter, &work_done, budget); 2013 adapter->clean_rx(adapter, &work_done, budget);
@@ -2028,7 +2019,7 @@ clean_rx:
2028 if (work_done < budget) { 2019 if (work_done < budget) {
2029 if (adapter->itr_setting & 3) 2020 if (adapter->itr_setting & 3)
2030 e1000_set_itr(adapter); 2021 e1000_set_itr(adapter);
2031 netif_rx_complete(napi); 2022 napi_complete(napi);
2032 if (adapter->msix_entries) 2023 if (adapter->msix_entries)
2033 ew32(IMS, adapter->rx_ring->ims_val); 2024 ew32(IMS, adapter->rx_ring->ims_val);
2034 else 2025 else
@@ -2922,8 +2913,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2922 if (e1000_alloc_queues(adapter)) 2913 if (e1000_alloc_queues(adapter))
2923 return -ENOMEM; 2914 return -ENOMEM;
2924 2915
2925 spin_lock_init(&adapter->tx_queue_lock);
2926
2927 /* Explicitly disable IRQ since the NIC can be in any state. */ 2916 /* Explicitly disable IRQ since the NIC can be in any state. */
2928 e1000_irq_disable(adapter); 2917 e1000_irq_disable(adapter);
2929 2918
@@ -3782,11 +3771,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
3782 return 0; 3771 return 0;
3783 3772
3784 switch (skb->protocol) { 3773 switch (skb->protocol) {
3785 case __constant_htons(ETH_P_IP): 3774 case cpu_to_be16(ETH_P_IP):
3786 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3775 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3787 cmd_len |= E1000_TXD_CMD_TCP; 3776 cmd_len |= E1000_TXD_CMD_TCP;
3788 break; 3777 break;
3789 case __constant_htons(ETH_P_IPV6): 3778 case cpu_to_be16(ETH_P_IPV6):
3790 /* XXX not handling all IPV6 headers */ 3779 /* XXX not handling all IPV6 headers */
3791 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3780 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3792 cmd_len |= E1000_TXD_CMD_TCP; 3781 cmd_len |= E1000_TXD_CMD_TCP;
@@ -4069,7 +4058,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4069 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 4058 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4070 unsigned int tx_flags = 0; 4059 unsigned int tx_flags = 0;
4071 unsigned int len = skb->len - skb->data_len; 4060 unsigned int len = skb->len - skb->data_len;
4072 unsigned long irq_flags;
4073 unsigned int nr_frags; 4061 unsigned int nr_frags;
4074 unsigned int mss; 4062 unsigned int mss;
4075 int count = 0; 4063 int count = 0;
@@ -4138,18 +4126,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4138 if (adapter->hw.mac.tx_pkt_filtering) 4126 if (adapter->hw.mac.tx_pkt_filtering)
4139 e1000_transfer_dhcp_info(adapter, skb); 4127 e1000_transfer_dhcp_info(adapter, skb);
4140 4128
4141 if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
4142 /* Collision - tell upper layer to requeue */
4143 return NETDEV_TX_LOCKED;
4144
4145 /* 4129 /*
4146 * need: count + 2 desc gap to keep tail from touching 4130 * need: count + 2 desc gap to keep tail from touching
4147 * head, otherwise try next time 4131 * head, otherwise try next time
4148 */ 4132 */
4149 if (e1000_maybe_stop_tx(netdev, count + 2)) { 4133 if (e1000_maybe_stop_tx(netdev, count + 2))
4150 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4151 return NETDEV_TX_BUSY; 4134 return NETDEV_TX_BUSY;
4152 }
4153 4135
4154 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 4136 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4155 tx_flags |= E1000_TX_FLAGS_VLAN; 4137 tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -4161,7 +4143,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4161 tso = e1000_tso(adapter, skb); 4143 tso = e1000_tso(adapter, skb);
4162 if (tso < 0) { 4144 if (tso < 0) {
4163 dev_kfree_skb_any(skb); 4145 dev_kfree_skb_any(skb);
4164 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4165 return NETDEV_TX_OK; 4146 return NETDEV_TX_OK;
4166 } 4147 }
4167 4148
@@ -4182,7 +4163,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4182 if (count < 0) { 4163 if (count < 0) {
4183 /* handle pci_map_single() error in e1000_tx_map */ 4164 /* handle pci_map_single() error in e1000_tx_map */
4184 dev_kfree_skb_any(skb); 4165 dev_kfree_skb_any(skb);
4185 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4186 return NETDEV_TX_OK; 4166 return NETDEV_TX_OK;
4187 } 4167 }
4188 4168
@@ -4193,7 +4173,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4193 /* Make sure there is space in the ring for the next send. */ 4173 /* Make sure there is space in the ring for the next send. */
4194 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); 4174 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
4195 4175
4196 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4197 return NETDEV_TX_OK; 4176 return NETDEV_TX_OK;
4198} 4177}
4199 4178
@@ -4543,6 +4522,14 @@ static int e1000_resume(struct pci_dev *pdev)
4543 return err; 4522 return err;
4544 } 4523 }
4545 4524
4525 /* AER (Advanced Error Reporting) hooks */
4526 err = pci_enable_pcie_error_reporting(pdev);
4527 if (err) {
4528 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4529 "0x%x\n", err);
4530 /* non-fatal, continue */
4531 }
4532
4546 pci_set_master(pdev); 4533 pci_set_master(pdev);
4547 4534
4548 pci_enable_wake(pdev, PCI_D3hot, 0); 4535 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4637,24 +4624,29 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4637 struct e1000_adapter *adapter = netdev_priv(netdev); 4624 struct e1000_adapter *adapter = netdev_priv(netdev);
4638 struct e1000_hw *hw = &adapter->hw; 4625 struct e1000_hw *hw = &adapter->hw;
4639 int err; 4626 int err;
4627 pci_ers_result_t result;
4640 4628
4641 e1000e_disable_l1aspm(pdev); 4629 e1000e_disable_l1aspm(pdev);
4642 err = pci_enable_device_mem(pdev); 4630 err = pci_enable_device_mem(pdev);
4643 if (err) { 4631 if (err) {
4644 dev_err(&pdev->dev, 4632 dev_err(&pdev->dev,
4645 "Cannot re-enable PCI device after reset.\n"); 4633 "Cannot re-enable PCI device after reset.\n");
4646 return PCI_ERS_RESULT_DISCONNECT; 4634 result = PCI_ERS_RESULT_DISCONNECT;
4647 } 4635 } else {
4648 pci_set_master(pdev); 4636 pci_set_master(pdev);
4649 pci_restore_state(pdev); 4637 pci_restore_state(pdev);
4650 4638
4651 pci_enable_wake(pdev, PCI_D3hot, 0); 4639 pci_enable_wake(pdev, PCI_D3hot, 0);
4652 pci_enable_wake(pdev, PCI_D3cold, 0); 4640 pci_enable_wake(pdev, PCI_D3cold, 0);
4653 4641
4654 e1000e_reset(adapter); 4642 e1000e_reset(adapter);
4655 ew32(WUS, ~0); 4643 ew32(WUS, ~0);
4644 result = PCI_ERS_RESULT_RECOVERED;
4645 }
4646
4647 pci_cleanup_aer_uncorrect_error_status(pdev);
4656 4648
4657 return PCI_ERS_RESULT_RECOVERED; 4649 return result;
4658} 4650}
4659 4651
4660/** 4652/**
@@ -4922,12 +4914,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4922 if (pci_using_dac) 4914 if (pci_using_dac)
4923 netdev->features |= NETIF_F_HIGHDMA; 4915 netdev->features |= NETIF_F_HIGHDMA;
4924 4916
4925 /*
4926 * We should not be using LLTX anymore, but we are still Tx faster with
4927 * it.
4928 */
4929 netdev->features |= NETIF_F_LLTX;
4930
4931 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 4917 if (e1000e_enable_mng_pass_thru(&adapter->hw))
4932 adapter->flags |= FLAG_MNG_PT_ENABLED; 4918 adapter->flags |= FLAG_MNG_PT_ENABLED;
4933 4919
@@ -5091,6 +5077,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5091{ 5077{
5092 struct net_device *netdev = pci_get_drvdata(pdev); 5078 struct net_device *netdev = pci_get_drvdata(pdev);
5093 struct e1000_adapter *adapter = netdev_priv(netdev); 5079 struct e1000_adapter *adapter = netdev_priv(netdev);
5080 int err;
5094 5081
5095 /* 5082 /*
5096 * flush_scheduled work may reschedule our watchdog task, so 5083 * flush_scheduled work may reschedule our watchdog task, so
@@ -5125,6 +5112,12 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5125 5112
5126 free_netdev(netdev); 5113 free_netdev(netdev);
5127 5114
5115 /* AER disable */
5116 err = pci_disable_pcie_error_reporting(pdev);
5117 if (err)
5118 dev_err(&pdev->dev,
5119 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5120
5128 pci_disable_device(pdev); 5121 pci_disable_device(pdev);
5129} 5122}
5130 5123
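The netdev.c changes above wire the driver into PCIe Advanced Error Reporting; pci_enable_pcie_error_reporting() and pci_disable_pcie_error_reporting() are the real PCI core helpers from <linux/aer.h>, and a failure to enable is treated as non-fatal. A minimal sketch of the pattern (function names illustrative):

#include <linux/pci.h>
#include <linux/aer.h>

static void my_enable_aer(struct pci_dev *pdev)
{
        int err = pci_enable_pcie_error_reporting(pdev);

        if (err)        /* non-fatal: the device still works without AER */
                dev_err(&pdev->dev, "AER enable failed: %d\n", err);
}

static void my_disable_aer(struct pci_dev *pdev)
{
        pci_disable_pcie_error_reporting(pdev);  /* undo at remove time */
}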
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 6271b9411cc..656cf1b8d32 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0096" 43#define DRV_VERSION "EHEA_0098"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
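The ehea_main.c changes that follow replace kzalloc(PAGE_SIZE, ...) with get_zeroed_page(), which hands back a naturally page-aligned, zeroed page for the hcall control blocks; the matching release is free_page(). A sketch of the pairing (function name illustrative):

#include <linux/gfp.h>
#include <linux/errno.h>

static int do_hcall_query(void)
{
        void *cb = (void *)get_zeroed_page(GFP_ATOMIC); /* one zeroed page */

        if (!cb)
                return -ENOMEM;
        /* ... pass cb to the hcall and read the results ... */
        free_page((unsigned long)cb);   /* pairs with get_zeroed_page() */
        return 0;
}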
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index dfe92264e82..958dacbb497 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -308,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
308 308
309 memset(stats, 0, sizeof(*stats)); 309 memset(stats, 0, sizeof(*stats));
310 310
311 cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC); 311 cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
312 if (!cb2) { 312 if (!cb2) {
313 ehea_error("no mem for cb2"); 313 ehea_error("no mem for cb2");
314 goto out; 314 goto out;
@@ -341,7 +341,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
341 stats->rx_packets = rx_packets; 341 stats->rx_packets = rx_packets;
342 342
343out_herr: 343out_herr:
344 kfree(cb2); 344 free_page((unsigned long)cb2);
345out: 345out:
346 return stats; 346 return stats;
347} 347}
@@ -370,8 +370,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
370 EHEA_L_PKT_SIZE); 370 EHEA_L_PKT_SIZE);
371 if (!skb_arr_rq1[index]) { 371 if (!skb_arr_rq1[index]) {
372 pr->rq1_skba.os_skbs = fill_wqes - i; 372 pr->rq1_skba.os_skbs = fill_wqes - i;
373 ehea_error("%s: no mem for skb/%d wqes filled",
374 dev->name, i);
375 break; 373 break;
376 } 374 }
377 } 375 }
@@ -387,26 +385,19 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
387 ehea_update_rq1a(pr->qp, adder); 385 ehea_update_rq1a(pr->qp, adder);
388} 386}
389 387
390static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) 388static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
391{ 389{
392 int ret = 0;
393 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 390 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
394 struct net_device *dev = pr->port->netdev; 391 struct net_device *dev = pr->port->netdev;
395 int i; 392 int i;
396 393
397 for (i = 0; i < pr->rq1_skba.len; i++) { 394 for (i = 0; i < pr->rq1_skba.len; i++) {
398 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 395 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
399 if (!skb_arr_rq1[i]) { 396 if (!skb_arr_rq1[i])
400 ehea_error("%s: no mem for skb/%d wqes filled", 397 break;
401 dev->name, i);
402 ret = -ENOMEM;
403 goto out;
404 }
405 } 398 }
406 /* Ring doorbell */ 399 /* Ring doorbell */
407 ehea_update_rq1a(pr->qp, nr_rq1a); 400 ehea_update_rq1a(pr->qp, nr_rq1a);
408out:
409 return ret;
410} 401}
411 402
412static int ehea_refill_rq_def(struct ehea_port_res *pr, 403static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -435,10 +426,12 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
435 u64 tmp_addr; 426 u64 tmp_addr;
436 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); 427 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
437 if (!skb) { 428 if (!skb) {
438 ehea_error("%s: no mem for skb/%d wqes filled",
439 pr->port->netdev->name, i);
440 q_skba->os_skbs = fill_wqes - i; 429 q_skba->os_skbs = fill_wqes - i;
441 ret = -ENOMEM; 430 if (q_skba->os_skbs == q_skba->len - 2) {
431 ehea_info("%s: rq%i ran dry - no mem for skb",
432 pr->port->netdev->name, rq_nr);
433 ret = -ENOMEM;
434 }
442 break; 435 break;
443 } 436 }
444 skb_reserve(skb, NET_IP_ALIGN); 437 skb_reserve(skb, NET_IP_ALIGN);
@@ -830,7 +823,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
830 while ((rx != budget) || force_irq) { 823 while ((rx != budget) || force_irq) {
831 pr->poll_counter = 0; 824 pr->poll_counter = 0;
832 force_irq = 0; 825 force_irq = 0;
833 netif_rx_complete(napi); 826 napi_complete(napi);
834 ehea_reset_cq_ep(pr->recv_cq); 827 ehea_reset_cq_ep(pr->recv_cq);
835 ehea_reset_cq_ep(pr->send_cq); 828 ehea_reset_cq_ep(pr->send_cq);
836 ehea_reset_cq_n1(pr->recv_cq); 829 ehea_reset_cq_n1(pr->recv_cq);
@@ -841,7 +834,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
841 if (!cqe && !cqe_skb) 834 if (!cqe && !cqe_skb)
842 return rx; 835 return rx;
843 836
844 if (!netif_rx_reschedule(napi)) 837 if (!napi_reschedule(napi))
845 return rx; 838 return rx;
846 839
847 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); 840 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
@@ -859,7 +852,7 @@ static void ehea_netpoll(struct net_device *dev)
859 int i; 852 int i;
860 853
861 for (i = 0; i < port->num_def_qps; i++) 854 for (i = 0; i < port->num_def_qps; i++)
862 netif_rx_schedule(&port->port_res[i].napi); 855 napi_schedule(&port->port_res[i].napi);
863} 856}
864#endif 857#endif
865 858
@@ -867,7 +860,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
867{ 860{
868 struct ehea_port_res *pr = param; 861 struct ehea_port_res *pr = param;
869 862
870 netif_rx_schedule(&pr->napi); 863 napi_schedule(&pr->napi);
871 864
872 return IRQ_HANDLED; 865 return IRQ_HANDLED;
873} 866}
@@ -915,7 +908,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
915 struct hcp_ehea_port_cb0 *cb0; 908 struct hcp_ehea_port_cb0 *cb0;
916 909
917 /* may be called via ehea_neq_tasklet() */ 910 /* may be called via ehea_neq_tasklet() */
918 cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); 911 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
919 if (!cb0) { 912 if (!cb0) {
920 ehea_error("no mem for cb0"); 913 ehea_error("no mem for cb0");
921 ret = -ENOMEM; 914 ret = -ENOMEM;
@@ -996,7 +989,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
996out_free: 989out_free:
997 if (ret || netif_msg_probe(port)) 990 if (ret || netif_msg_probe(port))
998 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); 991 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
999 kfree(cb0); 992 free_page((unsigned long)cb0);
1000out: 993out:
1001 return ret; 994 return ret;
1002} 995}
@@ -1007,7 +1000,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1007 u64 hret; 1000 u64 hret;
1008 int ret = 0; 1001 int ret = 0;
1009 1002
1010 cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1003 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1011 if (!cb4) { 1004 if (!cb4) {
1012 ehea_error("no mem for cb4"); 1005 ehea_error("no mem for cb4");
1013 ret = -ENOMEM; 1006 ret = -ENOMEM;
@@ -1075,7 +1068,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1075 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) 1068 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
1076 netif_carrier_on(port->netdev); 1069 netif_carrier_on(port->netdev);
1077 1070
1078 kfree(cb4); 1071 free_page((unsigned long)cb4);
1079out: 1072out:
1080 return ret; 1073 return ret;
1081} 1074}
@@ -1201,11 +1194,11 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
1201 int ret; 1194 int ret;
1202 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1195 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1203 1196
1204 ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 1197 ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
1205 - init_attr->act_nr_rwqes_rq2 1198 - init_attr->act_nr_rwqes_rq2
1206 - init_attr->act_nr_rwqes_rq3 - 1); 1199 - init_attr->act_nr_rwqes_rq3 - 1);
1207 1200
1208 ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1201 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1209 1202
1210 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); 1203 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
1211 1204
@@ -1302,7 +1295,7 @@ static int ehea_configure_port(struct ehea_port *port)
1302 struct hcp_ehea_port_cb0 *cb0; 1295 struct hcp_ehea_port_cb0 *cb0;
1303 1296
1304 ret = -ENOMEM; 1297 ret = -ENOMEM;
1305 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1298 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1306 if (!cb0) 1299 if (!cb0)
1307 goto out; 1300 goto out;
1308 1301
@@ -1338,7 +1331,7 @@ static int ehea_configure_port(struct ehea_port *port)
1338 ret = 0; 1331 ret = 0;
1339 1332
1340out_free: 1333out_free:
1341 kfree(cb0); 1334 free_page((unsigned long)cb0);
1342out: 1335out:
1343 return ret; 1336 return ret;
1344} 1337}
@@ -1748,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1748 goto out; 1741 goto out;
1749 } 1742 }
1750 1743
1751 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1744 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1752 if (!cb0) { 1745 if (!cb0) {
1753 ehea_error("no mem for cb0"); 1746 ehea_error("no mem for cb0");
1754 ret = -ENOMEM; 1747 ret = -ENOMEM;
@@ -1793,7 +1786,7 @@ out_upregs:
1793 ehea_update_bcmc_registrations(); 1786 ehea_update_bcmc_registrations();
1794 spin_unlock(&ehea_bcmc_regs.lock); 1787 spin_unlock(&ehea_bcmc_regs.lock);
1795out_free: 1788out_free:
1796 kfree(cb0); 1789 free_page((unsigned long)cb0);
1797out: 1790out:
1798 return ret; 1791 return ret;
1799} 1792}
@@ -1817,7 +1810,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1817 if ((enable && port->promisc) || (!enable && !port->promisc)) 1810 if ((enable && port->promisc) || (!enable && !port->promisc))
1818 return; 1811 return;
1819 1812
1820 cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC); 1813 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1821 if (!cb7) { 1814 if (!cb7) {
1822 ehea_error("no mem for cb7"); 1815 ehea_error("no mem for cb7");
1823 goto out; 1816 goto out;
@@ -1836,7 +1829,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1836 1829
1837 port->promisc = enable; 1830 port->promisc = enable;
1838out: 1831out:
1839 kfree(cb7); 1832 free_page((unsigned long)cb7);
1840 return; 1833 return;
1841} 1834}
1842 1835
@@ -2217,7 +2210,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2217 2210
2218 port->vgrp = grp; 2211 port->vgrp = grp;
2219 2212
2220 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2213 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2221 if (!cb1) { 2214 if (!cb1) {
2222 ehea_error("no mem for cb1"); 2215 ehea_error("no mem for cb1");
2223 goto out; 2216 goto out;
@@ -2228,7 +2221,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2228 if (hret != H_SUCCESS) 2221 if (hret != H_SUCCESS)
2229 ehea_error("modify_ehea_port failed"); 2222 ehea_error("modify_ehea_port failed");
2230 2223
2231 kfree(cb1); 2224 free_page((unsigned long)cb1);
2232out: 2225out:
2233 return; 2226 return;
2234} 2227}
@@ -2241,7 +2234,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2241 int index; 2234 int index;
2242 u64 hret; 2235 u64 hret;
2243 2236
2244 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2237 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2245 if (!cb1) { 2238 if (!cb1) {
2246 ehea_error("no mem for cb1"); 2239 ehea_error("no mem for cb1");
2247 goto out; 2240 goto out;
@@ -2262,7 +2255,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2262 if (hret != H_SUCCESS) 2255 if (hret != H_SUCCESS)
2263 ehea_error("modify_ehea_port failed"); 2256 ehea_error("modify_ehea_port failed");
2264out: 2257out:
2265 kfree(cb1); 2258 free_page((unsigned long)cb1);
2266 return; 2259 return;
2267} 2260}
2268 2261
@@ -2276,7 +2269,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2276 2269
2277 vlan_group_set_device(port->vgrp, vid, NULL); 2270 vlan_group_set_device(port->vgrp, vid, NULL);
2278 2271
2279 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2272 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2280 if (!cb1) { 2273 if (!cb1) {
2281 ehea_error("no mem for cb1"); 2274 ehea_error("no mem for cb1");
2282 goto out; 2275 goto out;
@@ -2297,7 +2290,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2297 if (hret != H_SUCCESS) 2290 if (hret != H_SUCCESS)
2298 ehea_error("modify_ehea_port failed"); 2291 ehea_error("modify_ehea_port failed");
2299out: 2292out:
2300 kfree(cb1); 2293 free_page((unsigned long)cb1);
2301 return; 2294 return;
2302} 2295}
2303 2296
@@ -2309,7 +2302,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2309 u64 dummy64 = 0; 2302 u64 dummy64 = 0;
2310 struct hcp_modify_qp_cb0 *cb0; 2303 struct hcp_modify_qp_cb0 *cb0;
2311 2304
2312 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2305 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2313 if (!cb0) { 2306 if (!cb0) {
2314 ret = -ENOMEM; 2307 ret = -ENOMEM;
2315 goto out; 2308 goto out;
@@ -2372,7 +2365,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2372 2365
2373 ret = 0; 2366 ret = 0;
2374out: 2367out:
2375 kfree(cb0); 2368 free_page((unsigned long)cb0);
2376 return ret; 2369 return ret;
2377} 2370}
2378 2371
@@ -2664,7 +2657,7 @@ int ehea_stop_qps(struct net_device *dev)
2664 u64 dummy64 = 0; 2657 u64 dummy64 = 0;
2665 u16 dummy16 = 0; 2658 u16 dummy16 = 0;
2666 2659
2667 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2660 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2668 if (!cb0) { 2661 if (!cb0) {
2669 ret = -ENOMEM; 2662 ret = -ENOMEM;
2670 goto out; 2663 goto out;
@@ -2716,7 +2709,7 @@ int ehea_stop_qps(struct net_device *dev)
2716 2709
2717 ret = 0; 2710 ret = 0;
2718out: 2711out:
2719 kfree(cb0); 2712 free_page((unsigned long)cb0);
2720 2713
2721 return ret; 2714 return ret;
2722} 2715}
@@ -2766,7 +2759,7 @@ int ehea_restart_qps(struct net_device *dev)
2766 u64 dummy64 = 0; 2759 u64 dummy64 = 0;
2767 u16 dummy16 = 0; 2760 u16 dummy16 = 0;
2768 2761
2769 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2762 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2770 if (!cb0) { 2763 if (!cb0) {
2771 ret = -ENOMEM; 2764 ret = -ENOMEM;
2772 goto out; 2765 goto out;
@@ -2819,7 +2812,7 @@ int ehea_restart_qps(struct net_device *dev)
2819 ehea_refill_rq3(pr, 0); 2812 ehea_refill_rq3(pr, 0);
2820 } 2813 }
2821out: 2814out:
2822 kfree(cb0); 2815 free_page((unsigned long)cb0);
2823 2816
2824 return ret; 2817 return ret;
2825} 2818}
@@ -2950,7 +2943,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2950 u64 hret; 2943 u64 hret;
2951 int ret; 2944 int ret;
2952 2945
2953 cb = kzalloc(PAGE_SIZE, GFP_KERNEL); 2946 cb = (void *)get_zeroed_page(GFP_KERNEL);
2954 if (!cb) { 2947 if (!cb) {
2955 ret = -ENOMEM; 2948 ret = -ENOMEM;
2956 goto out; 2949 goto out;
@@ -2967,7 +2960,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2967 ret = 0; 2960 ret = 0;
2968 2961
2969out_herr: 2962out_herr:
2970 kfree(cb); 2963 free_page((unsigned long)cb);
2971out: 2964out:
2972 return ret; 2965 return ret;
2973} 2966}
@@ -2981,7 +2974,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2981 *jumbo = 0; 2974 *jumbo = 0;
2982 2975
2983 /* (Try to) enable *jumbo frames */ 2976 /* (Try to) enable *jumbo frames */
2984 cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2977 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2985 if (!cb4) { 2978 if (!cb4) {
2986 ehea_error("no mem for cb4"); 2979 ehea_error("no mem for cb4");
2987 ret = -ENOMEM; 2980 ret = -ENOMEM;
@@ -3009,7 +3002,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
3009 } else 3002 } else
3010 ret = -EINVAL; 3003 ret = -EINVAL;
3011 3004
3012 kfree(cb4); 3005 free_page((unsigned long)cb4);
3013 } 3006 }
3014out: 3007out:
3015 return ret; 3008 return ret;
@@ -3040,7 +3033,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
3040 port->ofdev.dev.parent = &port->adapter->ofdev->dev; 3033 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
3041 port->ofdev.dev.bus = &ibmebus_bus_type; 3034 port->ofdev.dev.bus = &ibmebus_bus_type;
3042 3035
3043 sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++); 3036 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
3044 port->ofdev.dev.release = logical_port_release; 3037 port->ofdev.dev.release = logical_port_release;
3045 3038
3046 ret = of_device_register(&port->ofdev); 3039 ret = of_device_register(&port->ofdev);
@@ -3069,6 +3062,22 @@ static void ehea_unregister_port(struct ehea_port *port)
3069 of_device_unregister(&port->ofdev); 3062 of_device_unregister(&port->ofdev);
3070} 3063}
3071 3064
3065static const struct net_device_ops ehea_netdev_ops = {
3066 .ndo_open = ehea_open,
3067 .ndo_stop = ehea_stop,
3068 .ndo_start_xmit = ehea_start_xmit,
3069#ifdef CONFIG_NET_POLL_CONTROLLER
3070 .ndo_poll_controller = ehea_netpoll,
3071#endif
3072 .ndo_get_stats = ehea_get_stats,
3073 .ndo_set_mac_address = ehea_set_mac_addr,
3074 .ndo_set_multicast_list = ehea_set_multicast_list,
3075 .ndo_change_mtu = ehea_change_mtu,
3076 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3077 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
3078 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid
3079};
3080
3072struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, 3081struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3073 u32 logical_port_id, 3082 u32 logical_port_id,
3074 struct device_node *dn) 3083 struct device_node *dn)
@@ -3121,19 +3130,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3121 /* initialize net_device structure */ 3130 /* initialize net_device structure */
3122 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); 3131 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3123 3132
3124 dev->open = ehea_open; 3133 dev->netdev_ops = &ehea_netdev_ops;
3125#ifdef CONFIG_NET_POLL_CONTROLLER 3134 ehea_set_ethtool_ops(dev);
3126 dev->poll_controller = ehea_netpoll; 3135
3127#endif
3128 dev->stop = ehea_stop;
3129 dev->hard_start_xmit = ehea_start_xmit;
3130 dev->get_stats = ehea_get_stats;
3131 dev->set_multicast_list = ehea_set_multicast_list;
3132 dev->set_mac_address = ehea_set_mac_addr;
3133 dev->change_mtu = ehea_change_mtu;
3134 dev->vlan_rx_register = ehea_vlan_rx_register;
3135 dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
3136 dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
3137 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO 3136 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3138 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3137 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3139 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3138 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
@@ -3142,7 +3141,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3142 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3141 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3143 3142
3144 INIT_WORK(&port->reset_task, ehea_reset_port); 3143 INIT_WORK(&port->reset_task, ehea_reset_port);
3145 ehea_set_ethtool_ops(dev);
3146 3144
3147 ret = register_netdev(dev); 3145 ret = register_netdev(dev);
3148 if (ret) { 3146 if (ret) {
@@ -3450,6 +3448,7 @@ out_kill_eq:
3450 ehea_destroy_eq(adapter->neq); 3448 ehea_destroy_eq(adapter->neq);
3451 3449
3452out_free_ad: 3450out_free_ad:
3451 list_del(&adapter->list);
3453 kfree(adapter); 3452 kfree(adapter);
3454 3453
3455out: 3454out:
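
The ehea_main.c hunks above also convert the driver from assigning individual callbacks on struct net_device (dev->open, dev->stop, dev->hard_start_xmit, dev->get_stats, dev->set_mac_address, dev->vlan_rx_*) to a single static const struct net_device_ops bound through dev->netdev_ops. A minimal sketch of that conversion pattern for a hypothetical driver "foo" (the foo_* handlers are placeholders, not ehea code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_open(struct net_device *dev)
{
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	return 0;
}

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* One ops table replaces the per-field assignments (.ndo_open was
 * dev->open, .ndo_start_xmit was dev->hard_start_xmit, and so on). */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_start_xmit,
};

static void foo_setup(struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;
}

ehea_setup_single_port() in the hunks above does exactly this with ehea_netdev_ops and then calls ehea_set_ethtool_ops(dev).
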
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 49d766ebbcf..3747457f5e6 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -1005,7 +1005,7 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
1005 unsigned long ret; 1005 unsigned long ret;
1006 u64 *rblock; 1006 u64 *rblock;
1007 1007
1008 rblock = kzalloc(PAGE_SIZE, GFP_KERNEL); 1008 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1009 if (!rblock) { 1009 if (!rblock) {
1010 ehea_error("Cannot allocate rblock memory."); 1010 ehea_error("Cannot allocate rblock memory.");
1011 return; 1011 return;
@@ -1022,5 +1022,5 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
1022 else 1022 else
1023 ehea_error("Error data could not be fetched: %llX", res_handle); 1023 ehea_error("Error data could not be fetched: %llX", res_handle);
1024 1024
1025 kfree(rblock); 1025 free_page((unsigned long)rblock);
1026} 1026}
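
A pattern repeated throughout the ehea changes above: every firmware control block (cb0, cb1, cb2, cb4, cb7, and the rblock in ehea_qmr.c) moves from kzalloc(PAGE_SIZE, ...)/kfree() to get_zeroed_page()/free_page(). get_zeroed_page() returns a whole zeroed, page-aligned page, whereas kzalloc(PAGE_SIZE) is not strictly guaranteed to be page-aligned (slab debug options can offset the allocation), which presumably matters for buffers handed to the hypervisor calls. A minimal sketch of the pairing, with a placeholder function name and hcall comment:

#include <linux/errno.h>
#include <linux/gfp.h>

static int ehea_query_example(void)
{
	void *cb;
	int ret = 0;

	/* One zeroed, page-aligned page for the firmware control block */
	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	/* ... pass cb to the ehea_h_* query/modify hcall here ... */

	free_page((unsigned long)cb);	/* pairs with get_zeroed_page(), not kfree() */
	return ret;
}

Callers running in atomic context above (ehea_get_stats(), ehea_sense_port_attr(), ehea_promiscuous()) keep using GFP_ATOMIC, as before.
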
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a832cc5d6a1..c26cea0b300 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,7 +33,7 @@
33 33
34#define DRV_NAME "enic" 34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" 35#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
36#define DRV_VERSION "1.0.0.648" 36#define DRV_VERSION "1.0.0.933"
37#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc" 37#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc"
38#define PFX DRV_NAME ": " 38#define PFX DRV_NAME ": "
39 39
@@ -97,6 +97,7 @@ struct enic {
97 ____cacheline_aligned struct vnic_rq rq[1]; 97 ____cacheline_aligned struct vnic_rq rq[1];
98 unsigned int rq_count; 98 unsigned int rq_count;
99 int (*rq_alloc_buf)(struct vnic_rq *rq); 99 int (*rq_alloc_buf)(struct vnic_rq *rq);
100 u64 rq_bad_fcs;
100 struct napi_struct napi; 101 struct napi_struct napi;
101 struct net_lro_mgr lro_mgr; 102 struct net_lro_mgr lro_mgr;
102 struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC]; 103 struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
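
The rq_bad_fcs field added here is a software drop counter: in the enic_main.c hunks below, the ratelimited "packet error: bad FCS" printk in the receive-completion path is removed in favour of a simple increment of this counter, and enic_get_stats() reports it as net_stats->rx_crc_errors in place of the firmware's stats->rx.rx_crc_errors. A hypothetical condensation of that receive-side change (a sketch, not the driver's exact code):

#include <linux/skbuff.h>

#include "enic.h"

/* A frame with a bad FCS is dropped and counted, no longer logged;
 * enic_get_stats() later surfaces the counter as rx_crc_errors. */
static void example_enic_drop_bad_fcs(struct enic *enic, struct sk_buff *skb)
{
	enic->rq_bad_fcs++;
	dev_kfree_skb_any(skb);
}
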
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 7d60551d538..03403a51f7e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -400,10 +400,13 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
400 return IRQ_NONE; /* not our interrupt */ 400 return IRQ_NONE; /* not our interrupt */
401 } 401 }
402 402
403 if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) 403 if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
404 vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
404 enic_notify_check(enic); 405 enic_notify_check(enic);
406 }
405 407
406 if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) { 408 if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
409 vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
407 enic_log_q_error(enic); 410 enic_log_q_error(enic);
408 /* schedule recovery from WQ/RQ error */ 411 /* schedule recovery from WQ/RQ error */
409 schedule_work(&enic->reset); 412 schedule_work(&enic->reset);
@@ -411,8 +414,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
411 } 414 }
412 415
413 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { 416 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
414 if (netif_rx_schedule_prep(&enic->napi)) 417 if (napi_schedule_prep(&enic->napi))
415 __netif_rx_schedule(&enic->napi); 418 __napi_schedule(&enic->napi);
416 } else { 419 } else {
417 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); 420 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
418 } 421 }
@@ -440,7 +443,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
440 * writes). 443 * writes).
441 */ 444 */
442 445
443 netif_rx_schedule(&enic->napi); 446 napi_schedule(&enic->napi);
444 447
445 return IRQ_HANDLED; 448 return IRQ_HANDLED;
446} 449}
@@ -450,7 +453,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
450 struct enic *enic = data; 453 struct enic *enic = data;
451 454
452 /* schedule NAPI polling for RQ cleanup */ 455 /* schedule NAPI polling for RQ cleanup */
453 netif_rx_schedule(&enic->napi); 456 napi_schedule(&enic->napi);
454 457
455 return IRQ_HANDLED; 458 return IRQ_HANDLED;
456} 459}
@@ -476,6 +479,8 @@ static irqreturn_t enic_isr_msix_err(int irq, void *data)
476{ 479{
477 struct enic *enic = data; 480 struct enic *enic = data;
478 481
482 vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);
483
479 enic_log_q_error(enic); 484 enic_log_q_error(enic);
480 485
481 /* schedule recovery from WQ/RQ error */ 486 /* schedule recovery from WQ/RQ error */
@@ -488,8 +493,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
488{ 493{
489 struct enic *enic = data; 494 struct enic *enic = data;
490 495
496 vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
491 enic_notify_check(enic); 497 enic_notify_check(enic);
492 vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);
493 498
494 return IRQ_HANDLED; 499 return IRQ_HANDLED;
495} 500}
@@ -570,11 +575,11 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
570 * to each TCP segment resulting from the TSO. 575 * to each TCP segment resulting from the TSO.
571 */ 576 */
572 577
573 if (skb->protocol == __constant_htons(ETH_P_IP)) { 578 if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
574 ip_hdr(skb)->check = 0; 579 ip_hdr(skb)->check = 0;
575 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 580 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
576 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 581 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
577 } else if (skb->protocol == __constant_htons(ETH_P_IPV6)) { 582 } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
578 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 583 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
579 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 584 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
580 } 585 }
@@ -616,7 +621,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
616 vlan_tag_insert, vlan_tag); 621 vlan_tag_insert, vlan_tag);
617} 622}
618 623
619/* netif_tx_lock held, process context with BHs disabled */ 624/* netif_tx_lock held, process context with BHs disabled, or BH */
620static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) 625static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
621{ 626{
622 struct enic *enic = netdev_priv(netdev); 627 struct enic *enic = netdev_priv(netdev);
@@ -683,7 +688,7 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
683 net_stats->rx_bytes = stats->rx.rx_bytes_ok; 688 net_stats->rx_bytes = stats->rx.rx_bytes_ok;
684 net_stats->rx_errors = stats->rx.rx_errors; 689 net_stats->rx_errors = stats->rx.rx_errors;
685 net_stats->multicast = stats->rx.rx_multicast_frames_ok; 690 net_stats->multicast = stats->rx.rx_multicast_frames_ok;
686 net_stats->rx_crc_errors = stats->rx.rx_crc_errors; 691 net_stats->rx_crc_errors = enic->rq_bad_fcs;
687 net_stats->rx_dropped = stats->rx.rx_no_bufs; 692 net_stats->rx_dropped = stats->rx.rx_no_bufs;
688 693
689 return net_stats; 694 return net_stats;
@@ -928,12 +933,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
928 933
929 if (packet_error) { 934 if (packet_error) {
930 935
931 if (bytes_written > 0 && !fcs_ok) { 936 if (bytes_written > 0 && !fcs_ok)
932 if (net_ratelimit()) 937 enic->rq_bad_fcs++;
933 printk(KERN_ERR PFX
934 "%s: packet error: bad FCS\n",
935 netdev->name);
936 }
937 938
938 dev_kfree_skb_any(skb); 939 dev_kfree_skb_any(skb);
939 940
@@ -1068,8 +1069,8 @@ static int enic_poll(struct napi_struct *napi, int budget)
1068 if (netdev->features & NETIF_F_LRO) 1069 if (netdev->features & NETIF_F_LRO)
1069 lro_flush_all(&enic->lro_mgr); 1070 lro_flush_all(&enic->lro_mgr);
1070 1071
1071 netif_rx_complete(napi); 1072 napi_complete(napi);
1072 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1073 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
1073 } 1074 }
1074 1075
1075 return rq_work_done; 1076 return rq_work_done;
@@ -1095,9 +1096,9 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1095 1096
1096 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1097 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1097 1098
1098 /* Accumulate intr event credits for this polling 1099 /* Return intr event credits for this polling
1099 * cycle. An intr event is the completion of a 1100 * cycle. An intr event is the completion of a
1100 * a WQ or RQ packet. 1101 * RQ packet.
1101 */ 1102 */
1102 1103
1103 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], 1104 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
@@ -1112,7 +1113,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1112 if (netdev->features & NETIF_F_LRO) 1113 if (netdev->features & NETIF_F_LRO)
1113 lro_flush_all(&enic->lro_mgr); 1114 lro_flush_all(&enic->lro_mgr);
1114 1115
1115 netif_rx_complete(napi); 1116 napi_complete(napi);
1116 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1117 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1117 } 1118 }
1118 1119
@@ -1461,6 +1462,26 @@ static int enic_dev_soft_reset(struct enic *enic)
1461 return err; 1462 return err;
1462} 1463}
1463 1464
1465static int enic_set_niccfg(struct enic *enic)
1466{
1467 const u8 rss_default_cpu = 0;
1468 const u8 rss_hash_type = 0;
1469 const u8 rss_hash_bits = 0;
1470 const u8 rss_base_cpu = 0;
1471 const u8 rss_enable = 0;
1472 const u8 tso_ipid_split_en = 0;
1473 const u8 ig_vlan_strip_en = 1;
1474
1475 /* Enable VLAN tag stripping. RSS not enabled (yet).
1476 */
1477
1478 return enic_set_nic_cfg(enic,
1479 rss_default_cpu, rss_hash_type,
1480 rss_hash_bits, rss_base_cpu,
1481 rss_enable, tso_ipid_split_en,
1482 ig_vlan_strip_en);
1483}
1484
1464static void enic_reset(struct work_struct *work) 1485static void enic_reset(struct work_struct *work)
1465{ 1486{
1466 struct enic *enic = container_of(work, struct enic, reset); 1487 struct enic *enic = container_of(work, struct enic, reset);
@@ -1476,8 +1497,10 @@ static void enic_reset(struct work_struct *work)
1476 1497
1477 enic_stop(enic->netdev); 1498 enic_stop(enic->netdev);
1478 enic_dev_soft_reset(enic); 1499 enic_dev_soft_reset(enic);
1500 vnic_dev_init(enic->vdev, 0);
1479 enic_reset_mcaddrs(enic); 1501 enic_reset_mcaddrs(enic);
1480 enic_init_vnic_resources(enic); 1502 enic_init_vnic_resources(enic);
1503 enic_set_niccfg(enic);
1481 enic_open(enic->netdev); 1504 enic_open(enic->netdev);
1482 1505
1483 rtnl_unlock(); 1506 rtnl_unlock();
@@ -1620,14 +1643,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1620 unsigned int i; 1643 unsigned int i;
1621 int err; 1644 int err;
1622 1645
1623 const u8 rss_default_cpu = 0;
1624 const u8 rss_hash_type = 0;
1625 const u8 rss_hash_bits = 0;
1626 const u8 rss_base_cpu = 0;
1627 const u8 rss_enable = 0;
1628 const u8 tso_ipid_split_en = 0;
1629 const u8 ig_vlan_strip_en = 1;
1630
1631 /* Allocate net device structure and initialize. Private 1646 /* Allocate net device structure and initialize. Private
1632 * instance data is initialized to zero. 1647 * instance data is initialized to zero.
1633 */ 1648 */
@@ -1793,14 +1808,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1793 1808
1794 enic_init_vnic_resources(enic); 1809 enic_init_vnic_resources(enic);
1795 1810
1796 /* Enable VLAN tag stripping. RSS not enabled (yet). 1811 err = enic_set_niccfg(enic);
1797 */
1798
1799 err = enic_set_nic_cfg(enic,
1800 rss_default_cpu, rss_hash_type,
1801 rss_hash_bits, rss_base_cpu,
1802 rss_enable, tso_ipid_split_en,
1803 ig_vlan_strip_en);
1804 if (err) { 1812 if (err) {
1805 printk(KERN_ERR PFX 1813 printk(KERN_ERR PFX
1806 "Failed to config nic, aborting.\n"); 1814 "Failed to config nic, aborting.\n");
@@ -1858,7 +1866,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1858 if (using_dac) 1866 if (using_dac)
1859 netdev->features |= NETIF_F_HIGHDMA; 1867 netdev->features |= NETIF_F_HIGHDMA;
1860 1868
1861
1862 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); 1869 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
1863 1870
1864 enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR; 1871 enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
@@ -1870,7 +1877,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1870 enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE; 1877 enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
1871 enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1878 enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1872 1879
1873
1874 err = register_netdev(netdev); 1880 err = register_netdev(netdev);
1875 if (err) { 1881 if (err) {
1876 printk(KERN_ERR PFX 1882 printk(KERN_ERR PFX
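
A behavioural thread running through the enic interrupt hunks above: the legacy-INTx notify and error paths and the MSI-X notify and error handlers now call vnic_intr_return_all_credits() before doing their work. As the helper added in vnic_intr.h further down shows, it reads the outstanding credit count and writes it back with the unmask and reset_timer bits set, so acknowledging the events and re-arming the interrupt happen in one register write; that is presumably why the explicit vnic_intr_unmask() at the end of the MSI-X notify handler could be dropped. Roughly, what the helper amounts to (a sketch of the existing helper, not a new API):

#include <linux/io.h>

#include "vnic_intr.h"

static inline void example_return_all_credits(struct vnic_intr *intr)
{
	/* Reading int_credits does not clear it; the write below returns them. */
	unsigned int credits = ioread32(&intr->ctrl->int_credits);

	vnic_intr_return_credits(intr, credits,
				 1,	/* unmask: re-arm the interrupt line */
				 1);	/* reset_timer: restart the coalescing timer */
}
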
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 11708579b6c..e21b9d636ae 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -34,6 +34,9 @@ struct vnic_res {
34 unsigned int count; 34 unsigned int count;
35}; 35};
36 36
37#define VNIC_DEV_CAP_INIT 0x0001
38#define VNIC_DEV_CAP_PERBI 0x0002
39
37struct vnic_dev { 40struct vnic_dev {
38 void *priv; 41 void *priv;
39 struct pci_dev *pdev; 42 struct pci_dev *pdev;
@@ -50,6 +53,7 @@ struct vnic_dev {
50 dma_addr_t stats_pa; 53 dma_addr_t stats_pa;
51 struct vnic_devcmd_fw_info *fw_info; 54 struct vnic_devcmd_fw_info *fw_info;
52 dma_addr_t fw_info_pa; 55 dma_addr_t fw_info_pa;
56 u32 cap_flags;
53}; 57};
54 58
55#define VNIC_MAX_RES_HDR_SIZE \ 59#define VNIC_MAX_RES_HDR_SIZE \
@@ -575,9 +579,9 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
575{ 579{
576 u64 a0 = (u32)arg, a1 = 0; 580 u64 a0 = (u32)arg, a1 = 0;
577 int wait = 1000; 581 int wait = 1000;
578 int r = 0; 582 int r = 0;
579 583
580 if (vnic_dev_capable(vdev, CMD_INIT)) 584 if (vdev->cap_flags & VNIC_DEV_CAP_INIT)
581 r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); 585 r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
582 else { 586 else {
583 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); 587 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
@@ -587,8 +591,8 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
587 vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); 591 vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
588 vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); 592 vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
589 } 593 }
590 } 594 }
591 return r; 595 return r;
592} 596}
593 597
594int vnic_dev_link_status(struct vnic_dev *vdev) 598int vnic_dev_link_status(struct vnic_dev *vdev)
@@ -626,6 +630,22 @@ u32 vnic_dev_mtu(struct vnic_dev *vdev)
626 return vdev->notify_copy.mtu; 630 return vdev->notify_copy.mtu;
627} 631}
628 632
633u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
634{
635 if (!vnic_dev_notify_ready(vdev))
636 return 0;
637
638 return vdev->notify_copy.link_down_cnt;
639}
640
641u32 vnic_dev_notify_status(struct vnic_dev *vdev)
642{
643 if (!vnic_dev_notify_ready(vdev))
644 return 0;
645
646 return vdev->notify_copy.status;
647}
648
629void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 649void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
630 enum vnic_dev_intr_mode intr_mode) 650 enum vnic_dev_intr_mode intr_mode)
631{ 651{
@@ -682,6 +702,11 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
682 if (!vdev->devcmd) 702 if (!vdev->devcmd)
683 goto err_out; 703 goto err_out;
684 704
705 vdev->cap_flags = 0;
706
707 if (vnic_dev_capable(vdev, CMD_INIT))
708 vdev->cap_flags |= VNIC_DEV_CAP_INIT;
709
685 return vdev; 710 return vdev;
686 711
687err_out: 712err_out:
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index b9dc1821c80..8aa8db2fd03 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -102,6 +102,8 @@ int vnic_dev_link_status(struct vnic_dev *vdev);
102u32 vnic_dev_port_speed(struct vnic_dev *vdev); 102u32 vnic_dev_port_speed(struct vnic_dev *vdev);
103u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); 103u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
104u32 vnic_dev_mtu(struct vnic_dev *vdev); 104u32 vnic_dev_mtu(struct vnic_dev *vdev);
105u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
106u32 vnic_dev_notify_status(struct vnic_dev *vdev);
105int vnic_dev_close(struct vnic_dev *vdev); 107int vnic_dev_close(struct vnic_dev *vdev);
106int vnic_dev_enable(struct vnic_dev *vdev); 108int vnic_dev_enable(struct vnic_dev *vdev);
107int vnic_dev_disable(struct vnic_dev *vdev); 109int vnic_dev_disable(struct vnic_dev *vdev);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 8062c75154e..2587f34fbfb 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -191,7 +191,7 @@ enum vnic_devcmd_cmd {
191 CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), 191 CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
192 192
193 /* INT13 API: (u64)a0=paddr to vnic_int13_params struct 193 /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
194 * (u8)a1=INT13_CMD_xxx */ 194 * (u32)a1=INT13_CMD_xxx */
195 CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), 195 CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
196 196
197 /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */ 197 /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
@@ -207,6 +207,11 @@ enum vnic_devcmd_cmd {
207 * in: (u32)a0=cmd 207 * in: (u32)a0=cmd
208 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ 208 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
209 CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), 209 CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
210
211 /* persistent binding info
212 * in: (u64)a0=paddr of arg
213 * (u32)a1=CMD_PERBI_XXX */
214 CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
210}; 215};
211 216
212/* flags for CMD_OPEN */ 217/* flags for CMD_OPEN */
@@ -259,6 +264,7 @@ struct vnic_devcmd_notify {
259 u32 status; /* status bits (see VNIC_STF_*) */ 264 u32 status; /* status bits (see VNIC_STF_*) */
260 u32 error; /* error code (see ERR_*) for first ERR */ 265 u32 error; /* error code (see ERR_*) for first ERR */
261 u32 link_down_cnt; /* running count of link down transitions */ 266 u32 link_down_cnt; /* running count of link down transitions */
267 u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */
262}; 268};
263#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ 269#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
264#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */ 270#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index ce633a5a7e3..9a53604edce 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -76,6 +76,20 @@ static inline void vnic_intr_return_credits(struct vnic_intr *intr,
76 iowrite32(int_credit_return, &intr->ctrl->int_credit_return); 76 iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
77} 77}
78 78
79static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
80{
81 return ioread32(&intr->ctrl->int_credits);
82}
83
84static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
85{
86 unsigned int credits = vnic_intr_credits(intr);
87 int unmask = 1;
88 int reset_timer = 1;
89
90 vnic_intr_return_credits(intr, credits, unmask, reset_timer);
91}
92
79static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) 93static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
80{ 94{
81 /* read PBA without clearing */ 95 /* read PBA without clearing */
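
A brief usage note on the two inline helpers added above: vnic_intr_credits() is a plain ioread32() of the credits register (a read that does not clear it), and vnic_intr_return_all_credits() simply feeds that value back through vnic_intr_return_credits() with unmask and reset_timer both set, which is how the enic handlers earlier in this diff acknowledge and re-arm an interrupt in one step.
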
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index a539bc3163c..b60e27dfcfa 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1114,9 +1114,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1114 1114
1115 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { 1115 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1116 spin_lock(&ep->napi_lock); 1116 spin_lock(&ep->napi_lock);
1117 if (netif_rx_schedule_prep(&ep->napi)) { 1117 if (napi_schedule_prep(&ep->napi)) {
1118 epic_napi_irq_off(dev, ep); 1118 epic_napi_irq_off(dev, ep);
1119 __netif_rx_schedule(&ep->napi); 1119 __napi_schedule(&ep->napi);
1120 } else 1120 } else
1121 ep->reschedule_in_poll++; 1121 ep->reschedule_in_poll++;
1122 spin_unlock(&ep->napi_lock); 1122 spin_unlock(&ep->napi_lock);
@@ -1293,7 +1293,7 @@ rx_action:
1293 1293
1294 more = ep->reschedule_in_poll; 1294 more = ep->reschedule_in_poll;
1295 if (!more) { 1295 if (!more) {
1296 __netif_rx_complete(napi); 1296 __napi_complete(napi);
1297 outl(EpicNapiEvent, ioaddr + INTSTAT); 1297 outl(EpicNapiEvent, ioaddr + INTSTAT);
1298 epic_napi_irq_on(dev, ep); 1298 epic_napi_irq_on(dev, ep);
1299 } else 1299 } else
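
The ehea, enic and epic100 hunks above all pick up the same tree-wide rename: netif_rx_schedule(), netif_rx_schedule_prep(), __netif_rx_schedule(), netif_rx_complete() and netif_rx_reschedule() become napi_schedule(), napi_schedule_prep(), __napi_schedule(), napi_complete() and napi_reschedule(). The calls take only the napi_struct and behave as before. A driver-neutral sketch of the post-rename idiom (the example_* names are placeholders):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static irqreturn_t example_rx_irq(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {	/* was netif_rx_schedule_prep() */
		/* mask device RX interrupts here */
		__napi_schedule(&priv->napi);	/* was __netif_rx_schedule() */
	}
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done = 0;

	/* ... clean up to 'budget' packets from priv's RX ring, counting them in 'done' ... */

	if (done < budget) {
		napi_complete(napi);		/* was netif_rx_complete() */
		/* unmask device RX interrupts here */
	}
	return done;
}
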
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2769083bfe8..fe2650237e3 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -36,30 +36,43 @@
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/bitops.h> 38#include <linux/bitops.h>
39#include <linux/io.h>
40#include <linux/irq.h>
41#include <linux/clk.h>
42#include <linux/platform_device.h>
39 43
40#include <asm/irq.h>
41#include <asm/uaccess.h>
42#include <asm/io.h>
43#include <asm/pgtable.h>
44#include <asm/cacheflush.h> 44#include <asm/cacheflush.h>
45 45
46#ifndef CONFIG_ARCH_MXC
46#include <asm/coldfire.h> 47#include <asm/coldfire.h>
47#include <asm/mcfsim.h> 48#include <asm/mcfsim.h>
49#endif
50
48#include "fec.h" 51#include "fec.h"
49 52
50#if defined(CONFIG_FEC2) 53#ifdef CONFIG_ARCH_MXC
51#define FEC_MAX_PORTS 2 54#include <mach/hardware.h>
55#define FEC_ALIGNMENT 0xf
52#else 56#else
53#define FEC_MAX_PORTS 1 57#define FEC_ALIGNMENT 0x3
54#endif 58#endif
55 59
60#if defined CONFIG_M5272 || defined CONFIG_M527x || defined CONFIG_M523x \
61 || defined CONFIG_M528x || defined CONFIG_M532x || defined CONFIG_M520x
62#define FEC_LEGACY
63/*
64 * Define the fixed address of the FEC hardware.
65 */
56#if defined(CONFIG_M5272) 66#if defined(CONFIG_M5272)
57#define HAVE_mii_link_interrupt 67#define HAVE_mii_link_interrupt
58#endif 68#endif
59 69
60/* 70#if defined(CONFIG_FEC2)
61 * Define the fixed address of the FEC hardware. 71#define FEC_MAX_PORTS 2
62 */ 72#else
73#define FEC_MAX_PORTS 1
74#endif
75
63static unsigned int fec_hw[] = { 76static unsigned int fec_hw[] = {
64#if defined(CONFIG_M5272) 77#if defined(CONFIG_M5272)
65 (MCF_MBAR + 0x840), 78 (MCF_MBAR + 0x840),
@@ -72,8 +85,6 @@ static unsigned int fec_hw[] = {
72 (MCF_MBAR+0x30000), 85 (MCF_MBAR+0x30000),
73#elif defined(CONFIG_M532x) 86#elif defined(CONFIG_M532x)
74 (MCF_MBAR+0xfc030000), 87 (MCF_MBAR+0xfc030000),
75#else
76 &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
77#endif 88#endif
78}; 89};
79 90
@@ -99,6 +110,8 @@ static unsigned char fec_mac_default[] = {
99#define FEC_FLASHMAC 0 110#define FEC_FLASHMAC 0
100#endif 111#endif
101 112
113#endif /* FEC_LEGACY */
114
102/* Forward declarations of some structures to support different PHYs 115/* Forward declarations of some structures to support different PHYs
103*/ 116*/
104 117
@@ -162,7 +175,7 @@ typedef struct {
162 * account when setting it. 175 * account when setting it.
163 */ 176 */
164#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 177#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
165 defined(CONFIG_M520x) || defined(CONFIG_M532x) 178 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
166#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 179#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
167#else 180#else
168#define OPT_FRAME_SIZE 0 181#define OPT_FRAME_SIZE 0
@@ -182,6 +195,8 @@ struct fec_enet_private {
182 195
183 struct net_device *netdev; 196 struct net_device *netdev;
184 197
198 struct clk *clk;
199
185 /* The saved address of a sent-in-place packet/buffer, for skfree(). */ 200 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
186 unsigned char *tx_bounce[TX_RING_SIZE]; 201 unsigned char *tx_bounce[TX_RING_SIZE];
187 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 202 struct sk_buff* tx_skbuff[TX_RING_SIZE];
@@ -190,6 +205,7 @@ struct fec_enet_private {
190 205
191 /* CPM dual port RAM relative addresses. 206 /* CPM dual port RAM relative addresses.
192 */ 207 */
208 dma_addr_t bd_dma;
193 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ 209 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
194 cbd_t *tx_bd_base; 210 cbd_t *tx_bd_base;
195 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ 211 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
@@ -342,10 +358,10 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
342 * 4-byte boundaries. Use bounce buffers to copy data 358 * 4-byte boundaries. Use bounce buffers to copy data
343 * and get it aligned. Ugh. 359 * and get it aligned. Ugh.
344 */ 360 */
345 if (bdp->cbd_bufaddr & 0x3) { 361 if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
346 unsigned int index; 362 unsigned int index;
347 index = bdp - fep->tx_bd_base; 363 index = bdp - fep->tx_bd_base;
348 memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen); 364 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
349 bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); 365 bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
350 } 366 }
351 367
@@ -359,8 +375,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
359 /* Push the data cache so the CPM does not get stale memory 375 /* Push the data cache so the CPM does not get stale memory
360 * data. 376 * data.
361 */ 377 */
362 flush_dcache_range((unsigned long)skb->data, 378 dma_sync_single(NULL, bdp->cbd_bufaddr,
363 (unsigned long)skb->data + skb->len); 379 bdp->cbd_datlen, DMA_TO_DEVICE);
364 380
365 /* Send it on its way. Tell FEC it's ready, interrupt when done, 381 /* Send it on its way. Tell FEC it's ready, interrupt when done,
366 * it's the last BD of the frame, and to put the CRC on the end. 382 * it's the last BD of the frame, and to put the CRC on the end.
@@ -633,6 +649,9 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
633 dev->stats.rx_bytes += pkt_len; 649 dev->stats.rx_bytes += pkt_len;
634 data = (__u8*)__va(bdp->cbd_bufaddr); 650 data = (__u8*)__va(bdp->cbd_bufaddr);
635 651
652 dma_sync_single(NULL, (unsigned long)__pa(data),
653 pkt_len - 4, DMA_FROM_DEVICE);
654
636 /* This does 16 byte alignment, exactly what we need. 655 /* This does 16 byte alignment, exactly what we need.
637 * The packet length includes FCS, but we don't want to 656 * The packet length includes FCS, but we don't want to
638 * include that when passing upstream as it messes up 657 * include that when passing upstream as it messes up
@@ -1114,7 +1133,7 @@ static phy_info_t const phy_info_am79c874 = {
1114/* register definitions for the 8721 */ 1133/* register definitions for the 8721 */
1115 1134
1116#define MII_KS8721BL_RXERCR 21 1135#define MII_KS8721BL_RXERCR 21
1117#define MII_KS8721BL_ICSR 22 1136#define MII_KS8721BL_ICSR 27
1118#define MII_KS8721BL_PHYCR 31 1137#define MII_KS8721BL_PHYCR 31
1119 1138
1120static phy_cmd_t const phy_cmd_ks8721bl_config[] = { 1139static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
@@ -1308,10 +1327,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
1308 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1327 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
1309} 1328}
1310 1329
1311static void __inline__ fec_enable_phy_intr(void)
1312{
1313}
1314
1315static void __inline__ fec_disable_phy_intr(void) 1330static void __inline__ fec_disable_phy_intr(void)
1316{ 1331{
1317 volatile unsigned long *icrp; 1332 volatile unsigned long *icrp;
@@ -1327,17 +1342,6 @@ static void __inline__ fec_phy_ack_intr(void)
1327 *icrp = 0x0d000000; 1342 *icrp = 0x0d000000;
1328} 1343}
1329 1344
1330static void __inline__ fec_localhw_setup(void)
1331{
1332}
1333
1334/*
1335 * Do not need to make region uncached on 5272.
1336 */
1337static void __inline__ fec_uncache(unsigned long addr)
1338{
1339}
1340
1341/* ------------------------------------------------------------------------- */ 1345/* ------------------------------------------------------------------------- */
1342 1346
1343#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) 1347#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
@@ -1477,10 +1481,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
1477 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1481 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
1478} 1482}
1479 1483
1480static void __inline__ fec_enable_phy_intr(void)
1481{
1482}
1483
1484static void __inline__ fec_disable_phy_intr(void) 1484static void __inline__ fec_disable_phy_intr(void)
1485{ 1485{
1486} 1486}
@@ -1489,17 +1489,6 @@ static void __inline__ fec_phy_ack_intr(void)
1489{ 1489{
1490} 1490}
1491 1491
1492static void __inline__ fec_localhw_setup(void)
1493{
1494}
1495
1496/*
1497 * Do not need to make region uncached on 5272.
1498 */
1499static void __inline__ fec_uncache(unsigned long addr)
1500{
1501}
1502
1503/* ------------------------------------------------------------------------- */ 1492/* ------------------------------------------------------------------------- */
1504 1493
1505#elif defined(CONFIG_M520x) 1494#elif defined(CONFIG_M520x)
@@ -1598,10 +1587,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
1598 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1587 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
1599} 1588}
1600 1589
1601static void __inline__ fec_enable_phy_intr(void)
1602{
1603}
1604
1605static void __inline__ fec_disable_phy_intr(void) 1590static void __inline__ fec_disable_phy_intr(void)
1606{ 1591{
1607} 1592}
@@ -1610,14 +1595,6 @@ static void __inline__ fec_phy_ack_intr(void)
1610{ 1595{
1611} 1596}
1612 1597
1613static void __inline__ fec_localhw_setup(void)
1614{
1615}
1616
1617static void __inline__ fec_uncache(unsigned long addr)
1618{
1619}
1620
1621/* ------------------------------------------------------------------------- */ 1598/* ------------------------------------------------------------------------- */
1622 1599
1623#elif defined(CONFIG_M532x) 1600#elif defined(CONFIG_M532x)
@@ -1737,10 +1714,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
1737 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1714 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
1738} 1715}
1739 1716
1740static void __inline__ fec_enable_phy_intr(void)
1741{
1742}
1743
1744static void __inline__ fec_disable_phy_intr(void) 1717static void __inline__ fec_disable_phy_intr(void)
1745{ 1718{
1746} 1719}
@@ -1749,107 +1722,6 @@ static void __inline__ fec_phy_ack_intr(void)
1749{ 1722{
1750} 1723}
1751 1724
1752static void __inline__ fec_localhw_setup(void)
1753{
1754}
1755
1756/*
1757 * Do not need to make region uncached on 532x.
1758 */
1759static void __inline__ fec_uncache(unsigned long addr)
1760{
1761}
1762
1763/* ------------------------------------------------------------------------- */
1764
1765
1766#else
1767
1768/*
1769 * Code specific to the MPC860T setup.
1770 */
1771static void __inline__ fec_request_intrs(struct net_device *dev)
1772{
1773 volatile immap_t *immap;
1774
1775 immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */
1776
1777 if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
1778 panic("Could not allocate FEC IRQ!");
1779}
1780
1781static void __inline__ fec_get_mac(struct net_device *dev)
1782{
1783 bd_t *bd;
1784
1785 bd = (bd_t *)__res;
1786 memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN);
1787}
1788
1789static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
1790{
1791 extern uint _get_IMMR(void);
1792 volatile immap_t *immap;
1793 volatile fec_t *fecp;
1794
1795 fecp = fep->hwp;
1796 immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */
1797
1798 /* Configure all of port D for MII.
1799 */
1800 immap->im_ioport.iop_pdpar = 0x1fff;
1801
1802 /* Bits moved from Rev. D onward.
1803 */
1804 if ((_get_IMMR() & 0xffff) < 0x0501)
1805 immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */
1806 else
1807 immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */
1808
1809 /* Set MII speed to 2.5 MHz
1810 */
1811 fecp->fec_mii_speed = fep->phy_speed =
1812 ((bd->bi_busfreq * 1000000) / 2500000) & 0x7e;
1813}
1814
1815static void __inline__ fec_enable_phy_intr(void)
1816{
1817 volatile fec_t *fecp;
1818
1819 fecp = fep->hwp;
1820
1821 /* Enable MII command finished interrupt
1822 */
1823 fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
1824}
1825
1826static void __inline__ fec_disable_phy_intr(void)
1827{
1828}
1829
1830static void __inline__ fec_phy_ack_intr(void)
1831{
1832}
1833
1834static void __inline__ fec_localhw_setup(void)
1835{
1836 volatile fec_t *fecp;
1837
1838 fecp = fep->hwp;
1839 fecp->fec_r_hash = PKT_MAXBUF_SIZE;
1840 /* Enable big endian and don't care about SDMA FC.
1841 */
1842 fecp->fec_fun_code = 0x78000000;
1843}
1844
1845static void __inline__ fec_uncache(unsigned long addr)
1846{
1847 pte_t *pte;
1848 pte = va_to_pte(mem_addr);
1849 pte_val(*pte) |= _PAGE_NO_CACHE;
1850 flush_tlb_page(init_mm.mmap, mem_addr);
1851}
1852
1853#endif 1725#endif
1854 1726
1855/* ------------------------------------------------------------------------- */ 1727/* ------------------------------------------------------------------------- */
@@ -2055,7 +1927,9 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
2055 printk("FEC: No PHY device found.\n"); 1927 printk("FEC: No PHY device found.\n");
2056 /* Disable external MII interface */ 1928 /* Disable external MII interface */
2057 fecp->fec_mii_speed = fep->phy_speed = 0; 1929 fecp->fec_mii_speed = fep->phy_speed = 0;
 1930#ifdef FEC_LEGACY
2058 fec_disable_phy_intr(); 1931 fec_disable_phy_intr();
1932#endif
2059 } 1933 }
2060} 1934}
2061 1935
@@ -2237,12 +2111,12 @@ fec_set_mac_address(struct net_device *dev)
2237 2111
2238} 2112}
2239 2113
2240/* Initialize the FEC Ethernet on 860T (or ColdFire 5272).
2241 */
2242 /* 2114 /*
2243 * XXX: We need to clean up on failure exits here. 2115 * XXX: We need to clean up on failure exits here.
2116 *
2117 * index is only used in legacy code
2244 */ 2118 */
2245int __init fec_enet_init(struct net_device *dev) 2119int __init fec_enet_init(struct net_device *dev, int index)
2246{ 2120{
2247 struct fec_enet_private *fep = netdev_priv(dev); 2121 struct fec_enet_private *fep = netdev_priv(dev);
2248 unsigned long mem_addr; 2122 unsigned long mem_addr;
@@ -2250,15 +2124,11 @@ int __init fec_enet_init(struct net_device *dev)
2250 cbd_t *cbd_base; 2124 cbd_t *cbd_base;
2251 volatile fec_t *fecp; 2125 volatile fec_t *fecp;
2252 int i, j; 2126 int i, j;
2253 static int index = 0;
2254
2255 /* Only allow us to be probed once. */
2256 if (index >= FEC_MAX_PORTS)
2257 return -ENXIO;
2258 2127
2259 /* Allocate memory for buffer descriptors. 2128 /* Allocate memory for buffer descriptors.
2260 */ 2129 */
2261 mem_addr = __get_free_page(GFP_KERNEL); 2130 mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE,
2131 &fep->bd_dma, GFP_KERNEL);
2262 if (mem_addr == 0) { 2132 if (mem_addr == 0) {
2263 printk("FEC: allocate descriptor memory failed?\n"); 2133 printk("FEC: allocate descriptor memory failed?\n");
2264 return -ENOMEM; 2134 return -ENOMEM;
@@ -2269,7 +2139,7 @@ int __init fec_enet_init(struct net_device *dev)
2269 2139
2270 /* Create an Ethernet device instance. 2140 /* Create an Ethernet device instance.
2271 */ 2141 */
2272 fecp = (volatile fec_t *) fec_hw[index]; 2142 fecp = (volatile fec_t *)dev->base_addr;
2273 2143
2274 fep->index = index; 2144 fep->index = index;
2275 fep->hwp = fecp; 2145 fep->hwp = fecp;
@@ -2280,18 +2150,24 @@ int __init fec_enet_init(struct net_device *dev)
2280 fecp->fec_ecntrl = 1; 2150 fecp->fec_ecntrl = 1;
2281 udelay(10); 2151 udelay(10);
2282 2152
2283 /* Set the Ethernet address. If using multiple Enets on the 8xx, 2153 /* Set the Ethernet address */
2284 * this needs some work to get unique addresses. 2154#ifdef FEC_LEGACY
2285 *
2286 * This is our default MAC address unless the user changes
2287 * it via eth_mac_addr (our dev->set_mac_addr handler).
2288 */
2289 fec_get_mac(dev); 2155 fec_get_mac(dev);
2156#else
2157 {
2158 unsigned long l;
2159 l = fecp->fec_addr_low;
2160 dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
2161 dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
2162 dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
2163 dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
2164 l = fecp->fec_addr_high;
2165 dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
2166 dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
2167 }
2168#endif
2290 2169
2291 cbd_base = (cbd_t *)mem_addr; 2170 cbd_base = (cbd_t *)mem_addr;
2292 /* XXX: missing check for allocation failure */
2293
2294 fec_uncache(mem_addr);
2295 2171
2296 /* Set receive and transmit descriptor base. 2172 /* Set receive and transmit descriptor base.
2297 */ 2173 */
@@ -2313,8 +2189,6 @@ int __init fec_enet_init(struct net_device *dev)
2313 mem_addr = __get_free_page(GFP_KERNEL); 2189 mem_addr = __get_free_page(GFP_KERNEL);
2314 /* XXX: missing check for allocation failure */ 2190 /* XXX: missing check for allocation failure */
2315 2191
2316 fec_uncache(mem_addr);
2317
2318 /* Initialize the BD for every fragment in the page. 2192 /* Initialize the BD for every fragment in the page.
2319 */ 2193 */
2320 for (j=0; j<FEC_ENET_RX_FRPPG; j++) { 2194 for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
@@ -2357,13 +2231,16 @@ int __init fec_enet_init(struct net_device *dev)
2357 2231
2358 /* Set receive and transmit descriptor base. 2232 /* Set receive and transmit descriptor base.
2359 */ 2233 */
2360 fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); 2234 fecp->fec_r_des_start = fep->bd_dma;
2361 fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); 2235 fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
2236 * RX_RING_SIZE;
2362 2237
2238#ifdef FEC_LEGACY
2363 /* Install our interrupt handlers. This varies depending on 2239 /* Install our interrupt handlers. This varies depending on
2364 * the architecture. 2240 * the architecture.
2365 */ 2241 */
2366 fec_request_intrs(dev); 2242 fec_request_intrs(dev);
2243#endif
2367 2244
2368 fecp->fec_grp_hash_table_high = 0; 2245 fecp->fec_grp_hash_table_high = 0;
2369 fecp->fec_grp_hash_table_low = 0; 2246 fecp->fec_grp_hash_table_low = 0;
@@ -2375,8 +2252,6 @@ int __init fec_enet_init(struct net_device *dev)
2375 fecp->fec_hash_table_low = 0; 2252 fecp->fec_hash_table_low = 0;
2376#endif 2253#endif
2377 2254
2378 dev->base_addr = (unsigned long)fecp;
2379
2380 /* The FEC Ethernet specific entries in the device structure. */ 2255 /* The FEC Ethernet specific entries in the device structure. */
2381 dev->open = fec_enet_open; 2256 dev->open = fec_enet_open;
2382 dev->hard_start_xmit = fec_enet_start_xmit; 2257 dev->hard_start_xmit = fec_enet_start_xmit;
@@ -2390,7 +2265,20 @@ int __init fec_enet_init(struct net_device *dev)
2390 mii_free = mii_cmds; 2265 mii_free = mii_cmds;
2391 2266
2392 /* setup MII interface */ 2267 /* setup MII interface */
2268#ifdef FEC_LEGACY
2393 fec_set_mii(dev, fep); 2269 fec_set_mii(dev, fep);
2270#else
2271 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
2272 fecp->fec_x_cntrl = 0x00;
2273
2274 /*
2275 * Set MII speed to 2.5 MHz
2276 */
2277 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
2278 / 2500000) / 2) & 0x3F) << 1;
2279 fecp->fec_mii_speed = fep->phy_speed;
2280 fec_restart(dev, 0);
2281#endif
2394 2282
2395 /* Clear and enable interrupts */ 2283 /* Clear and enable interrupts */
2396 fecp->fec_ievent = 0xffc00000; 2284 fecp->fec_ievent = 0xffc00000;
@@ -2403,7 +2291,6 @@ int __init fec_enet_init(struct net_device *dev)
2403 fep->phy_addr = 0; 2291 fep->phy_addr = 0;
2404 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); 2292 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
2405 2293
2406 index++;
2407 return 0; 2294 return 0;
2408} 2295}
2409 2296
@@ -2430,7 +2317,6 @@ fec_restart(struct net_device *dev, int duplex)
2430 /* Clear any outstanding interrupt. 2317 /* Clear any outstanding interrupt.
2431 */ 2318 */
2432 fecp->fec_ievent = 0xffc00000; 2319 fecp->fec_ievent = 0xffc00000;
2433 fec_enable_phy_intr();
2434 2320
2435 /* Set station address. 2321 /* Set station address.
2436 */ 2322 */
@@ -2445,12 +2331,11 @@ fec_restart(struct net_device *dev, int duplex)
2445 */ 2331 */
2446 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; 2332 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
2447 2333
2448 fec_localhw_setup();
2449
2450 /* Set receive and transmit descriptor base. 2334 /* Set receive and transmit descriptor base.
2451 */ 2335 */
2452 fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); 2336 fecp->fec_r_des_start = fep->bd_dma;
2453 fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); 2337 fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
2338 * RX_RING_SIZE;
2454 2339
2455 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; 2340 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
2456 fep->cur_rx = fep->rx_bd_base; 2341 fep->cur_rx = fep->rx_bd_base;
@@ -2552,12 +2437,12 @@ fec_stop(struct net_device *dev)
2552 /* Clear outstanding MII command interrupts. 2437 /* Clear outstanding MII command interrupts.
2553 */ 2438 */
2554 fecp->fec_ievent = FEC_ENET_MII; 2439 fecp->fec_ievent = FEC_ENET_MII;
2555 fec_enable_phy_intr();
2556 2440
2557 fecp->fec_imask = FEC_ENET_MII; 2441 fecp->fec_imask = FEC_ENET_MII;
2558 fecp->fec_mii_speed = fep->phy_speed; 2442 fecp->fec_mii_speed = fep->phy_speed;
2559} 2443}
2560 2444
2445#ifdef FEC_LEGACY
2561static int __init fec_enet_module_init(void) 2446static int __init fec_enet_module_init(void)
2562{ 2447{
2563 struct net_device *dev; 2448 struct net_device *dev;
@@ -2569,7 +2454,8 @@ static int __init fec_enet_module_init(void)
2569 dev = alloc_etherdev(sizeof(struct fec_enet_private)); 2454 dev = alloc_etherdev(sizeof(struct fec_enet_private));
2570 if (!dev) 2455 if (!dev)
2571 return -ENOMEM; 2456 return -ENOMEM;
2572 err = fec_enet_init(dev); 2457 dev->base_addr = (unsigned long)fec_hw[i];
2458 err = fec_enet_init(dev, i);
2573 if (err) { 2459 if (err) {
2574 free_netdev(dev); 2460 free_netdev(dev);
2575 continue; 2461 continue;
@@ -2584,6 +2470,170 @@ static int __init fec_enet_module_init(void)
2584 } 2470 }
2585 return 0; 2471 return 0;
2586} 2472}
2473#else
2474
2475static int __devinit
2476fec_probe(struct platform_device *pdev)
2477{
2478 struct fec_enet_private *fep;
2479 struct net_device *ndev;
2480 int i, irq, ret = 0;
2481 struct resource *r;
2482
2483 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2484 if (!r)
2485 return -ENXIO;
2486
2487 r = request_mem_region(r->start, resource_size(r), pdev->name);
2488 if (!r)
2489 return -EBUSY;
2490
2491 /* Init network device */
2492 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
2493 if (!ndev)
2494 return -ENOMEM;
2495
2496 SET_NETDEV_DEV(ndev, &pdev->dev);
2497
2498 /* setup board info structure */
2499 fep = netdev_priv(ndev);
2500 memset(fep, 0, sizeof(*fep));
2501
2502 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
2503
2504 if (!ndev->base_addr) {
2505 ret = -ENOMEM;
2506 goto failed_ioremap;
2507 }
2508
2509 platform_set_drvdata(pdev, ndev);
2510
2511 /* This device has up to three irqs on some platforms */
2512 for (i = 0; i < 3; i++) {
2513 irq = platform_get_irq(pdev, i);
2514 if (i && irq < 0)
2515 break;
2516 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
2517 if (ret) {
2518 while (i >= 0) {
2519 irq = platform_get_irq(pdev, i);
2520 free_irq(irq, ndev);
2521 i--;
2522 }
2523 goto failed_irq;
2524 }
2525 }
2526
2527 fep->clk = clk_get(&pdev->dev, "fec_clk");
2528 if (IS_ERR(fep->clk)) {
2529 ret = PTR_ERR(fep->clk);
2530 goto failed_clk;
2531 }
2532 clk_enable(fep->clk);
2533
2534 ret = fec_enet_init(ndev, 0);
2535 if (ret)
2536 goto failed_init;
2537
2538 ret = register_netdev(ndev);
2539 if (ret)
2540 goto failed_register;
2541
2542 return 0;
2543
2544failed_register:
2545failed_init:
2546 clk_disable(fep->clk);
2547 clk_put(fep->clk);
2548failed_clk:
2549 for (i = 0; i < 3; i++) {
2550 irq = platform_get_irq(pdev, i);
2551 if (irq > 0)
2552 free_irq(irq, ndev);
2553 }
2554failed_irq:
2555 iounmap((void __iomem *)ndev->base_addr);
2556failed_ioremap:
2557 free_netdev(ndev);
2558
2559 return ret;
2560}
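A hypothetical board-side sketch (not part of the patch) of what fec_probe() above expects to find: one memory resource, up to three interrupts, and a clock that clk_get(&pdev->dev, "fec_clk") can resolve. The register base and IRQ number here are invented for illustration.

/* Illustrative board support code; assumes <linux/platform_device.h>. */
static struct resource fec_resources[] = {
	{ .start = 0x83fec000, .end = 0x83fec3ff, .flags = IORESOURCE_MEM },
	{ .start = 87,         .end = 87,         .flags = IORESOURCE_IRQ },
};

static struct platform_device fec_device = {
	.name		= "fec",	/* matches fec_driver.driver.name */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(fec_resources),
	.resource	= fec_resources,
};
/* Board init would call platform_device_register(&fec_device) and
 * register a "fec_clk" clock for the device. */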
2561
2562static int __devexit
2563fec_drv_remove(struct platform_device *pdev)
2564{
2565 struct net_device *ndev = platform_get_drvdata(pdev);
2566 struct fec_enet_private *fep = netdev_priv(ndev);
2567
2568 platform_set_drvdata(pdev, NULL);
2569
2570 fec_stop(ndev);
2571 clk_disable(fep->clk);
2572 clk_put(fep->clk);
2573 iounmap((void __iomem *)ndev->base_addr);
2574 unregister_netdev(ndev);
2575 free_netdev(ndev);
2576 return 0;
2577}
2578
2579static int
2580fec_suspend(struct platform_device *dev, pm_message_t state)
2581{
2582 struct net_device *ndev = platform_get_drvdata(dev);
2583 struct fec_enet_private *fep;
2584
2585 if (ndev) {
2586 fep = netdev_priv(ndev);
2587 if (netif_running(ndev)) {
2588 netif_device_detach(ndev);
2589 fec_stop(ndev);
2590 }
2591 }
2592 return 0;
2593}
2594
2595static int
2596fec_resume(struct platform_device *dev)
2597{
2598 struct net_device *ndev = platform_get_drvdata(dev);
2599
2600 if (ndev) {
2601 if (netif_running(ndev)) {
2602 fec_enet_init(ndev, 0);
2603 netif_device_attach(ndev);
2604 }
2605 }
2606 return 0;
2607}
2608
2609static struct platform_driver fec_driver = {
2610 .driver = {
2611 .name = "fec",
2612 .owner = THIS_MODULE,
2613 },
2614 .probe = fec_probe,
2615 .remove = __devexit_p(fec_drv_remove),
2616 .suspend = fec_suspend,
2617 .resume = fec_resume,
2618};
2619
2620static int __init
2621fec_enet_module_init(void)
2622{
2623 printk(KERN_INFO "FEC Ethernet Driver\n");
2624
2625 return platform_driver_register(&fec_driver);
2626}
2627
2628static void __exit
2629fec_enet_cleanup(void)
2630{
2631 platform_driver_unregister(&fec_driver);
2632}
2633
2634module_exit(fec_enet_cleanup);
2635
2636#endif /* FEC_LEGACY */
2587 2637
2588module_init(fec_enet_module_init); 2638module_init(fec_enet_module_init);
2589 2639
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 292719dacef..76c64c92e19 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,7 @@
14/****************************************************************************/ 14/****************************************************************************/
15 15
16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
17 defined(CONFIG_M520x) || defined(CONFIG_M532x) 17 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
18/* 18/*
19 * Just figures, Motorola would have to change the offsets for 19 * Just figures, Motorola would have to change the offsets for
20 * registers in the same peripheral device on different models 20 * registers in the same peripheral device on different models
@@ -103,12 +103,19 @@ typedef struct fec {
103/* 103/*
104 * Define the buffer descriptor structure. 104 * Define the buffer descriptor structure.
105 */ 105 */
106#ifdef CONFIG_ARCH_MXC
107typedef struct bufdesc {
108 unsigned short cbd_datlen; /* Data length */
109 unsigned short cbd_sc; /* Control and status info */
110 unsigned long cbd_bufaddr; /* Buffer address */
111} cbd_t;
112#else
106typedef struct bufdesc { 113typedef struct bufdesc {
107 unsigned short cbd_sc; /* Control and status info */ 114 unsigned short cbd_sc; /* Control and status info */
108 unsigned short cbd_datlen; /* Data length */ 115 unsigned short cbd_datlen; /* Data length */
109 unsigned long cbd_bufaddr; /* Buffer address */ 116 unsigned long cbd_bufaddr; /* Buffer address */
110} cbd_t; 117} cbd_t;
111 118#endif
112 119
113/* 120/*
114 * The following definitions courtesy of commproc.h, which were 121 * The following definitions courtesy of commproc.h, which were
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b910cf6374..021308f9f0c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -39,7 +39,7 @@
39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40 * superfluous timer interrupts from the nic. 40 * superfluous timer interrupts from the nic.
41 */ 41 */
42#define FORCEDETH_VERSION "0.62" 42#define FORCEDETH_VERSION "0.63"
43#define DRV_NAME "forcedeth" 43#define DRV_NAME "forcedeth"
44 44
45#include <linux/module.h> 45#include <linux/module.h>
@@ -102,7 +102,7 @@
102enum { 102enum {
103 NvRegIrqStatus = 0x000, 103 NvRegIrqStatus = 0x000,
104#define NVREG_IRQSTAT_MIIEVENT 0x040 104#define NVREG_IRQSTAT_MIIEVENT 0x040
105#define NVREG_IRQSTAT_MASK 0x81ff 105#define NVREG_IRQSTAT_MASK 0x83ff
106 NvRegIrqMask = 0x004, 106 NvRegIrqMask = 0x004,
107#define NVREG_IRQ_RX_ERROR 0x0001 107#define NVREG_IRQ_RX_ERROR 0x0001
108#define NVREG_IRQ_RX 0x0002 108#define NVREG_IRQ_RX 0x0002
@@ -113,7 +113,7 @@ enum {
113#define NVREG_IRQ_LINK 0x0040 113#define NVREG_IRQ_LINK 0x0040
114#define NVREG_IRQ_RX_FORCED 0x0080 114#define NVREG_IRQ_RX_FORCED 0x0080
115#define NVREG_IRQ_TX_FORCED 0x0100 115#define NVREG_IRQ_TX_FORCED 0x0100
116#define NVREG_IRQ_RECOVER_ERROR 0x8000 116#define NVREG_IRQ_RECOVER_ERROR 0x8200
117#define NVREG_IRQMASK_THROUGHPUT 0x00df 117#define NVREG_IRQMASK_THROUGHPUT 0x00df
118#define NVREG_IRQMASK_CPU 0x0060 118#define NVREG_IRQMASK_CPU 0x0060
119#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) 119#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
@@ -157,6 +157,9 @@ enum {
157#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 157#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
158#define NVREG_XMITCTL_HOST_LOADED 0x00004000 158#define NVREG_XMITCTL_HOST_LOADED 0x00004000
159#define NVREG_XMITCTL_TX_PATH_EN 0x01000000 159#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
160#define NVREG_XMITCTL_DATA_START 0x00100000
161#define NVREG_XMITCTL_DATA_READY 0x00010000
162#define NVREG_XMITCTL_DATA_ERROR 0x00020000
160 NvRegTransmitterStatus = 0x088, 163 NvRegTransmitterStatus = 0x088,
161#define NVREG_XMITSTAT_BUSY 0x01 164#define NVREG_XMITSTAT_BUSY 0x01
162 165
@@ -289,8 +292,10 @@ enum {
289#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 292#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
290#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 293#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
291 294
292 NvRegPatternCRC = 0x204, 295 NvRegMgmtUnitGetVersion = 0x204,
293 NvRegPatternMask = 0x208, 296#define NVREG_MGMTUNITGETVERSION 0x01
297 NvRegMgmtUnitVersion = 0x208,
298#define NVREG_MGMTUNITVERSION 0x08
294 NvRegPowerCap = 0x268, 299 NvRegPowerCap = 0x268,
295#define NVREG_POWERCAP_D3SUPP (1<<30) 300#define NVREG_POWERCAP_D3SUPP (1<<30)
296#define NVREG_POWERCAP_D2SUPP (1<<26) 301#define NVREG_POWERCAP_D2SUPP (1<<26)
@@ -303,6 +308,8 @@ enum {
303#define NVREG_POWERSTATE_D1 0x0001 308#define NVREG_POWERSTATE_D1 0x0001
304#define NVREG_POWERSTATE_D2 0x0002 309#define NVREG_POWERSTATE_D2 0x0002
305#define NVREG_POWERSTATE_D3 0x0003 310#define NVREG_POWERSTATE_D3 0x0003
311 NvRegMgmtUnitControl = 0x278,
312#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
306 NvRegTxCnt = 0x280, 313 NvRegTxCnt = 0x280,
307 NvRegTxZeroReXmt = 0x284, 314 NvRegTxZeroReXmt = 0x284,
308 NvRegTxOneReXmt = 0x288, 315 NvRegTxOneReXmt = 0x288,
@@ -582,6 +589,9 @@ union ring_type {
582#define NV_MSI_X_VECTOR_TX 0x1 589#define NV_MSI_X_VECTOR_TX 0x1
583#define NV_MSI_X_VECTOR_OTHER 0x2 590#define NV_MSI_X_VECTOR_OTHER 0x2
584 591
592#define NV_MSI_PRIV_OFFSET 0x68
593#define NV_MSI_PRIV_VALUE 0xffffffff
594
585#define NV_RESTART_TX 0x1 595#define NV_RESTART_TX 0x1
586#define NV_RESTART_RX 0x2 596#define NV_RESTART_RX 0x2
587 597
@@ -758,6 +768,8 @@ struct fe_priv {
758 u32 register_size; 768 u32 register_size;
759 int rx_csum; 769 int rx_csum;
760 u32 mac_in_use; 770 u32 mac_in_use;
771 int mgmt_version;
772 int mgmt_sema;
761 773
762 void __iomem *base; 774 void __iomem *base;
763 775
@@ -812,6 +824,11 @@ struct fe_priv {
812 824
813 /* power saved state */ 825 /* power saved state */
814 u32 saved_config_space[NV_PCI_REGSZ_MAX/4]; 826 u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
827
828 /* for different msi-x irq type */
829 char name_rx[IFNAMSIZ + 3]; /* -rx */
830 char name_tx[IFNAMSIZ + 3]; /* -tx */
831 char name_other[IFNAMSIZ + 6]; /* -other */
815}; 832};
816 833
817/* 834/*
@@ -857,7 +874,7 @@ enum {
857 NV_MSIX_INT_DISABLED, 874 NV_MSIX_INT_DISABLED,
858 NV_MSIX_INT_ENABLED 875 NV_MSIX_INT_ENABLED
859}; 876};
860static int msix = NV_MSIX_INT_DISABLED; 877static int msix = NV_MSIX_INT_ENABLED;
861 878
862/* 879/*
863 * DMA 64bit 880 * DMA 64bit
@@ -1760,7 +1777,7 @@ static void nv_do_rx_refill(unsigned long data)
1760 struct fe_priv *np = netdev_priv(dev); 1777 struct fe_priv *np = netdev_priv(dev);
1761 1778
1762 /* Just reschedule NAPI rx processing */ 1779 /* Just reschedule NAPI rx processing */
1763 netif_rx_schedule(&np->napi); 1780 napi_schedule(&np->napi);
1764} 1781}
1765#else 1782#else
1766static void nv_do_rx_refill(unsigned long data) 1783static void nv_do_rx_refill(unsigned long data)
@@ -3406,7 +3423,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3406#ifdef CONFIG_FORCEDETH_NAPI 3423#ifdef CONFIG_FORCEDETH_NAPI
3407 if (events & NVREG_IRQ_RX_ALL) { 3424 if (events & NVREG_IRQ_RX_ALL) {
3408 spin_lock(&np->lock); 3425 spin_lock(&np->lock);
3409 netif_rx_schedule(&np->napi); 3426 napi_schedule(&np->napi);
3410 3427
3411 /* Disable further receive irq's */ 3428 /* Disable further receive irq's */
3412 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3429 np->irqmask &= ~NVREG_IRQ_RX_ALL;
@@ -3523,7 +3540,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3523#ifdef CONFIG_FORCEDETH_NAPI 3540#ifdef CONFIG_FORCEDETH_NAPI
3524 if (events & NVREG_IRQ_RX_ALL) { 3541 if (events & NVREG_IRQ_RX_ALL) {
3525 spin_lock(&np->lock); 3542 spin_lock(&np->lock);
3526 netif_rx_schedule(&np->napi); 3543 napi_schedule(&np->napi);
3527 3544
3528 /* Disable furthur receive irq's */ 3545 /* Disable furthur receive irq's */
3529 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3546 np->irqmask &= ~NVREG_IRQ_RX_ALL;
@@ -3680,7 +3697,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3680 /* re-enable receive interrupts */ 3697 /* re-enable receive interrupts */
3681 spin_lock_irqsave(&np->lock, flags); 3698 spin_lock_irqsave(&np->lock, flags);
3682 3699
3683 __netif_rx_complete(napi); 3700 __napi_complete(napi);
3684 3701
3685 np->irqmask |= NVREG_IRQ_RX_ALL; 3702 np->irqmask |= NVREG_IRQ_RX_ALL;
3686 if (np->msi_flags & NV_MSI_X_ENABLED) 3703 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -3703,13 +3720,13 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3703 u32 events; 3720 u32 events;
3704 3721
3705 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3722 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3706 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3707 3723
3708 if (events) { 3724 if (events) {
3709 netif_rx_schedule(&np->napi);
3710 /* disable receive interrupts on the nic */ 3725 /* disable receive interrupts on the nic */
3711 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3726 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3712 pci_push(base); 3727 pci_push(base);
3728 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3729 napi_schedule(&np->napi);
3713 } 3730 }
3714 return IRQ_HANDLED; 3731 return IRQ_HANDLED;
3715} 3732}
@@ -3918,21 +3935,27 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3918 np->msi_flags |= NV_MSI_X_ENABLED; 3935 np->msi_flags |= NV_MSI_X_ENABLED;
3919 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3936 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3920 /* Request irq for rx handling */ 3937 /* Request irq for rx handling */
3921 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 3938 sprintf(np->name_rx, "%s-rx", dev->name);
3939 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3940 &nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3922 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3941 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3923 pci_disable_msix(np->pci_dev); 3942 pci_disable_msix(np->pci_dev);
3924 np->msi_flags &= ~NV_MSI_X_ENABLED; 3943 np->msi_flags &= ~NV_MSI_X_ENABLED;
3925 goto out_err; 3944 goto out_err;
3926 } 3945 }
3927 /* Request irq for tx handling */ 3946 /* Request irq for tx handling */
3928 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 3947 sprintf(np->name_tx, "%s-tx", dev->name);
3948 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3949 &nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3929 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3950 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3930 pci_disable_msix(np->pci_dev); 3951 pci_disable_msix(np->pci_dev);
3931 np->msi_flags &= ~NV_MSI_X_ENABLED; 3952 np->msi_flags &= ~NV_MSI_X_ENABLED;
3932 goto out_free_rx; 3953 goto out_free_rx;
3933 } 3954 }
3934 /* Request irq for link and timer handling */ 3955 /* Request irq for link and timer handling */
3935 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 3956 sprintf(np->name_other, "%s-other", dev->name);
3957 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3958 &nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3936 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3959 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3937 pci_disable_msix(np->pci_dev); 3960 pci_disable_msix(np->pci_dev);
3938 np->msi_flags &= ~NV_MSI_X_ENABLED; 3961 np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -4046,19 +4069,19 @@ static void nv_do_nic_poll(unsigned long data)
4046 mask |= NVREG_IRQ_OTHER; 4069 mask |= NVREG_IRQ_OTHER;
4047 } 4070 }
4048 } 4071 }
4049 np->nic_poll_irq = 0;
4050
4051 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 4072 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
4052 4073
4053 if (np->recover_error) { 4074 if (np->recover_error) {
4054 np->recover_error = 0; 4075 np->recover_error = 0;
4055 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); 4076 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
4056 if (netif_running(dev)) { 4077 if (netif_running(dev)) {
4057 netif_tx_lock_bh(dev); 4078 netif_tx_lock_bh(dev);
4058 netif_addr_lock(dev); 4079 netif_addr_lock(dev);
4059 spin_lock(&np->lock); 4080 spin_lock(&np->lock);
4060 /* stop engines */ 4081 /* stop engines */
4061 nv_stop_rxtx(dev); 4082 nv_stop_rxtx(dev);
4083 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4084 nv_mac_reset(dev);
4062 nv_txrx_reset(dev); 4085 nv_txrx_reset(dev);
4063 /* drain rx queue */ 4086 /* drain rx queue */
4064 nv_drain_rxtx(dev); 4087 nv_drain_rxtx(dev);
@@ -4076,6 +4099,11 @@ static void nv_do_nic_poll(unsigned long data)
4076 pci_push(base); 4099 pci_push(base);
4077 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4100 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4078 pci_push(base); 4101 pci_push(base);
4102 /* clear interrupts */
4103 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4104 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4105 else
4106 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4079 4107
4080 /* restart rx engine */ 4108 /* restart rx engine */
4081 nv_start_rxtx(dev); 4109 nv_start_rxtx(dev);
@@ -4085,11 +4113,11 @@ static void nv_do_nic_poll(unsigned long data)
4085 } 4113 }
4086 } 4114 }
4087 4115
4088
4089 writel(mask, base + NvRegIrqMask); 4116 writel(mask, base + NvRegIrqMask);
4090 pci_push(base); 4117 pci_push(base);
4091 4118
4092 if (!using_multi_irqs(dev)) { 4119 if (!using_multi_irqs(dev)) {
4120 np->nic_poll_irq = 0;
4093 if (nv_optimized(np)) 4121 if (nv_optimized(np))
4094 nv_nic_irq_optimized(0, dev); 4122 nv_nic_irq_optimized(0, dev);
4095 else 4123 else
@@ -4100,18 +4128,22 @@ static void nv_do_nic_poll(unsigned long data)
4100 enable_irq_lockdep(np->pci_dev->irq); 4128 enable_irq_lockdep(np->pci_dev->irq);
4101 } else { 4129 } else {
4102 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4130 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4131 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4103 nv_nic_irq_rx(0, dev); 4132 nv_nic_irq_rx(0, dev);
4104 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4133 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4105 } 4134 }
4106 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4135 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4136 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4107 nv_nic_irq_tx(0, dev); 4137 nv_nic_irq_tx(0, dev);
4108 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4138 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4109 } 4139 }
4110 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4140 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4141 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4111 nv_nic_irq_other(0, dev); 4142 nv_nic_irq_other(0, dev);
4112 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4143 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4113 } 4144 }
4114 } 4145 }
4146
4115} 4147}
4116 4148
4117#ifdef CONFIG_NET_POLL_CONTROLLER 4149#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4738,7 +4770,7 @@ static int nv_set_tx_csum(struct net_device *dev, u32 data)
4738 struct fe_priv *np = netdev_priv(dev); 4770 struct fe_priv *np = netdev_priv(dev);
4739 4771
4740 if (np->driver_data & DEV_HAS_CHECKSUM) 4772 if (np->driver_data & DEV_HAS_CHECKSUM)
4741 return ethtool_op_set_tx_hw_csum(dev, data); 4773 return ethtool_op_set_tx_csum(dev, data);
4742 else 4774 else
4743 return -EOPNOTSUPP; 4775 return -EOPNOTSUPP;
4744} 4776}
@@ -5169,6 +5201,7 @@ static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5169/* The mgmt unit and driver use a semaphore to access the phy during init */ 5201/* The mgmt unit and driver use a semaphore to access the phy during init */
5170static int nv_mgmt_acquire_sema(struct net_device *dev) 5202static int nv_mgmt_acquire_sema(struct net_device *dev)
5171{ 5203{
5204 struct fe_priv *np = netdev_priv(dev);
5172 u8 __iomem *base = get_hwbase(dev); 5205 u8 __iomem *base = get_hwbase(dev);
5173 int i; 5206 int i;
5174 u32 tx_ctrl, mgmt_sema; 5207 u32 tx_ctrl, mgmt_sema;
@@ -5191,8 +5224,10 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
5191 /* verify that semaphore was acquired */ 5224 /* verify that semaphore was acquired */
5192 tx_ctrl = readl(base + NvRegTransmitterControl); 5225 tx_ctrl = readl(base + NvRegTransmitterControl);
5193 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5226 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5194 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) 5227 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5228 np->mgmt_sema = 1;
5195 return 1; 5229 return 1;
5230 }
5196 else 5231 else
5197 udelay(50); 5232 udelay(50);
5198 } 5233 }
@@ -5200,6 +5235,51 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
5200 return 0; 5235 return 0;
5201} 5236}
5202 5237
5238static void nv_mgmt_release_sema(struct net_device *dev)
5239{
5240 struct fe_priv *np = netdev_priv(dev);
5241 u8 __iomem *base = get_hwbase(dev);
5242 u32 tx_ctrl;
5243
5244 if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5245 if (np->mgmt_sema) {
5246 tx_ctrl = readl(base + NvRegTransmitterControl);
5247 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5248 writel(tx_ctrl, base + NvRegTransmitterControl);
5249 }
5250 }
5251}
5252
5253
5254static int nv_mgmt_get_version(struct net_device *dev)
5255{
5256 struct fe_priv *np = netdev_priv(dev);
5257 u8 __iomem *base = get_hwbase(dev);
5258 u32 data_ready = readl(base + NvRegTransmitterControl);
5259 u32 data_ready2 = 0;
5260 unsigned long start;
5261 int ready = 0;
5262
5263 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5264 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5265 start = jiffies;
5266 while (time_before(jiffies, start + 5*HZ)) {
5267 data_ready2 = readl(base + NvRegTransmitterControl);
5268 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5269 ready = 1;
5270 break;
5271 }
5272 schedule_timeout_uninterruptible(1);
5273 }
5274
5275 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5276 return 0;
5277
5278 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5279
5280 return 1;
5281}
5282
5203static int nv_open(struct net_device *dev) 5283static int nv_open(struct net_device *dev)
5204{ 5284{
5205 struct fe_priv *np = netdev_priv(dev); 5285 struct fe_priv *np = netdev_priv(dev);
@@ -5771,19 +5851,26 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5771 5851
5772 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 5852 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5773 /* management unit running on the mac? */ 5853 /* management unit running on the mac? */
5774 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { 5854 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5775 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 5855 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5776 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); 5856 nv_mgmt_acquire_sema(dev) &&
5777 if (nv_mgmt_acquire_sema(dev)) { 5857 nv_mgmt_get_version(dev)) {
5778 /* management unit setup the phy already? */ 5858 np->mac_in_use = 1;
5779 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5859 if (np->mgmt_version > 0) {
5780 NVREG_XMITCTL_SYNC_PHY_INIT) { 5860 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5781 /* phy is inited by mgmt unit */ 5861 }
5782 phyinitialized = 1; 5862 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5783 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); 5863 pci_name(pci_dev), np->mac_in_use);
5784 } else { 5864 /* management unit setup the phy already? */
5785 /* we need to init the phy */ 5865 if (np->mac_in_use &&
5786 } 5866 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5867 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5868 /* phy is inited by mgmt unit */
5869 phyinitialized = 1;
5870 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5871 pci_name(pci_dev));
5872 } else {
5873 /* we need to init the phy */
5787 } 5874 }
5788 } 5875 }
5789 } 5876 }
@@ -5945,6 +6032,8 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
5945 /* restore any phy related changes */ 6032 /* restore any phy related changes */
5946 nv_restore_phy(dev); 6033 nv_restore_phy(dev);
5947 6034
6035 nv_mgmt_release_sema(dev);
6036
5948 /* free all structures */ 6037 /* free all structures */
5949 free_rings(dev); 6038 free_rings(dev);
5950 iounmap(get_hwbase(dev)); 6039 iounmap(get_hwbase(dev));
@@ -5995,6 +6084,8 @@ static int nv_resume(struct pci_dev *pdev)
5995 for (i = 0;i <= np->register_size/sizeof(u32); i++) 6084 for (i = 0;i <= np->register_size/sizeof(u32); i++)
5996 writel(np->saved_config_space[i], base+i*sizeof(u32)); 6085 writel(np->saved_config_space[i], base+i*sizeof(u32));
5997 6086
6087 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6088
5998 netif_device_attach(dev); 6089 netif_device_attach(dev);
5999 if (netif_running(dev)) { 6090 if (netif_running(dev)) {
6000 rc = nv_open(dev); 6091 rc = nv_open(dev);
@@ -6057,11 +6148,11 @@ static struct pci_device_id pci_tbl[] = {
6057 }, 6148 },
6058 { /* CK804 Ethernet Controller */ 6149 { /* CK804 Ethernet Controller */
6059 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 6150 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
6060 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6151 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6061 }, 6152 },
6062 { /* CK804 Ethernet Controller */ 6153 { /* CK804 Ethernet Controller */
6063 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 6154 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
6064 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6155 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6065 }, 6156 },
6066 { /* MCP04 Ethernet Controller */ 6157 { /* MCP04 Ethernet Controller */
6067 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 6158 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
@@ -6081,11 +6172,11 @@ static struct pci_device_id pci_tbl[] = {
6081 }, 6172 },
6082 { /* MCP55 Ethernet Controller */ 6173 { /* MCP55 Ethernet Controller */
6083 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 6174 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
6084 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT, 6175 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
6085 }, 6176 },
6086 { /* MCP55 Ethernet Controller */ 6177 { /* MCP55 Ethernet Controller */
6087 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 6178 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
6088 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT, 6179 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
6089 }, 6180 },
6090 { /* MCP61 Ethernet Controller */ 6181 { /* MCP61 Ethernet Controller */
6091 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 6182 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index ce900e54d8d..b037ce9857b 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -209,7 +209,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
209 209
210 if (received < budget) { 210 if (received < budget) {
211 /* done */ 211 /* done */
212 netif_rx_complete(napi); 212 napi_complete(napi);
213 (*fep->ops->napi_enable_rx)(dev); 213 (*fep->ops->napi_enable_rx)(dev);
214 } 214 }
215 return received; 215 return received;
@@ -478,7 +478,7 @@ fs_enet_interrupt(int irq, void *dev_id)
478 /* NOTE: it is possible for FCCs in NAPI mode */ 478 /* NOTE: it is possible for FCCs in NAPI mode */
479 /* to submit a spurious interrupt while in poll */ 479 /* to submit a spurious interrupt while in poll */
480 if (napi_ok) 480 if (napi_ok)
481 __netif_rx_schedule(&fep->napi); 481 __napi_schedule(&fep->napi);
482 } 482 }
483 } 483 }
484 484
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
new file mode 100644
index 00000000000..c434a156d7a
--- /dev/null
+++ b/drivers/net/fsl_pq_mdio.c
@@ -0,0 +1,463 @@
1/*
2 * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
3 * Provides Bus interface for MIIM regs
4 *
5 * Author: Andy Fleming <afleming@freescale.com>
6 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
8 *
9 * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17
18#include <linux/kernel.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/platform_device.h>
33#include <linux/crc32.h>
34#include <linux/mii.h>
35#include <linux/phy.h>
36#include <linux/of.h>
37#include <linux/of_platform.h>
38
39#include <asm/io.h>
40#include <asm/irq.h>
41#include <asm/uaccess.h>
42#include <asm/ucc.h>
43
44#include "gianfar.h"
45#include "fsl_pq_mdio.h"
46
47/*
48 * Write value to the PHY at mii_id at register regnum,
49 * on the bus attached to the local interface, which may be different from the
50 * generic mdio bus (tied to a single interface), waiting until the write is
51 * done before returning. This is helpful in programming interfaces like
52 * the TBI which control interfaces like onchip SERDES and are always tied to
53 * the local mdio pins, which may not be the same as system mdio bus, used for
54 * controlling the external PHYs, for example.
55 */
56int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
57 int regnum, u16 value)
58{
59 /* Set the PHY address and the register address we want to write */
60 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
61
62 /* Write out the value we want */
63 out_be32(&regs->miimcon, value);
64
65 /* Wait for the transaction to finish */
66 while (in_be32(&regs->miimind) & MIIMIND_BUSY)
67 cpu_relax();
68
69 return 0;
70}
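A short illustrative caller (not part of the patch) of the local-write helper above, assuming regs already points at an ioremapped fsl_pq_mdio register block and a PHY sits at address 0; MII_BMCR and BMCR_RESET come from <linux/mii.h>.

/* Illustrative only: software-reset the PHY at address 0. */
fsl_pq_local_mdio_write(regs, 0, MII_BMCR, BMCR_RESET);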
71
72/*
73 * Read the bus for PHY at addr mii_id, register regnum, and
74 * return the value. Clears miimcom first. All PHY operations are
75 * done on the bus attached to the local interface,
76 * which may be different from the generic mdio bus
77 * This is helpful in programming interfaces like
78 * the TBI which, in turn, control interfaces like onchip SERDES
79 * and are always tied to the local mdio pins, which may not be the
80 * same as system mdio bus, used for controlling the external PHYs, for example.
81 */
82int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
83 int mii_id, int regnum)
84{
85 u16 value;
86
87 /* Set the PHY address and the register address we want to read */
88 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
89
90 /* Clear miimcom, and then initiate a read */
91 out_be32(&regs->miimcom, 0);
92 out_be32(&regs->miimcom, MII_READ_COMMAND);
93
94 /* Wait for the transaction to finish */
95 while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
96 cpu_relax();
97
98 /* Grab the value of the register from miimstat */
99 value = in_be32(&regs->miimstat);
100
101 return value;
102}
103
104/*
105 * Write value to the PHY at mii_id at register regnum,
106 * on the bus, waiting until the write is done before returning.
107 */
108int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
109{
110 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
111
112 /* Write to the local MII regs */
113 return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value));
114}
115
116/*
117 * Read the bus for PHY at addr mii_id, register regnum, and
118 * return the value. Clears miimcom first.
119 */
120int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
121{
122 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
123
124 /* Read the local MII regs */
125 return(fsl_pq_local_mdio_read(regs, mii_id, regnum));
126}
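An illustrative caller (not part of the patch) of the bus-level read above; the mii_bus pointer and the PHY address 1 are assumptions.

/* Illustrative only: report link status of the PHY at address 1. */
static int example_link_up(struct mii_bus *bus)
{
	int bmsr = fsl_pq_mdio_read(bus, 1, MII_BMSR);

	return (bmsr & BMSR_LSTATUS) ? 1 : 0;
}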
127
128/* Reset the MIIM registers, and wait for the bus to free */
129static int fsl_pq_mdio_reset(struct mii_bus *bus)
130{
131 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
132 unsigned int timeout = PHY_INIT_TIMEOUT;
133
134 mutex_lock(&bus->mdio_lock);
135
136 /* Reset the management interface */
137 out_be32(&regs->miimcfg, MIIMCFG_RESET);
138
139 /* Setup the MII Mgmt clock speed */
140 out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
141
142 /* Wait until the bus is free */
143 while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
144 cpu_relax();
145
146 mutex_unlock(&bus->mdio_lock);
147
148 if(timeout == 0) {
149 printk(KERN_ERR "%s: The MII Bus is stuck!\n",
150 bus->name);
151 return -EBUSY;
152 }
153
154 return 0;
155}
156
157/* Allocate an array which provides irq #s for each PHY on the given bus */
158static int *create_irq_map(struct device_node *np)
159{
160 int *irqs;
161 int i;
162 struct device_node *child = NULL;
163
164 irqs = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
165
166 if (!irqs)
167 return NULL;
168
169 for (i = 0; i < PHY_MAX_ADDR; i++)
170 irqs[i] = PHY_POLL;
171
172 while ((child = of_get_next_child(np, child)) != NULL) {
173 int irq = irq_of_parse_and_map(child, 0);
174 const u32 *id;
175
176 if (irq == NO_IRQ)
177 continue;
178
179 id = of_get_property(child, "reg", NULL);
180
181 if (!id)
182 continue;
183
184 if (*id < PHY_MAX_ADDR && *id >= 0)
185 irqs[*id] = irq;
186 else
187 printk(KERN_WARNING "%s: "
188 "%d is not a valid PHY address\n",
189 np->full_name, *id);
190 }
191
192 return irqs;
193}
194
195void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
196{
197 const u32 *reg;
198
199 reg = of_get_property(np, "reg", NULL);
200
201 snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
202}
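A worked example (not part of the patch) of the naming helper above: for a hypothetical device tree node called "mdio" whose reg property starts at 0x24520, the id produced is "mdio@24520"; gianfar later appends the PHY address to this string (e.g. "mdio@24520:01") when it builds priv->phy_bus_id.

/* Illustrative only; node name and address are assumptions. */
char bus_id[MII_BUS_ID_SIZE];

fsl_pq_mdio_bus_name(bus_id, np);	/* -> "mdio@24520" for that node */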
203
204/* Scan the bus in reverse, looking for an empty spot */
205static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
206{
207 int i;
208
209 for (i = PHY_MAX_ADDR; i > 0; i--) {
210 u32 phy_id;
211
212 if (get_phy_id(new_bus, i, &phy_id))
213 return -1;
214
215 if (phy_id == 0xffffffff)
216 break;
217 }
218
219 return i;
220}
221
222
223#ifdef CONFIG_GIANFAR
224static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
225{
226 struct gfar __iomem *enet_regs;
227
228 /*
229 * This is mildly evil, but so is our hardware for doing this.
230 * Also, we have to cast back to struct gfar because of
231 * definition weirdness done in gianfar.h.
232 */
233 enet_regs = (struct gfar __iomem *)
234 ((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs));
235
236 return &enet_regs->tbipa;
237}
238#endif
239
240
241#ifdef CONFIG_UCC_GETH
242static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
243{
244 struct device_node *np = NULL;
245 int err = 0;
246
247 for_each_compatible_node(np, NULL, "ucc_geth") {
248 struct resource tempres;
249
250 err = of_address_to_resource(np, 0, &tempres);
251 if (err)
252 continue;
253
254 /* if our mdio regs fall within this UCC regs range */
255 if ((start >= tempres.start) && (end <= tempres.end)) {
256 /* Find the id of the UCC */
257 const u32 *id;
258
259 id = of_get_property(np, "cell-index", NULL);
260 if (!id) {
261 id = of_get_property(np, "device-id", NULL);
262 if (!id)
263 continue;
264 }
265
266 *ucc_id = *id;
267
268 return 0;
269 }
270 }
271
272 if (err)
273 return err;
274 else
275 return -EINVAL;
276}
277#endif
278
279
280static int fsl_pq_mdio_probe(struct of_device *ofdev,
281 const struct of_device_id *match)
282{
283 struct device_node *np = ofdev->node;
284 struct device_node *tbi;
285 struct fsl_pq_mdio __iomem *regs;
286 u32 __iomem *tbipa;
287 struct mii_bus *new_bus;
288 int tbiaddr = -1;
289 u64 addr, size;
290 int err = 0;
291
292 new_bus = mdiobus_alloc();
293 if (NULL == new_bus)
294 return -ENOMEM;
295
296 new_bus->name = "Freescale PowerQUICC MII Bus",
297 new_bus->read = &fsl_pq_mdio_read,
298 new_bus->write = &fsl_pq_mdio_write,
299 new_bus->reset = &fsl_pq_mdio_reset,
300 fsl_pq_mdio_bus_name(new_bus->id, np);
301
302 /* Set the PHY base address */
303 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
304 regs = ioremap(addr, size);
305
306 if (NULL == regs) {
307 err = -ENOMEM;
308 goto err_free_bus;
309 }
310
311 new_bus->priv = (void __force *)regs;
312
313 new_bus->irq = create_irq_map(np);
314
315 if (NULL == new_bus->irq) {
316 err = -ENOMEM;
317 goto err_unmap_regs;
318 }
319
320 new_bus->parent = &ofdev->dev;
321 dev_set_drvdata(&ofdev->dev, new_bus);
322
323 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
324 of_device_is_compatible(np, "gianfar")) {
325#ifdef CONFIG_GIANFAR
326 tbipa = get_gfar_tbipa(regs);
327#else
328 err = -ENODEV;
329 goto err_free_irqs;
330#endif
331 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
332 of_device_is_compatible(np, "ucc_geth_phy")) {
333#ifdef CONFIG_UCC_GETH
334 u32 id;
335
336 tbipa = &regs->utbipar;
337
338 if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
339 goto err_free_irqs;
340
341 ucc_set_qe_mux_mii_mng(id - 1);
342#else
343 err = -ENODEV;
344 goto err_free_irqs;
345#endif
346 } else {
347 err = -ENODEV;
348 goto err_free_irqs;
349 }
350
351 for_each_child_of_node(np, tbi) {
352 if (!strncmp(tbi->type, "tbi-phy", 8))
353 break;
354 }
355
356 if (tbi) {
357 const u32 *prop = of_get_property(tbi, "reg", NULL);
358
359 if (prop)
360 tbiaddr = *prop;
361 }
362
363 if (tbiaddr == -1) {
364 out_be32(tbipa, 0);
365
366 tbiaddr = fsl_pq_mdio_find_free(new_bus);
367 }
368
369 /*
370 * We define TBIPA at 0 to be illegal, opting to fail for boards that
371 * have PHYs at 1-31, rather than change tbipa and rescan.
372 */
373 if (tbiaddr == 0) {
374 err = -EBUSY;
375
376 goto err_free_irqs;
377 }
378
379 out_be32(tbipa, tbiaddr);
380
381 /*
382 * The TBIPHY-only buses will find PHYs at every address,
383 * so we mask them all but the TBI
384 */
385 if (!of_device_is_compatible(np, "fsl,gianfar-mdio"))
386 new_bus->phy_mask = ~(1 << tbiaddr);
387
388 err = mdiobus_register(new_bus);
389
390 if (err) {
391 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
392 new_bus->name);
393 goto err_free_irqs;
394 }
395
396 return 0;
397
398err_free_irqs:
399 kfree(new_bus->irq);
400err_unmap_regs:
401 iounmap(regs);
402err_free_bus:
403 kfree(new_bus);
404
405 return err;
406}
407
408
409static int fsl_pq_mdio_remove(struct of_device *ofdev)
410{
411 struct device *device = &ofdev->dev;
412 struct mii_bus *bus = dev_get_drvdata(device);
413
414 mdiobus_unregister(bus);
415
416 dev_set_drvdata(device, NULL);
417
418 iounmap((void __iomem *)bus->priv);
419 bus->priv = NULL;
420 mdiobus_free(bus);
421
422 return 0;
423}
424
425static struct of_device_id fsl_pq_mdio_match[] = {
426 {
427 .type = "mdio",
428 .compatible = "ucc_geth_phy",
429 },
430 {
431 .type = "mdio",
432 .compatible = "gianfar",
433 },
434 {
435 .compatible = "fsl,ucc-mdio",
436 },
437 {
438 .compatible = "fsl,gianfar-tbi",
439 },
440 {
441 .compatible = "fsl,gianfar-mdio",
442 },
443 {},
444};
445
446static struct of_platform_driver fsl_pq_mdio_driver = {
447 .name = "fsl-pq_mdio",
448 .probe = fsl_pq_mdio_probe,
449 .remove = fsl_pq_mdio_remove,
450 .match_table = fsl_pq_mdio_match,
451};
452
453int __init fsl_pq_mdio_init(void)
454{
455 return of_register_platform_driver(&fsl_pq_mdio_driver);
456}
457
458void fsl_pq_mdio_exit(void)
459{
460 of_unregister_platform_driver(&fsl_pq_mdio_driver);
461}
462subsys_initcall_sync(fsl_pq_mdio_init);
463module_exit(fsl_pq_mdio_exit);
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
new file mode 100644
index 00000000000..36dad527410
--- /dev/null
+++ b/drivers/net/fsl_pq_mdio.h
@@ -0,0 +1,45 @@
1/*
2 * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors
4 *
5 * Author: Andy Fleming
6 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15#ifndef __FSL_PQ_MDIO_H
16#define __FSL_PQ_MDIO_H
17
18#define MIIMIND_BUSY 0x00000001
19#define MIIMIND_NOTVALID 0x00000004
20#define MIIMCFG_INIT_VALUE 0x00000007
21#define MIIMCFG_RESET 0x80000000
22
23#define MII_READ_COMMAND 0x00000001
24
25struct fsl_pq_mdio {
26 u32 miimcfg; /* MII management configuration reg */
27 u32 miimcom; /* MII management command reg */
28 u32 miimadd; /* MII management address reg */
29 u32 miimcon; /* MII management control reg */
30 u32 miimstat; /* MII management status reg */
31 u32 miimind; /* MII management indication reg */
32 u8 reserved[28]; /* Space holder */
33 u32 utbipar; /* TBI phy address reg (only on UCC) */
34} __attribute__ ((packed));
35
36
37int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
38int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
39int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
40 int regnum, u16 value);
41int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
42int __init fsl_pq_mdio_init(void);
43void fsl_pq_mdio_exit(void);
44void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
45#endif /* FSL_PQ_MDIO_H */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 9b12a13a640..a64a4385f5a 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -93,7 +93,7 @@
93#include <linux/of.h> 93#include <linux/of.h>
94 94
95#include "gianfar.h" 95#include "gianfar.h"
96#include "gianfar_mii.h" 96#include "fsl_pq_mdio.h"
97 97
98#define TX_TIMEOUT (1*HZ) 98#define TX_TIMEOUT (1*HZ)
99#undef BRIEF_GFAR_ERRORS 99#undef BRIEF_GFAR_ERRORS
@@ -141,8 +141,6 @@ void gfar_start(struct net_device *dev);
141static void gfar_clear_exact_match(struct net_device *dev); 141static void gfar_clear_exact_match(struct net_device *dev);
142static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 142static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
143 143
144extern const struct ethtool_ops gfar_ethtool_ops;
145
146MODULE_AUTHOR("Freescale Semiconductor, Inc"); 144MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 145MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL"); 146MODULE_LICENSE("GPL");
@@ -166,6 +164,9 @@ static int gfar_of_init(struct net_device *dev)
166 struct gfar_private *priv = netdev_priv(dev); 164 struct gfar_private *priv = netdev_priv(dev);
167 struct device_node *np = priv->node; 165 struct device_node *np = priv->node;
168 char bus_name[MII_BUS_ID_SIZE]; 166 char bus_name[MII_BUS_ID_SIZE];
167 const u32 *stash;
168 const u32 *stash_len;
169 const u32 *stash_idx;
169 170
170 if (!np || !of_device_is_available(np)) 171 if (!np || !of_device_is_available(np))
171 return -ENODEV; 172 return -ENODEV;
@@ -195,6 +196,26 @@ static int gfar_of_init(struct net_device *dev)
195 } 196 }
196 } 197 }
197 198
199 stash = of_get_property(np, "bd-stash", NULL);
200
201 if(stash) {
202 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
203 priv->bd_stash_en = 1;
204 }
205
206 stash_len = of_get_property(np, "rx-stash-len", NULL);
207
208 if (stash_len)
209 priv->rx_stash_size = *stash_len;
210
211 stash_idx = of_get_property(np, "rx-stash-idx", NULL);
212
213 if (stash_idx)
214 priv->rx_stash_index = *stash_idx;
215
216 if (stash_len || stash_idx)
217 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
218
198 mac_addr = of_get_mac_address(np); 219 mac_addr = of_get_mac_address(np);
199 if (mac_addr) 220 if (mac_addr)
200 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); 221 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
@@ -255,7 +276,7 @@ static int gfar_of_init(struct net_device *dev)
255 of_node_put(phy); 276 of_node_put(phy);
256 of_node_put(mdio); 277 of_node_put(mdio);
257 278
258 gfar_mdio_bus_name(bus_name, mdio); 279 fsl_pq_mdio_bus_name(bus_name, mdio);
259 snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id), "%s:%02x", 280 snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id), "%s:%02x",
260 bus_name, *id); 281 bus_name, *id);
261 } 282 }
@@ -425,7 +446,7 @@ static int gfar_probe(struct of_device *ofdev,
425 priv->hash_width = 8; 446 priv->hash_width = 8;
426 447
427 priv->hash_regs[0] = &priv->regs->gaddr0; 448 priv->hash_regs[0] = &priv->regs->gaddr0;
428 priv->hash_regs[1] = &priv->regs->gaddr1; 449 priv->hash_regs[1] = &priv->regs->gaddr1;
429 priv->hash_regs[2] = &priv->regs->gaddr2; 450 priv->hash_regs[2] = &priv->regs->gaddr2;
430 priv->hash_regs[3] = &priv->regs->gaddr3; 451 priv->hash_regs[3] = &priv->regs->gaddr3;
431 priv->hash_regs[4] = &priv->regs->gaddr4; 452 priv->hash_regs[4] = &priv->regs->gaddr4;
@@ -466,6 +487,9 @@ static int gfar_probe(struct of_device *ofdev,
466 goto register_fail; 487 goto register_fail;
467 } 488 }
468 489
490 device_init_wakeup(&dev->dev,
491 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
492
469 /* fill out IRQ number and name fields */ 493 /* fill out IRQ number and name fields */
470 len_devname = strlen(dev->name); 494 len_devname = strlen(dev->name);
471 strncpy(&priv->int_name_tx[0], dev->name, len_devname); 495 strncpy(&priv->int_name_tx[0], dev->name, len_devname);
@@ -838,7 +862,7 @@ void stop_gfar(struct net_device *dev)
838 free_irq(priv->interruptTransmit, dev); 862 free_irq(priv->interruptTransmit, dev);
839 free_irq(priv->interruptReceive, dev); 863 free_irq(priv->interruptReceive, dev);
840 } else { 864 } else {
841 free_irq(priv->interruptTransmit, dev); 865 free_irq(priv->interruptTransmit, dev);
842 } 866 }
843 867
844 free_skb_resources(priv); 868 free_skb_resources(priv);
@@ -1183,6 +1207,8 @@ static int gfar_enet_open(struct net_device *dev)
1183 1207
1184 napi_enable(&priv->napi); 1208 napi_enable(&priv->napi);
1185 1209
1210 skb_queue_head_init(&priv->rx_recycle);
1211
1186 /* Initialize a bunch of registers */ 1212 /* Initialize a bunch of registers */
1187 init_registers(dev); 1213 init_registers(dev);
1188 1214
@@ -1203,6 +1229,8 @@ static int gfar_enet_open(struct net_device *dev)
1203 1229
1204 netif_start_queue(dev); 1230 netif_start_queue(dev);
1205 1231
1232 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1233
1206 return err; 1234 return err;
1207} 1235}
1208 1236
@@ -1399,6 +1427,7 @@ static int gfar_close(struct net_device *dev)
1399 1427
1400 napi_disable(&priv->napi); 1428 napi_disable(&priv->napi);
1401 1429
1430 skb_queue_purge(&priv->rx_recycle);
1402 cancel_work_sync(&priv->reset_task); 1431 cancel_work_sync(&priv->reset_task);
1403 stop_gfar(dev); 1432 stop_gfar(dev);
1404 1433
@@ -1595,7 +1624,17 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1595 bdp = next_txbd(bdp, base, tx_ring_size); 1624 bdp = next_txbd(bdp, base, tx_ring_size);
1596 } 1625 }
1597 1626
1598 dev_kfree_skb_any(skb); 1627 /*
1628 * If there's room in the queue (limit it to rx_buffer_size)
1629 * we add this skb back into the pool, if it's the right size
1630 */
1631 if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
1632 skb_recycle_check(skb, priv->rx_buffer_size +
1633 RXBUF_ALIGNMENT))
1634 __skb_queue_head(&priv->rx_recycle, skb);
1635 else
1636 dev_kfree_skb_any(skb);
1637
1599 priv->tx_skbuff[skb_dirtytx] = NULL; 1638 priv->tx_skbuff[skb_dirtytx] = NULL;
1600 1639
1601 skb_dirtytx = (skb_dirtytx + 1) & 1640 skb_dirtytx = (skb_dirtytx + 1) &
@@ -1626,9 +1665,9 @@ static void gfar_schedule_cleanup(struct net_device *dev)
1626 spin_lock_irqsave(&priv->txlock, flags); 1665 spin_lock_irqsave(&priv->txlock, flags);
1627 spin_lock(&priv->rxlock); 1666 spin_lock(&priv->rxlock);
1628 1667
1629 if (netif_rx_schedule_prep(&priv->napi)) { 1668 if (napi_schedule_prep(&priv->napi)) {
1630 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); 1669 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1631 __netif_rx_schedule(&priv->napi); 1670 __napi_schedule(&priv->napi);
1632 } else { 1671 } else {
1633 /* 1672 /*
1634 * Clear IEVENT, so interrupts aren't called again 1673 * Clear IEVENT, so interrupts aren't called again
@@ -1674,8 +1713,10 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
1674 struct gfar_private *priv = netdev_priv(dev); 1713 struct gfar_private *priv = netdev_priv(dev);
1675 struct sk_buff *skb = NULL; 1714 struct sk_buff *skb = NULL;
1676 1715
1677 /* We have to allocate the skb, so keep trying till we succeed */ 1716 skb = __skb_dequeue(&priv->rx_recycle);
1678 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); 1717 if (!skb)
1718 skb = netdev_alloc_skb(dev,
1719 priv->rx_buffer_size + RXBUF_ALIGNMENT);
1679 1720
1680 if (!skb) 1721 if (!skb)
1681 return NULL; 1722 return NULL;
@@ -1823,7 +1864,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1823 if (unlikely(!newskb)) 1864 if (unlikely(!newskb))
1824 newskb = skb; 1865 newskb = skb;
1825 else if (skb) 1866 else if (skb)
1826 dev_kfree_skb_any(skb); 1867 __skb_queue_head(&priv->rx_recycle, skb);
1827 } else { 1868 } else {
1828 /* Increment the number of packets */ 1869 /* Increment the number of packets */
1829 dev->stats.rx_packets++; 1870 dev->stats.rx_packets++;
@@ -1835,6 +1876,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1835 skb_put(skb, pkt_len); 1876 skb_put(skb, pkt_len);
1836 dev->stats.rx_bytes += pkt_len; 1877 dev->stats.rx_bytes += pkt_len;
1837 1878
1879 if (in_irq() || irqs_disabled())
1880 printk("Interrupt problem!\n");
1838 gfar_process_frame(dev, skb, amount_pull); 1881 gfar_process_frame(dev, skb, amount_pull);
1839 1882
1840 } else { 1883 } else {
@@ -1891,7 +1934,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1891 return budget; 1934 return budget;
1892 1935
1893 if (rx_cleaned < budget) { 1936 if (rx_cleaned < budget) {
1894 netif_rx_complete(napi); 1937 napi_complete(napi);
1895 1938
1896 /* Clear the halt bit in RSTAT */ 1939 /* Clear the halt bit in RSTAT */
1897 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 1940 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
@@ -2308,23 +2351,12 @@ static struct of_platform_driver gfar_driver = {
2308 2351
2309static int __init gfar_init(void) 2352static int __init gfar_init(void)
2310{ 2353{
2311 int err = gfar_mdio_init(); 2354 return of_register_platform_driver(&gfar_driver);
2312
2313 if (err)
2314 return err;
2315
2316 err = of_register_platform_driver(&gfar_driver);
2317
2318 if (err)
2319 gfar_mdio_exit();
2320
2321 return err;
2322} 2355}
2323 2356
2324static void __exit gfar_exit(void) 2357static void __exit gfar_exit(void)
2325{ 2358{
2326 of_unregister_platform_driver(&gfar_driver); 2359 of_unregister_platform_driver(&gfar_driver);
2327 gfar_mdio_exit();
2328} 2360}
2329 2361
2330module_init(gfar_init); 2362module_init(gfar_init);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index eaa86897f5c..54332b0059d 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -46,7 +46,6 @@
46#include <linux/workqueue.h> 46#include <linux/workqueue.h>
47#include <linux/ethtool.h> 47#include <linux/ethtool.h>
48#include <linux/fsl_devices.h> 48#include <linux/fsl_devices.h>
49#include "gianfar_mii.h"
50 49
51/* The maximum number of packets to be handled in one call of gfar_poll */ 50/* The maximum number of packets to be handled in one call of gfar_poll */
52#define GFAR_DEV_WEIGHT 64 51#define GFAR_DEV_WEIGHT 64
@@ -126,9 +125,12 @@ extern const char gfar_driver_version[];
126#define DEFAULT_RX_COALESCE 0 125#define DEFAULT_RX_COALESCE 0
127#define DEFAULT_RXCOUNT 0 126#define DEFAULT_RXCOUNT 0
128 127
129#define MIIMCFG_INIT_VALUE 0x00000007 128#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
130#define MIIMCFG_RESET 0x80000000 129 | SUPPORTED_10baseT_Full \
131#define MIIMIND_BUSY 0x00000001 130 | SUPPORTED_100baseT_Half \
131 | SUPPORTED_100baseT_Full \
132 | SUPPORTED_Autoneg \
133 | SUPPORTED_MII)
132 134
133/* TBI register addresses */ 135/* TBI register addresses */
134#define MII_TBICON 0x11 136#define MII_TBICON 0x11
@@ -756,6 +758,8 @@ struct gfar_private {
756 unsigned int rx_stash_size; 758 unsigned int rx_stash_size;
757 unsigned int rx_stash_index; 759 unsigned int rx_stash_index;
758 760
761 struct sk_buff_head rx_recycle;
762
759 struct vlan_group *vlgrp; 763 struct vlan_group *vlgrp;
760 764
761 /* Unprotected fields */ 765 /* Unprotected fields */
@@ -826,8 +830,7 @@ extern void gfar_halt(struct net_device *dev);
826extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 830extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
827 int enable, u32 regnum, u32 read); 831 int enable, u32 regnum, u32 read);
828void gfar_init_sysfs(struct net_device *dev); 832void gfar_init_sysfs(struct net_device *dev);
829int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, 833
830 int regnum, u16 value); 834extern const struct ethtool_ops gfar_ethtool_ops;
831int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
832 835
833#endif /* __GIANFAR_H */ 836#endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 59b3b5d98ef..dbf06e9313c 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -600,6 +600,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
600 600
601 spin_lock_irqsave(&priv->bflock, flags); 601 spin_lock_irqsave(&priv->bflock, flags);
602 priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; 602 priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
603 device_set_wakeup_enable(&dev->dev, priv->wol_en);
603 spin_unlock_irqrestore(&priv->bflock, flags); 604 spin_unlock_irqrestore(&priv->bflock, flags);
604 605
605 return 0; 606 return 0;
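
The one-line addition above also records the Wake-on-LAN choice in the device's PM wakeup state, so the power-management core and the sysfs power/wakeup attribute stay in sync with the driver's own flag. A hedged sketch of the usual shape of such a set_wol handler (example_priv and wol_en are placeholders, not the gianfar structures):

#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

struct example_priv {
	int wol_en;
};

static int example_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct example_priv *priv = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EOPNOTSUPP;

	priv->wol_en = !!(wol->wolopts & WAKE_MAGIC);
	/* mirror the choice into the struct device wakeup machinery */
	device_set_wakeup_enable(&dev->dev, priv->wol_en);
	return 0;
}
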
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index f49a426ad68..64e4679b327 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -105,7 +105,7 @@ int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum)
105 * All PHY configuration is done through the TSEC1 MIIM regs */ 105 * All PHY configuration is done through the TSEC1 MIIM regs */
106int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) 106int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
107{ 107{
108 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; 108 struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv;
109 109
110 /* Write to the local MII regs */ 110 /* Write to the local MII regs */
111 return(gfar_local_mdio_write(regs, mii_id, regnum, value)); 111 return(gfar_local_mdio_write(regs, mii_id, regnum, value));
@@ -116,7 +116,7 @@ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
116 * configuration has to be done through the TSEC1 MIIM regs */ 116 * configuration has to be done through the TSEC1 MIIM regs */
117int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 117int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
118{ 118{
119 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; 119 struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv;
120 120
121 /* Read the local MII regs */ 121 /* Read the local MII regs */
122 return(gfar_local_mdio_read(regs, mii_id, regnum)); 122 return(gfar_local_mdio_read(regs, mii_id, regnum));
@@ -125,7 +125,7 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
125/* Reset the MIIM registers, and wait for the bus to free */ 125/* Reset the MIIM registers, and wait for the bus to free */
126static int gfar_mdio_reset(struct mii_bus *bus) 126static int gfar_mdio_reset(struct mii_bus *bus)
127{ 127{
128 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; 128 struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv;
129 unsigned int timeout = PHY_INIT_TIMEOUT; 129 unsigned int timeout = PHY_INIT_TIMEOUT;
130 130
131 mutex_lock(&bus->mdio_lock); 131 mutex_lock(&bus->mdio_lock);
@@ -268,8 +268,8 @@ static int gfar_mdio_probe(struct of_device *ofdev,
268 * Also, we have to cast back to struct gfar_mii because of 268 * Also, we have to cast back to struct gfar_mii because of
269 * definition weirdness done in gianfar.h. 269 * definition weirdness done in gianfar.h.
270 */ 270 */
271 enet_regs = (struct gfar __iomem *) 271 enet_regs = (struct gfar __force __iomem *)
272 ((char *)regs - offsetof(struct gfar, gfar_mii_regs)); 272 ((char __force *)regs - offsetof(struct gfar, gfar_mii_regs));
273 273
274 for_each_child_of_node(np, tbi) { 274 for_each_child_of_node(np, tbi) {
275 if (!strncmp(tbi->type, "tbi-phy", 8)) 275 if (!strncmp(tbi->type, "tbi-phy", 8))
@@ -337,7 +337,7 @@ static int gfar_mdio_remove(struct of_device *ofdev)
337 337
338 dev_set_drvdata(&ofdev->dev, NULL); 338 dev_set_drvdata(&ofdev->dev, NULL);
339 339
340 iounmap((void __iomem *)bus->priv); 340 iounmap((void __force __iomem *)bus->priv);
341 bus->priv = NULL; 341 bus->priv = NULL;
342 kfree(bus->irq); 342 kfree(bus->irq);
343 mdiobus_free(bus); 343 mdiobus_free(bus);
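
The __force additions in gianfar_mii.c are sparse annotations, not functional changes: bus->priv is a plain void *, so every time the driver stores or recovers an __iomem register pointer through it the address space is deliberately dropped or re-applied, and __force tells sparse the conversion is intentional. A stripped-down illustration of the same round trip (names invented):

#include <linux/compiler.h>

struct example_regs;

static void *stash;	/* stand-in for bus->priv, which is a plain void * */

static void example_stash(struct example_regs __iomem *regs)
{
	stash = (void __force *)regs;		/* drop __iomem on purpose */
}

static struct example_regs __iomem *example_unstash(void)
{
	return (struct example_regs __force __iomem *)stash;	/* put it back */
}
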
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
deleted file mode 100644
index 65c242cd468..00000000000
--- a/drivers/net/gianfar_mii.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * drivers/net/gianfar_mii.h
3 *
4 * Gianfar Ethernet Driver -- MII Management Bus Implementation
5 * Driver for the MDIO bus controller in the Gianfar register space
6 *
7 * Author: Andy Fleming
8 * Maintainer: Kumar Gala
9 *
10 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18#ifndef __GIANFAR_MII_H
19#define __GIANFAR_MII_H
20
21struct gfar_private; /* forward ref */
22
23#define MIIMIND_BUSY 0x00000001
24#define MIIMIND_NOTVALID 0x00000004
25
26#define MII_READ_COMMAND 0x00000001
27
28#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
29 | SUPPORTED_10baseT_Full \
30 | SUPPORTED_100baseT_Half \
31 | SUPPORTED_100baseT_Full \
32 | SUPPORTED_Autoneg \
33 | SUPPORTED_MII)
34
35struct gfar_mii {
36 u32 miimcfg; /* 0x.520 - MII Management Config Register */
37 u32 miimcom; /* 0x.524 - MII Management Command Register */
38 u32 miimadd; /* 0x.528 - MII Management Address Register */
39 u32 miimcon; /* 0x.52c - MII Management Control Register */
40 u32 miimstat; /* 0x.530 - MII Management Status Register */
41 u32 miimind; /* 0x.534 - MII Management Indicator Register */
42};
43
44int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
45int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
46int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
47 int regnum, u16 value);
48int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
49struct mii_bus *gfar_get_miibus(const struct gfar_private *priv);
50int __init gfar_mdio_init(void);
51void gfar_mdio_exit(void);
52
53void gfar_mdio_bus_name(char *name, struct device_node *np);
54#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 782c2017008..dd26da74f27 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -53,6 +53,9 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
53 u32 temp; 53 u32 temp;
54 unsigned long flags; 54 unsigned long flags;
55 55
56 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
57 return count;
58
56 /* Find out the new setting */ 59 /* Find out the new setting */
57 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) 60 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
58 new_setting = 1; 61 new_setting = 1;
@@ -81,7 +84,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
81 return count; 84 return count;
82} 85}
83 86
84DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); 87static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
85 88
86static ssize_t gfar_show_rx_stash_size(struct device *dev, 89static ssize_t gfar_show_rx_stash_size(struct device *dev,
87 struct device_attribute *attr, char *buf) 90 struct device_attribute *attr, char *buf)
@@ -100,6 +103,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
100 u32 temp; 103 u32 temp;
101 unsigned long flags; 104 unsigned long flags;
102 105
106 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
107 return count;
108
103 spin_lock_irqsave(&priv->rxlock, flags); 109 spin_lock_irqsave(&priv->rxlock, flags);
104 if (length > priv->rx_buffer_size) 110 if (length > priv->rx_buffer_size)
105 goto out; 111 goto out;
@@ -130,8 +136,8 @@ out:
130 return count; 136 return count;
131} 137}
132 138
133DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, 139static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
134 gfar_set_rx_stash_size); 140 gfar_set_rx_stash_size);
135 141
136/* Stashing will only be enabled when rx_stash_size != 0 */ 142/* Stashing will only be enabled when rx_stash_size != 0 */
137static ssize_t gfar_show_rx_stash_index(struct device *dev, 143static ssize_t gfar_show_rx_stash_index(struct device *dev,
@@ -152,6 +158,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
152 u32 temp; 158 u32 temp;
153 unsigned long flags; 159 unsigned long flags;
154 160
161 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
162 return count;
163
155 spin_lock_irqsave(&priv->rxlock, flags); 164 spin_lock_irqsave(&priv->rxlock, flags);
156 if (index > priv->rx_stash_size) 165 if (index > priv->rx_stash_size)
157 goto out; 166 goto out;
@@ -172,8 +181,8 @@ out:
172 return count; 181 return count;
173} 182}
174 183
175DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, 184static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
176 gfar_set_rx_stash_index); 185 gfar_set_rx_stash_index);
177 186
178static ssize_t gfar_show_fifo_threshold(struct device *dev, 187static ssize_t gfar_show_fifo_threshold(struct device *dev,
179 struct device_attribute *attr, 188 struct device_attribute *attr,
@@ -210,8 +219,8 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
210 return count; 219 return count;
211} 220}
212 221
213DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, 222static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
214 gfar_set_fifo_threshold); 223 gfar_set_fifo_threshold);
215 224
216static ssize_t gfar_show_fifo_starve(struct device *dev, 225static ssize_t gfar_show_fifo_starve(struct device *dev,
217 struct device_attribute *attr, char *buf) 226 struct device_attribute *attr, char *buf)
@@ -247,7 +256,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
247 return count; 256 return count;
248} 257}
249 258
250DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, gfar_set_fifo_starve); 259static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
260 gfar_set_fifo_starve);
251 261
252static ssize_t gfar_show_fifo_starve_off(struct device *dev, 262static ssize_t gfar_show_fifo_starve_off(struct device *dev,
253 struct device_attribute *attr, 263 struct device_attribute *attr,
@@ -284,8 +294,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
284 return count; 294 return count;
285} 295}
286 296
287DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, 297static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
288 gfar_set_fifo_starve_off); 298 gfar_set_fifo_starve_off);
289 299
290void gfar_init_sysfs(struct net_device *dev) 300void gfar_init_sysfs(struct net_device *dev)
291{ 301{
@@ -293,12 +303,9 @@ void gfar_init_sysfs(struct net_device *dev)
293 int rc; 303 int rc;
294 304
295 /* Initialize the default values */ 305 /* Initialize the default values */
296 priv->rx_stash_size = DEFAULT_STASH_LENGTH;
297 priv->rx_stash_index = DEFAULT_STASH_INDEX;
298 priv->fifo_threshold = DEFAULT_FIFO_TX_THR; 306 priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
299 priv->fifo_starve = DEFAULT_FIFO_TX_STARVE; 307 priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
300 priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF; 308 priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
301 priv->bd_stash_en = DEFAULT_BD_STASH;
302 309
303 /* Create our sysfs files */ 310 /* Create our sysfs files */
304 rc = device_create_file(&dev->dev, &dev_attr_bd_stash); 311 rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
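
Two independent fixes run through gianfar_sysfs.c: the store handlers now bail out early when the hardware lacks the BD/buffer stashing capability, and the DEVICE_ATTR() definitions become static since nothing outside the file refers to them. DEVICE_ATTR(name, ...) expands to a struct device_attribute called dev_attr_<name>, which is what device_create_file() takes; a bare-bones sketch of that pattern with placeholder callbacks:

#include <linux/kernel.h>
#include <linux/device.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return count;
}

/* expands to: static struct device_attribute dev_attr_example = ... */
static DEVICE_ATTR(example, 0644, example_show, example_store);

/* registered later with device_create_file(dev, &dev_attr_example) */
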
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 7e8b3c59a7d..455641f8677 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1244,7 +1244,7 @@ do { \
1244 csum_add(sum, (ih)->saddr & 0xffff); \ 1244 csum_add(sum, (ih)->saddr & 0xffff); \
1245 csum_add(sum, (ih)->daddr >> 16); \ 1245 csum_add(sum, (ih)->daddr >> 16); \
1246 csum_add(sum, (ih)->daddr & 0xffff); \ 1246 csum_add(sum, (ih)->daddr & 0xffff); \
1247 csum_add(sum, __constant_htons(IPPROTO_UDP)); \ 1247 csum_add(sum, cpu_to_be16(IPPROTO_UDP)); \
1248 csum_add(sum, (uh)->len); \ 1248 csum_add(sum, (uh)->len); \
1249} while (0) 1249} while (0)
1250 1250
@@ -1255,7 +1255,7 @@ do { \
1255 csum_add(sum, (ih)->saddr & 0xffff); \ 1255 csum_add(sum, (ih)->saddr & 0xffff); \
1256 csum_add(sum, (ih)->daddr >> 16); \ 1256 csum_add(sum, (ih)->daddr >> 16); \
1257 csum_add(sum, (ih)->daddr & 0xffff); \ 1257 csum_add(sum, (ih)->daddr & 0xffff); \
1258 csum_add(sum, __constant_htons(IPPROTO_TCP)); \ 1258 csum_add(sum, cpu_to_be16(IPPROTO_TCP)); \
1259 csum_add(sum, htons(len)); \ 1259 csum_add(sum, htons(len)); \
1260} while (0) 1260} while (0)
1261#endif 1261#endif
@@ -1296,7 +1296,7 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
1296 /* tack on checksum tag */ 1296 /* tack on checksum tag */
1297 u32 tagval = 0; 1297 u32 tagval = 0;
1298 struct ethhdr *eh = (struct ethhdr *)skb->data; 1298 struct ethhdr *eh = (struct ethhdr *)skb->data;
1299 if (eh->h_proto == __constant_htons(ETH_P_IP)) { 1299 if (eh->h_proto == cpu_to_be16(ETH_P_IP)) {
1300 struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN); 1300 struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN);
1301 if (ih->protocol == IPPROTO_UDP) { 1301 if (ih->protocol == IPPROTO_UDP) {
1302 struct udphdr *uh 1302 struct udphdr *uh
@@ -1605,7 +1605,7 @@ static int hamachi_rx(struct net_device *dev)
1605 */ 1605 */
1606 if (ntohs(ih->tot_len) >= 46){ 1606 if (ntohs(ih->tot_len) >= 46){
1607 /* don't worry about frags */ 1607 /* don't worry about frags */
1608 if (!(ih->frag_off & __constant_htons(IP_MF|IP_OFFSET))) { 1608 if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) {
1609 u32 inv = *(u32 *) &buf_addr[data_size - 16]; 1609 u32 inv = *(u32 *) &buf_addr[data_size - 16];
1610 u32 *p = (u32 *) &buf_addr[data_size - 20]; 1610 u32 *p = (u32 *) &buf_addr[data_size - 20];
1611 register u32 crc, p_r, p_r1; 1611 register u32 crc, p_r, p_r1;
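
The hamachi.c changes are a mechanical spelling update: __constant_htons() and cpu_to_be16() both yield a compile-time big-endian constant when given a constant argument, and the latter is the preferred form (it is also explicit about producing a __be16 for sparse). A small illustration of the usual comparison against a wire-format field:

#include <linux/if_ether.h>
#include <asm/byteorder.h>

static int example_is_ipv4(const struct ethhdr *eh)
{
	/* h_proto is big-endian on the wire; compare against a be16 constant */
	return eh->h_proto == cpu_to_be16(ETH_P_IP);
}
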
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 2d4089894ec..3da9f394b4c 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -322,23 +322,25 @@ static const struct header_ops sp_header_ops = {
322 .rebuild = sp_rebuild_header, 322 .rebuild = sp_rebuild_header,
323}; 323};
324 324
325static const struct net_device_ops sp_netdev_ops = {
326 .ndo_open = sp_open_dev,
327 .ndo_stop = sp_close,
328 .ndo_start_xmit = sp_xmit,
329 .ndo_set_mac_address = sp_set_mac_address,
330};
331
325static void sp_setup(struct net_device *dev) 332static void sp_setup(struct net_device *dev)
326{ 333{
327 /* Finish setting up the DEVICE info. */ 334 /* Finish setting up the DEVICE info. */
328 dev->mtu = SIXP_MTU; 335 dev->netdev_ops = &sp_netdev_ops;
329 dev->hard_start_xmit = sp_xmit;
330 dev->open = sp_open_dev;
331 dev->destructor = free_netdev; 336 dev->destructor = free_netdev;
332 dev->stop = sp_close; 337 dev->mtu = SIXP_MTU;
333
334 dev->set_mac_address = sp_set_mac_address;
335 dev->hard_header_len = AX25_MAX_HEADER_LEN; 338 dev->hard_header_len = AX25_MAX_HEADER_LEN;
336 dev->header_ops = &sp_header_ops; 339 dev->header_ops = &sp_header_ops;
337 340
338 dev->addr_len = AX25_ADDR_LEN; 341 dev->addr_len = AX25_ADDR_LEN;
339 dev->type = ARPHRD_AX25; 342 dev->type = ARPHRD_AX25;
340 dev->tx_queue_len = 10; 343 dev->tx_queue_len = 10;
341 dev->tx_timeout = NULL;
342 344
343 /* Only activated in AX.25 mode */ 345 /* Only activated in AX.25 mode */
344 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 346 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
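
The 6pack change is the first of several identical conversions in the hamradio drivers below: the open/stop/start_xmit/set_mac_address pointers that used to be assigned one by one on struct net_device move into a single const struct net_device_ops, and the setup routine only has to point dev->netdev_ops at it. Roughly, with placeholder callbacks that stand in for a real driver's handlers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* placeholder: a real driver queues this to hardware */
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,
	.ndo_stop	= example_stop,
	.ndo_start_xmit	= example_xmit,
};

static void example_setup(struct net_device *dev)
{
	dev->netdev_ops = &example_netdev_ops;
}
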
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 81a65e3a1c0..bb78c11559c 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -203,7 +203,6 @@ struct baycom_state {
203 unsigned char buf[TXBUFFER_SIZE]; 203 unsigned char buf[TXBUFFER_SIZE];
204 } hdlctx; 204 } hdlctx;
205 205
206 struct net_device_stats stats;
207 unsigned int ptt_keyed; 206 unsigned int ptt_keyed;
208 struct sk_buff *skb; /* next transmit packet */ 207 struct sk_buff *skb; /* next transmit packet */
209 208
@@ -423,7 +422,7 @@ static void encode_hdlc(struct baycom_state *bc)
423 bc->hdlctx.bufptr = bc->hdlctx.buf; 422 bc->hdlctx.bufptr = bc->hdlctx.buf;
424 bc->hdlctx.bufcnt = wp - bc->hdlctx.buf; 423 bc->hdlctx.bufcnt = wp - bc->hdlctx.buf;
425 dev_kfree_skb(skb); 424 dev_kfree_skb(skb);
426 bc->stats.tx_packets++; 425 bc->dev->stats.tx_packets++;
427} 426}
428 427
429/* ---------------------------------------------------------------------- */ 428/* ---------------------------------------------------------------------- */
@@ -547,7 +546,7 @@ static void do_rxpacket(struct net_device *dev)
547 pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */ 546 pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */
548 if (!(skb = dev_alloc_skb(pktlen))) { 547 if (!(skb = dev_alloc_skb(pktlen))) {
549 printk("%s: memory squeeze, dropping packet\n", dev->name); 548 printk("%s: memory squeeze, dropping packet\n", dev->name);
550 bc->stats.rx_dropped++; 549 dev->stats.rx_dropped++;
551 return; 550 return;
552 } 551 }
553 cp = skb_put(skb, pktlen); 552 cp = skb_put(skb, pktlen);
@@ -555,7 +554,7 @@ static void do_rxpacket(struct net_device *dev)
555 memcpy(cp, bc->hdlcrx.buf, pktlen - 1); 554 memcpy(cp, bc->hdlcrx.buf, pktlen - 1);
556 skb->protocol = ax25_type_trans(skb, dev); 555 skb->protocol = ax25_type_trans(skb, dev);
557 netif_rx(skb); 556 netif_rx(skb);
558 bc->stats.rx_packets++; 557 dev->stats.rx_packets++;
559} 558}
560 559
561static int receive(struct net_device *dev, int cnt) 560static int receive(struct net_device *dev, int cnt)
@@ -802,19 +801,6 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr)
802 801
803/* --------------------------------------------------------------------- */ 802/* --------------------------------------------------------------------- */
804 803
805static struct net_device_stats *baycom_get_stats(struct net_device *dev)
806{
807 struct baycom_state *bc = netdev_priv(dev);
808
809 /*
810 * Get the current statistics. This may be called with the
811 * card open or closed.
812 */
813 return &bc->stats;
814}
815
816/* --------------------------------------------------------------------- */
817
818static void epp_wakeup(void *handle) 804static void epp_wakeup(void *handle)
819{ 805{
820 struct net_device *dev = (struct net_device *)handle; 806 struct net_device *dev = (struct net_device *)handle;
@@ -1065,10 +1051,10 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1065 hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT); 1051 hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT);
1066 hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT); 1052 hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT);
1067 hi.data.cs.ptt_keyed = bc->ptt_keyed; 1053 hi.data.cs.ptt_keyed = bc->ptt_keyed;
1068 hi.data.cs.tx_packets = bc->stats.tx_packets; 1054 hi.data.cs.tx_packets = dev->stats.tx_packets;
1069 hi.data.cs.tx_errors = bc->stats.tx_errors; 1055 hi.data.cs.tx_errors = dev->stats.tx_errors;
1070 hi.data.cs.rx_packets = bc->stats.rx_packets; 1056 hi.data.cs.rx_packets = dev->stats.rx_packets;
1071 hi.data.cs.rx_errors = bc->stats.rx_errors; 1057 hi.data.cs.rx_errors = dev->stats.rx_errors;
1072 break; 1058 break;
1073 1059
1074 case HDLCDRVCTL_OLDGETSTAT: 1060 case HDLCDRVCTL_OLDGETSTAT:
@@ -1116,6 +1102,14 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1116 1102
1117/* --------------------------------------------------------------------- */ 1103/* --------------------------------------------------------------------- */
1118 1104
1105static const struct net_device_ops baycom_netdev_ops = {
1106 .ndo_open = epp_open,
1107 .ndo_stop = epp_close,
1108 .ndo_do_ioctl = baycom_ioctl,
1109 .ndo_start_xmit = baycom_send_packet,
1110 .ndo_set_mac_address = baycom_set_mac_address,
1111};
1112
1119/* 1113/*
1120 * Check for a network adaptor of this type, and return '0' if one exists. 1114 * Check for a network adaptor of this type, and return '0' if one exists.
1121 * If dev->base_addr == 0, probe all likely locations. 1115 * If dev->base_addr == 0, probe all likely locations.
@@ -1143,17 +1137,12 @@ static void baycom_probe(struct net_device *dev)
1143 /* 1137 /*
1144 * initialize the device struct 1138 * initialize the device struct
1145 */ 1139 */
1146 dev->open = epp_open;
1147 dev->stop = epp_close;
1148 dev->do_ioctl = baycom_ioctl;
1149 dev->hard_start_xmit = baycom_send_packet;
1150 dev->get_stats = baycom_get_stats;
1151 1140
1152 /* Fill in the fields of the device structure */ 1141 /* Fill in the fields of the device structure */
1153 bc->skb = NULL; 1142 bc->skb = NULL;
1154 1143
1144 dev->netdev_ops = &baycom_netdev_ops;
1155 dev->header_ops = &ax25_header_ops; 1145 dev->header_ops = &ax25_header_ops;
1156 dev->set_mac_address = baycom_set_mac_address;
1157 1146
1158 dev->type = ARPHRD_AX25; /* AF_AX25 device */ 1147 dev->type = ARPHRD_AX25; /* AF_AX25 device */
1159 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; 1148 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
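
baycom_epp.c also shows the second recurring pattern in this series: the driver-private struct net_device_stats copy and the get_stats callback are dropped in favour of the stats member embedded in struct net_device, which the networking core returns by default when no get_stats hook is installed. Counters are then bumped directly, for example:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_count_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* no private copy, no get_stats(): the core reads &dev->stats */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}
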
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 46f8f3390e7..2c619bc99ae 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -97,7 +97,7 @@ static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
97static int bpq_device_event(struct notifier_block *, unsigned long, void *); 97static int bpq_device_event(struct notifier_block *, unsigned long, void *);
98 98
99static struct packet_type bpq_packet_type = { 99static struct packet_type bpq_packet_type = {
100 .type = __constant_htons(ETH_P_BPQ), 100 .type = cpu_to_be16(ETH_P_BPQ),
101 .func = bpq_rcv, 101 .func = bpq_rcv,
102}; 102};
103 103
@@ -110,7 +110,6 @@ struct bpqdev {
110 struct list_head bpq_list; /* list of bpq devices chain */ 110 struct list_head bpq_list; /* list of bpq devices chain */
111 struct net_device *ethdev; /* link to ethernet device */ 111 struct net_device *ethdev; /* link to ethernet device */
112 struct net_device *axdev; /* bpq device (bpq#) */ 112 struct net_device *axdev; /* bpq device (bpq#) */
113 struct net_device_stats stats; /* some statistics */
114 char dest_addr[6]; /* ether destination address */ 113 char dest_addr[6]; /* ether destination address */
115 char acpt_addr[6]; /* accept ether frames from this address only */ 114 char acpt_addr[6]; /* accept ether frames from this address only */
116}; 115};
@@ -222,8 +221,8 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
222 skb_pull(skb, 2); /* Remove the length bytes */ 221 skb_pull(skb, 2); /* Remove the length bytes */
223 skb_trim(skb, len); /* Set the length of the data */ 222 skb_trim(skb, len); /* Set the length of the data */
224 223
225 bpq->stats.rx_packets++; 224 dev->stats.rx_packets++;
226 bpq->stats.rx_bytes += len; 225 dev->stats.rx_bytes += len;
227 226
228 ptr = skb_push(skb, 1); 227 ptr = skb_push(skb, 1);
229 *ptr = 0; 228 *ptr = 0;
@@ -292,7 +291,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
292 bpq = netdev_priv(dev); 291 bpq = netdev_priv(dev);
293 292
294 if ((dev = bpq_get_ether_dev(dev)) == NULL) { 293 if ((dev = bpq_get_ether_dev(dev)) == NULL) {
295 bpq->stats.tx_dropped++; 294 dev->stats.tx_dropped++;
296 kfree_skb(skb); 295 kfree_skb(skb);
297 return -ENODEV; 296 return -ENODEV;
298 } 297 }
@@ -300,8 +299,8 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
300 skb->protocol = ax25_type_trans(skb, dev); 299 skb->protocol = ax25_type_trans(skb, dev);
301 skb_reset_network_header(skb); 300 skb_reset_network_header(skb);
302 dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); 301 dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
303 bpq->stats.tx_packets++; 302 dev->stats.tx_packets++;
304 bpq->stats.tx_bytes+=skb->len; 303 dev->stats.tx_bytes+=skb->len;
305 304
306 dev_queue_xmit(skb); 305 dev_queue_xmit(skb);
307 netif_wake_queue(dev); 306 netif_wake_queue(dev);
@@ -309,16 +308,6 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
309} 308}
310 309
311/* 310/*
312 * Statistics
313 */
314static struct net_device_stats *bpq_get_stats(struct net_device *dev)
315{
316 struct bpqdev *bpq = netdev_priv(dev);
317
318 return &bpq->stats;
319}
320
321/*
322 * Set AX.25 callsign 311 * Set AX.25 callsign
323 */ 312 */
324static int bpq_set_mac_address(struct net_device *dev, void *addr) 313static int bpq_set_mac_address(struct net_device *dev, void *addr)
@@ -454,7 +443,7 @@ static int bpq_seq_show(struct seq_file *seq, void *v)
454 return 0; 443 return 0;
455} 444}
456 445
457static struct seq_operations bpq_seqops = { 446static const struct seq_operations bpq_seqops = {
458 .start = bpq_seq_start, 447 .start = bpq_seq_start,
459 .next = bpq_seq_next, 448 .next = bpq_seq_next,
460 .stop = bpq_seq_stop, 449 .stop = bpq_seq_stop,
@@ -477,16 +466,17 @@ static const struct file_operations bpq_info_fops = {
477 466
478/* ------------------------------------------------------------------------ */ 467/* ------------------------------------------------------------------------ */
479 468
469static const struct net_device_ops bpq_netdev_ops = {
470 .ndo_open = bpq_open,
471 .ndo_stop = bpq_close,
472 .ndo_start_xmit = bpq_xmit,
473 .ndo_set_mac_address = bpq_set_mac_address,
474 .ndo_do_ioctl = bpq_ioctl,
475};
480 476
481static void bpq_setup(struct net_device *dev) 477static void bpq_setup(struct net_device *dev)
482{ 478{
483 479 dev->netdev_ops = &bpq_netdev_ops;
484 dev->hard_start_xmit = bpq_xmit;
485 dev->open = bpq_open;
486 dev->stop = bpq_close;
487 dev->set_mac_address = bpq_set_mac_address;
488 dev->get_stats = bpq_get_stats;
489 dev->do_ioctl = bpq_ioctl;
490 dev->destructor = free_netdev; 480 dev->destructor = free_netdev;
491 481
492 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 482 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index e67103396ed..881bf818bb4 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -195,7 +195,7 @@ struct scc_priv {
195 int chip; 195 int chip;
196 struct net_device *dev; 196 struct net_device *dev;
197 struct scc_info *info; 197 struct scc_info *info;
198 struct net_device_stats stats; 198
199 int channel; 199 int channel;
200 int card_base, scc_cmd, scc_data; 200 int card_base, scc_cmd, scc_data;
201 int tmr_cnt, tmr_ctrl, tmr_mode; 201 int tmr_cnt, tmr_ctrl, tmr_mode;
@@ -239,7 +239,6 @@ static int scc_open(struct net_device *dev);
239static int scc_close(struct net_device *dev); 239static int scc_close(struct net_device *dev);
240static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 240static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
241static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); 241static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
242static struct net_device_stats *scc_get_stats(struct net_device *dev);
243static int scc_set_mac_address(struct net_device *dev, void *sa); 242static int scc_set_mac_address(struct net_device *dev, void *sa);
244 243
245static inline void tx_on(struct scc_priv *priv); 244static inline void tx_on(struct scc_priv *priv);
@@ -441,6 +440,13 @@ static void __init dev_setup(struct net_device *dev)
441 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 440 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
442} 441}
443 442
443static const struct net_device_ops scc_netdev_ops = {
444 .ndo_open = scc_open,
445 .ndo_stop = scc_close,
446 .ndo_start_xmit = scc_send_packet,
447 .ndo_do_ioctl = scc_ioctl,
448};
449
444static int __init setup_adapter(int card_base, int type, int n) 450static int __init setup_adapter(int card_base, int type, int n)
445{ 451{
446 int i, irq, chip; 452 int i, irq, chip;
@@ -576,11 +582,7 @@ static int __init setup_adapter(int card_base, int type, int n)
576 sprintf(dev->name, "dmascc%i", 2 * n + i); 582 sprintf(dev->name, "dmascc%i", 2 * n + i);
577 dev->base_addr = card_base; 583 dev->base_addr = card_base;
578 dev->irq = irq; 584 dev->irq = irq;
579 dev->open = scc_open; 585 dev->netdev_ops = &scc_netdev_ops;
580 dev->stop = scc_close;
581 dev->do_ioctl = scc_ioctl;
582 dev->hard_start_xmit = scc_send_packet;
583 dev->get_stats = scc_get_stats;
584 dev->header_ops = &ax25_header_ops; 586 dev->header_ops = &ax25_header_ops;
585 dev->set_mac_address = scc_set_mac_address; 587 dev->set_mac_address = scc_set_mac_address;
586 } 588 }
@@ -961,14 +963,6 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
961} 963}
962 964
963 965
964static struct net_device_stats *scc_get_stats(struct net_device *dev)
965{
966 struct scc_priv *priv = dev->ml_priv;
967
968 return &priv->stats;
969}
970
971
972static int scc_set_mac_address(struct net_device *dev, void *sa) 966static int scc_set_mac_address(struct net_device *dev, void *sa)
973{ 967{
974 memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data, 968 memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
@@ -1216,17 +1210,17 @@ static void special_condition(struct scc_priv *priv, int rc)
1216 } 1210 }
1217 if (priv->rx_over) { 1211 if (priv->rx_over) {
1218 /* We had an overrun */ 1212 /* We had an overrun */
1219 priv->stats.rx_errors++; 1213 priv->dev->stats.rx_errors++;
1220 if (priv->rx_over == 2) 1214 if (priv->rx_over == 2)
1221 priv->stats.rx_length_errors++; 1215 priv->dev->stats.rx_length_errors++;
1222 else 1216 else
1223 priv->stats.rx_fifo_errors++; 1217 priv->dev->stats.rx_fifo_errors++;
1224 priv->rx_over = 0; 1218 priv->rx_over = 0;
1225 } else if (rc & CRC_ERR) { 1219 } else if (rc & CRC_ERR) {
1226 /* Count invalid CRC only if packet length >= minimum */ 1220 /* Count invalid CRC only if packet length >= minimum */
1227 if (cb >= 15) { 1221 if (cb >= 15) {
1228 priv->stats.rx_errors++; 1222 priv->dev->stats.rx_errors++;
1229 priv->stats.rx_crc_errors++; 1223 priv->dev->stats.rx_crc_errors++;
1230 } 1224 }
1231 } else { 1225 } else {
1232 if (cb >= 15) { 1226 if (cb >= 15) {
@@ -1239,8 +1233,8 @@ static void special_condition(struct scc_priv *priv, int rc)
1239 priv->rx_count++; 1233 priv->rx_count++;
1240 schedule_work(&priv->rx_work); 1234 schedule_work(&priv->rx_work);
1241 } else { 1235 } else {
1242 priv->stats.rx_errors++; 1236 priv->dev->stats.rx_errors++;
1243 priv->stats.rx_over_errors++; 1237 priv->dev->stats.rx_over_errors++;
1244 } 1238 }
1245 } 1239 }
1246 } 1240 }
@@ -1275,7 +1269,7 @@ static void rx_bh(struct work_struct *ugli_api)
1275 skb = dev_alloc_skb(cb + 1); 1269 skb = dev_alloc_skb(cb + 1);
1276 if (skb == NULL) { 1270 if (skb == NULL) {
1277 /* Drop packet */ 1271 /* Drop packet */
1278 priv->stats.rx_dropped++; 1272 priv->dev->stats.rx_dropped++;
1279 } else { 1273 } else {
1280 /* Fill buffer */ 1274 /* Fill buffer */
1281 data = skb_put(skb, cb + 1); 1275 data = skb_put(skb, cb + 1);
@@ -1283,8 +1277,8 @@ static void rx_bh(struct work_struct *ugli_api)
1283 memcpy(&data[1], priv->rx_buf[i], cb); 1277 memcpy(&data[1], priv->rx_buf[i], cb);
1284 skb->protocol = ax25_type_trans(skb, priv->dev); 1278 skb->protocol = ax25_type_trans(skb, priv->dev);
1285 netif_rx(skb); 1279 netif_rx(skb);
1286 priv->stats.rx_packets++; 1280 priv->dev->stats.rx_packets++;
1287 priv->stats.rx_bytes += cb; 1281 priv->dev->stats.rx_bytes += cb;
1288 } 1282 }
1289 spin_lock_irqsave(&priv->ring_lock, flags); 1283 spin_lock_irqsave(&priv->ring_lock, flags);
1290 /* Move tail */ 1284 /* Move tail */
@@ -1351,15 +1345,15 @@ static void es_isr(struct scc_priv *priv)
1351 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); 1345 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1352 if (res) { 1346 if (res) {
1353 /* Update packet statistics */ 1347 /* Update packet statistics */
1354 priv->stats.tx_errors++; 1348 priv->dev->stats.tx_errors++;
1355 priv->stats.tx_fifo_errors++; 1349 priv->dev->stats.tx_fifo_errors++;
1356 /* Other underrun interrupts may already be waiting */ 1350 /* Other underrun interrupts may already be waiting */
1357 write_scc(priv, R0, RES_EXT_INT); 1351 write_scc(priv, R0, RES_EXT_INT);
1358 write_scc(priv, R0, RES_EXT_INT); 1352 write_scc(priv, R0, RES_EXT_INT);
1359 } else { 1353 } else {
1360 /* Update packet statistics */ 1354 /* Update packet statistics */
1361 priv->stats.tx_packets++; 1355 priv->dev->stats.tx_packets++;
1362 priv->stats.tx_bytes += priv->tx_len[i]; 1356 priv->dev->stats.tx_bytes += priv->tx_len[i];
1363 /* Remove frame from FIFO */ 1357 /* Remove frame from FIFO */
1364 priv->tx_tail = (i + 1) % NUM_TX_BUF; 1358 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1365 priv->tx_count--; 1359 priv->tx_count--;
@@ -1425,7 +1419,7 @@ static void tm_isr(struct scc_priv *priv)
1425 write_scc(priv, R15, DCDIE); 1419 write_scc(priv, R15, DCDIE);
1426 priv->rr0 = read_scc(priv, R0); 1420 priv->rr0 = read_scc(priv, R0);
1427 if (priv->rr0 & DCD) { 1421 if (priv->rr0 & DCD) {
1428 priv->stats.collisions++; 1422 priv->dev->stats.collisions++;
1429 rx_on(priv); 1423 rx_on(priv);
1430 priv->state = RX_ON; 1424 priv->state = RX_ON;
1431 } else { 1425 } else {
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8eba61a1d4a..61de56e45ee 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -154,7 +154,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
154 pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */ 154 pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */
155 if (!(skb = dev_alloc_skb(pkt_len))) { 155 if (!(skb = dev_alloc_skb(pkt_len))) {
156 printk("%s: memory squeeze, dropping packet\n", dev->name); 156 printk("%s: memory squeeze, dropping packet\n", dev->name);
157 s->stats.rx_dropped++; 157 dev->stats.rx_dropped++;
158 return; 158 return;
159 } 159 }
160 cp = skb_put(skb, pkt_len); 160 cp = skb_put(skb, pkt_len);
@@ -162,7 +162,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
162 memcpy(cp, s->hdlcrx.buffer, pkt_len - 1); 162 memcpy(cp, s->hdlcrx.buffer, pkt_len - 1);
163 skb->protocol = ax25_type_trans(skb, dev); 163 skb->protocol = ax25_type_trans(skb, dev);
164 netif_rx(skb); 164 netif_rx(skb);
165 s->stats.rx_packets++; 165 dev->stats.rx_packets++;
166} 166}
167 167
168void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s) 168void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s)
@@ -326,7 +326,7 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
326 s->hdlctx.len = pkt_len+2; /* the appended CRC */ 326 s->hdlctx.len = pkt_len+2; /* the appended CRC */
327 s->hdlctx.tx_state = 2; 327 s->hdlctx.tx_state = 2;
328 s->hdlctx.bitstream = 0; 328 s->hdlctx.bitstream = 0;
329 s->stats.tx_packets++; 329 dev->stats.tx_packets++;
330 break; 330 break;
331 case 2: 331 case 2:
332 if (!s->hdlctx.len) { 332 if (!s->hdlctx.len) {
@@ -427,19 +427,6 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
427} 427}
428 428
429/* --------------------------------------------------------------------- */ 429/* --------------------------------------------------------------------- */
430
431static struct net_device_stats *hdlcdrv_get_stats(struct net_device *dev)
432{
433 struct hdlcdrv_state *sm = netdev_priv(dev);
434
435 /*
436 * Get the current statistics. This may be called with the
437 * card open or closed.
438 */
439 return &sm->stats;
440}
441
442/* --------------------------------------------------------------------- */
443/* 430/*
444 * Open/initialize the board. This is called (in the current kernel) 431 * Open/initialize the board. This is called (in the current kernel)
445 * sometime after booting when the 'ifconfig' program is run. 432 * sometime after booting when the 'ifconfig' program is run.
@@ -568,10 +555,10 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
568 bi.data.cs.ptt = hdlcdrv_ptt(s); 555 bi.data.cs.ptt = hdlcdrv_ptt(s);
569 bi.data.cs.dcd = s->hdlcrx.dcd; 556 bi.data.cs.dcd = s->hdlcrx.dcd;
570 bi.data.cs.ptt_keyed = s->ptt_keyed; 557 bi.data.cs.ptt_keyed = s->ptt_keyed;
571 bi.data.cs.tx_packets = s->stats.tx_packets; 558 bi.data.cs.tx_packets = dev->stats.tx_packets;
572 bi.data.cs.tx_errors = s->stats.tx_errors; 559 bi.data.cs.tx_errors = dev->stats.tx_errors;
573 bi.data.cs.rx_packets = s->stats.rx_packets; 560 bi.data.cs.rx_packets = dev->stats.rx_packets;
574 bi.data.cs.rx_errors = s->stats.rx_errors; 561 bi.data.cs.rx_errors = dev->stats.rx_errors;
575 break; 562 break;
576 563
577 case HDLCDRVCTL_OLDGETSTAT: 564 case HDLCDRVCTL_OLDGETSTAT:
@@ -630,6 +617,14 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
630 617
631/* --------------------------------------------------------------------- */ 618/* --------------------------------------------------------------------- */
632 619
620static const struct net_device_ops hdlcdrv_netdev = {
621 .ndo_open = hdlcdrv_open,
622 .ndo_stop = hdlcdrv_close,
623 .ndo_start_xmit = hdlcdrv_send_packet,
624 .ndo_do_ioctl = hdlcdrv_ioctl,
625 .ndo_set_mac_address = hdlcdrv_set_mac_address,
626};
627
633/* 628/*
634 * Initialize fields in hdlcdrv 629 * Initialize fields in hdlcdrv
635 */ 630 */
@@ -669,21 +664,13 @@ static void hdlcdrv_setup(struct net_device *dev)
669 s->bitbuf_hdlc.shreg = 0x80; 664 s->bitbuf_hdlc.shreg = 0x80;
670#endif /* HDLCDRV_DEBUG */ 665#endif /* HDLCDRV_DEBUG */
671 666
672 /*
673 * initialize the device struct
674 */
675 dev->open = hdlcdrv_open;
676 dev->stop = hdlcdrv_close;
677 dev->do_ioctl = hdlcdrv_ioctl;
678 dev->hard_start_xmit = hdlcdrv_send_packet;
679 dev->get_stats = hdlcdrv_get_stats;
680 667
681 /* Fill in the fields of the device structure */ 668 /* Fill in the fields of the device structure */
682 669
683 s->skb = NULL; 670 s->skb = NULL;
684 671
672 dev->netdev_ops = &hdlcdrv_netdev;
685 dev->header_ops = &ax25_header_ops; 673 dev->header_ops = &ax25_header_ops;
686 dev->set_mac_address = hdlcdrv_set_mac_address;
687 674
688 dev->type = ARPHRD_AX25; /* AF_AX25 device */ 675 dev->type = ARPHRD_AX25; /* AF_AX25 device */
689 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; 676 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index bbdb311b842..ed5b37d4333 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -59,8 +59,6 @@ struct mkiss {
59 unsigned char *xhead; /* pointer to next byte to XMIT */ 59 unsigned char *xhead; /* pointer to next byte to XMIT */
60 int xleft; /* bytes left in XMIT queue */ 60 int xleft; /* bytes left in XMIT queue */
61 61
62 struct net_device_stats stats;
63
64 /* Detailed SLIP statistics. */ 62 /* Detailed SLIP statistics. */
65 int mtu; /* Our mtu (to spot changes!) */ 63 int mtu; /* Our mtu (to spot changes!) */
66 int buffsize; /* Max buffers sizes */ 64 int buffsize; /* Max buffers sizes */
@@ -253,7 +251,7 @@ static void ax_bump(struct mkiss *ax)
253 if (ax->rbuff[0] > 0x0f) { 251 if (ax->rbuff[0] > 0x0f) {
254 if (ax->rbuff[0] & 0x80) { 252 if (ax->rbuff[0] & 0x80) {
255 if (check_crc_16(ax->rbuff, ax->rcount) < 0) { 253 if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
256 ax->stats.rx_errors++; 254 ax->dev->stats.rx_errors++;
257 spin_unlock_bh(&ax->buflock); 255 spin_unlock_bh(&ax->buflock);
258 256
259 return; 257 return;
@@ -268,7 +266,7 @@ static void ax_bump(struct mkiss *ax)
268 *ax->rbuff &= ~0x80; 266 *ax->rbuff &= ~0x80;
269 } else if (ax->rbuff[0] & 0x20) { 267 } else if (ax->rbuff[0] & 0x20) {
270 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { 268 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
271 ax->stats.rx_errors++; 269 ax->dev->stats.rx_errors++;
272 spin_unlock_bh(&ax->buflock); 270 spin_unlock_bh(&ax->buflock);
273 return; 271 return;
274 } 272 }
@@ -295,7 +293,7 @@ static void ax_bump(struct mkiss *ax)
295 if ((skb = dev_alloc_skb(count)) == NULL) { 293 if ((skb = dev_alloc_skb(count)) == NULL) {
296 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", 294 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
297 ax->dev->name); 295 ax->dev->name);
298 ax->stats.rx_dropped++; 296 ax->dev->stats.rx_dropped++;
299 spin_unlock_bh(&ax->buflock); 297 spin_unlock_bh(&ax->buflock);
300 return; 298 return;
301 } 299 }
@@ -303,8 +301,8 @@ static void ax_bump(struct mkiss *ax)
303 memcpy(skb_put(skb,count), ax->rbuff, count); 301 memcpy(skb_put(skb,count), ax->rbuff, count);
304 skb->protocol = ax25_type_trans(skb, ax->dev); 302 skb->protocol = ax25_type_trans(skb, ax->dev);
305 netif_rx(skb); 303 netif_rx(skb);
306 ax->stats.rx_packets++; 304 ax->dev->stats.rx_packets++;
307 ax->stats.rx_bytes += count; 305 ax->dev->stats.rx_bytes += count;
308 spin_unlock_bh(&ax->buflock); 306 spin_unlock_bh(&ax->buflock);
309} 307}
310 308
@@ -344,7 +342,7 @@ static void kiss_unesc(struct mkiss *ax, unsigned char s)
344 return; 342 return;
345 } 343 }
346 344
347 ax->stats.rx_over_errors++; 345 ax->dev->stats.rx_over_errors++;
348 set_bit(AXF_ERROR, &ax->flags); 346 set_bit(AXF_ERROR, &ax->flags);
349 } 347 }
350 spin_unlock_bh(&ax->buflock); 348 spin_unlock_bh(&ax->buflock);
@@ -406,7 +404,7 @@ static void ax_changedmtu(struct mkiss *ax)
406 memcpy(ax->xbuff, ax->xhead, ax->xleft); 404 memcpy(ax->xbuff, ax->xhead, ax->xleft);
407 } else { 405 } else {
408 ax->xleft = 0; 406 ax->xleft = 0;
409 ax->stats.tx_dropped++; 407 dev->stats.tx_dropped++;
410 } 408 }
411 } 409 }
412 410
@@ -417,7 +415,7 @@ static void ax_changedmtu(struct mkiss *ax)
417 memcpy(ax->rbuff, orbuff, ax->rcount); 415 memcpy(ax->rbuff, orbuff, ax->rcount);
418 } else { 416 } else {
419 ax->rcount = 0; 417 ax->rcount = 0;
420 ax->stats.rx_over_errors++; 418 dev->stats.rx_over_errors++;
421 set_bit(AXF_ERROR, &ax->flags); 419 set_bit(AXF_ERROR, &ax->flags);
422 } 420 }
423 } 421 }
@@ -444,7 +442,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
444 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ 442 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
445 len = ax->mtu; 443 len = ax->mtu;
446 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name); 444 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
447 ax->stats.tx_dropped++; 445 dev->stats.tx_dropped++;
448 netif_start_queue(dev); 446 netif_start_queue(dev);
449 return; 447 return;
450 } 448 }
@@ -518,8 +516,8 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
518 516
519 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 517 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
520 actual = ax->tty->ops->write(ax->tty, ax->xbuff, count); 518 actual = ax->tty->ops->write(ax->tty, ax->xbuff, count);
521 ax->stats.tx_packets++; 519 dev->stats.tx_packets++;
522 ax->stats.tx_bytes += actual; 520 dev->stats.tx_bytes += actual;
523 521
524 ax->dev->trans_start = jiffies; 522 ax->dev->trans_start = jiffies;
525 ax->xleft = count - actual; 523 ax->xleft = count - actual;
@@ -664,32 +662,28 @@ static int ax_close(struct net_device *dev)
664 return 0; 662 return 0;
665} 663}
666 664
667static struct net_device_stats *ax_get_stats(struct net_device *dev)
668{
669 struct mkiss *ax = netdev_priv(dev);
670
671 return &ax->stats;
672}
673
674static const struct header_ops ax_header_ops = { 665static const struct header_ops ax_header_ops = {
675 .create = ax_header, 666 .create = ax_header,
676 .rebuild = ax_rebuild_header, 667 .rebuild = ax_rebuild_header,
677}; 668};
678 669
670static const struct net_device_ops ax_netdev_ops = {
671 .ndo_open = ax_open_dev,
672 .ndo_stop = ax_close,
673 .ndo_start_xmit = ax_xmit,
674 .ndo_set_mac_address = ax_set_mac_address,
675};
676
679static void ax_setup(struct net_device *dev) 677static void ax_setup(struct net_device *dev)
680{ 678{
681 /* Finish setting up the DEVICE info. */ 679 /* Finish setting up the DEVICE info. */
682 dev->mtu = AX_MTU; 680 dev->mtu = AX_MTU;
683 dev->hard_start_xmit = ax_xmit;
684 dev->open = ax_open_dev;
685 dev->stop = ax_close;
686 dev->get_stats = ax_get_stats;
687 dev->set_mac_address = ax_set_mac_address;
688 dev->hard_header_len = 0; 681 dev->hard_header_len = 0;
689 dev->addr_len = 0; 682 dev->addr_len = 0;
690 dev->type = ARPHRD_AX25; 683 dev->type = ARPHRD_AX25;
691 dev->tx_queue_len = 10; 684 dev->tx_queue_len = 10;
692 dev->header_ops = &ax_header_ops; 685 dev->header_ops = &ax_header_ops;
686 dev->netdev_ops = &ax_netdev_ops;
693 687
694 688
695 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 689 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
@@ -929,7 +923,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
929 while (count--) { 923 while (count--) {
930 if (fp != NULL && *fp++) { 924 if (fp != NULL && *fp++) {
931 if (!test_and_set_bit(AXF_ERROR, &ax->flags)) 925 if (!test_and_set_bit(AXF_ERROR, &ax->flags))
932 ax->stats.rx_errors++; 926 ax->dev->stats.rx_errors++;
933 cp++; 927 cp++;
934 continue; 928 continue;
935 } 929 }
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index c011af7088e..2acb18f0697 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1542,23 +1542,24 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc)
1542/* * Network driver methods * */ 1542/* * Network driver methods * */
1543/* ******************************************************************** */ 1543/* ******************************************************************** */
1544 1544
1545static const struct net_device_ops scc_netdev_ops = {
1546 .ndo_open = scc_net_open,
1547 .ndo_stop = scc_net_close,
1548 .ndo_start_xmit = scc_net_tx,
1549 .ndo_set_mac_address = scc_net_set_mac_address,
1550 .ndo_get_stats = scc_net_get_stats,
1551 .ndo_do_ioctl = scc_net_ioctl,
1552};
1553
1545/* ----> Initialize device <----- */ 1554/* ----> Initialize device <----- */
1546 1555
1547static void scc_net_setup(struct net_device *dev) 1556static void scc_net_setup(struct net_device *dev)
1548{ 1557{
1549 dev->tx_queue_len = 16; /* should be enough... */ 1558 dev->tx_queue_len = 16; /* should be enough... */
1550 1559
1551 dev->open = scc_net_open; 1560 dev->netdev_ops = &scc_netdev_ops;
1552 dev->stop = scc_net_close;
1553
1554 dev->hard_start_xmit = scc_net_tx;
1555 dev->header_ops = &ax25_header_ops; 1561 dev->header_ops = &ax25_header_ops;
1556 1562
1557 dev->set_mac_address = scc_net_set_mac_address;
1558 dev->get_stats = scc_net_get_stats;
1559 dev->do_ioctl = scc_net_ioctl;
1560 dev->tx_timeout = NULL;
1561
1562 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 1563 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
1563 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 1564 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
1564 1565
@@ -2073,7 +2074,7 @@ static int scc_net_seq_show(struct seq_file *seq, void *v)
2073 return 0; 2074 return 0;
2074} 2075}
2075 2076
2076static struct seq_operations scc_net_seq_ops = { 2077static const struct seq_operations scc_net_seq_ops = {
2077 .start = scc_net_seq_start, 2078 .start = scc_net_seq_start,
2078 .next = scc_net_seq_next, 2079 .next = scc_net_seq_next,
2079 .stop = scc_net_seq_stop, 2080 .stop = scc_net_seq_stop,
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5407f7486c9..82a8be7613d 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -115,10 +115,6 @@ struct yam_port {
115 115
116 struct net_device *dev; 116 struct net_device *dev;
117 117
118 /* Stats section */
119
120 struct net_device_stats stats;
121
122 int nb_rxint; 118 int nb_rxint;
123 int nb_mdint; 119 int nb_mdint;
124 120
@@ -507,7 +503,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
507 } else { 503 } else {
508 if (!(skb = dev_alloc_skb(pkt_len))) { 504 if (!(skb = dev_alloc_skb(pkt_len))) {
509 printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name); 505 printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
510 ++yp->stats.rx_dropped; 506 ++dev->stats.rx_dropped;
511 } else { 507 } else {
512 unsigned char *cp; 508 unsigned char *cp;
513 cp = skb_put(skb, pkt_len); 509 cp = skb_put(skb, pkt_len);
@@ -515,7 +511,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
515 memcpy(cp, yp->rx_buf, pkt_len - 1); 511 memcpy(cp, yp->rx_buf, pkt_len - 1);
516 skb->protocol = ax25_type_trans(skb, dev); 512 skb->protocol = ax25_type_trans(skb, dev);
517 netif_rx(skb); 513 netif_rx(skb);
518 ++yp->stats.rx_packets; 514 ++dev->stats.rx_packets;
519 } 515 }
520 } 516 }
521 } 517 }
@@ -677,7 +673,7 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
677 yp->tx_count = 1; 673 yp->tx_count = 1;
678 yp->tx_state = TX_HEAD; 674 yp->tx_state = TX_HEAD;
679 } 675 }
680 ++yp->stats.tx_packets; 676 ++dev->stats.tx_packets;
681 break; 677 break;
682 case TX_TAIL: 678 case TX_TAIL:
683 if (--yp->tx_count <= 0) { 679 if (--yp->tx_count <= 0) {
@@ -716,7 +712,7 @@ static irqreturn_t yam_interrupt(int irq, void *dev_id)
716 handled = 1; 712 handled = 1;
717 713
718 if (lsr & LSR_OE) 714 if (lsr & LSR_OE)
719 ++yp->stats.rx_fifo_errors; 715 ++dev->stats.rx_fifo_errors;
720 716
721 yp->dcd = (msr & RX_DCD) ? 1 : 0; 717 yp->dcd = (msr & RX_DCD) ? 1 : 0;
722 718
@@ -778,16 +774,16 @@ static int yam_seq_show(struct seq_file *seq, void *v)
778 seq_printf(seq, " TxTail %u\n", yp->txtail); 774 seq_printf(seq, " TxTail %u\n", yp->txtail);
779 seq_printf(seq, " SlotTime %u\n", yp->slot); 775 seq_printf(seq, " SlotTime %u\n", yp->slot);
780 seq_printf(seq, " Persist %u\n", yp->pers); 776 seq_printf(seq, " Persist %u\n", yp->pers);
781 seq_printf(seq, " TxFrames %lu\n", yp->stats.tx_packets); 777 seq_printf(seq, " TxFrames %lu\n", dev->stats.tx_packets);
782 seq_printf(seq, " RxFrames %lu\n", yp->stats.rx_packets); 778 seq_printf(seq, " RxFrames %lu\n", dev->stats.rx_packets);
783 seq_printf(seq, " TxInt %u\n", yp->nb_mdint); 779 seq_printf(seq, " TxInt %u\n", yp->nb_mdint);
784 seq_printf(seq, " RxInt %u\n", yp->nb_rxint); 780 seq_printf(seq, " RxInt %u\n", yp->nb_rxint);
785 seq_printf(seq, " RxOver %lu\n", yp->stats.rx_fifo_errors); 781 seq_printf(seq, " RxOver %lu\n", dev->stats.rx_fifo_errors);
786 seq_printf(seq, "\n"); 782 seq_printf(seq, "\n");
787 return 0; 783 return 0;
788} 784}
789 785
790static struct seq_operations yam_seqops = { 786static const struct seq_operations yam_seqops = {
791 .start = yam_seq_start, 787 .start = yam_seq_start,
792 .next = yam_seq_next, 788 .next = yam_seq_next,
793 .stop = yam_seq_stop, 789 .stop = yam_seq_stop,
@@ -812,26 +808,6 @@ static const struct file_operations yam_info_fops = {
812 808
813/* --------------------------------------------------------------------- */ 809/* --------------------------------------------------------------------- */
814 810
815static struct net_device_stats *yam_get_stats(struct net_device *dev)
816{
817 struct yam_port *yp;
818
819 if (!dev)
820 return NULL;
821
822 yp = netdev_priv(dev);
823 if (yp->magic != YAM_MAGIC)
824 return NULL;
825
826 /*
827 * Get the current statistics. This may be called with the
828 * card open or closed.
829 */
830 return &yp->stats;
831}
832
833/* --------------------------------------------------------------------- */
834
835static int yam_open(struct net_device *dev) 811static int yam_open(struct net_device *dev)
836{ 812{
837 struct yam_port *yp = netdev_priv(dev); 813 struct yam_port *yp = netdev_priv(dev);
@@ -878,9 +854,9 @@ static int yam_open(struct net_device *dev)
878 /* Reset overruns for all ports - FPGA programming makes overruns */ 854 /* Reset overruns for all ports - FPGA programming makes overruns */
879 for (i = 0; i < NR_PORTS; i++) { 855 for (i = 0; i < NR_PORTS; i++) {
880 struct net_device *dev = yam_devs[i]; 856 struct net_device *dev = yam_devs[i];
881 struct yam_port *yp = netdev_priv(dev); 857
882 inb(LSR(dev->base_addr)); 858 inb(LSR(dev->base_addr));
883 yp->stats.rx_fifo_errors = 0; 859 dev->stats.rx_fifo_errors = 0;
884 } 860 }
885 861
886 printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq, 862 printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq,
@@ -1068,6 +1044,14 @@ static int yam_set_mac_address(struct net_device *dev, void *addr)
1068 1044
1069/* --------------------------------------------------------------------- */ 1045/* --------------------------------------------------------------------- */
1070 1046
1047static const struct net_device_ops yam_netdev_ops = {
1048 .ndo_open = yam_open,
1049 .ndo_stop = yam_close,
1050 .ndo_start_xmit = yam_send_packet,
1051 .ndo_do_ioctl = yam_ioctl,
1052 .ndo_set_mac_address = yam_set_mac_address,
1053};
1054
1071static void yam_setup(struct net_device *dev) 1055static void yam_setup(struct net_device *dev)
1072{ 1056{
1073 struct yam_port *yp = netdev_priv(dev); 1057 struct yam_port *yp = netdev_priv(dev);
@@ -1088,18 +1072,11 @@ static void yam_setup(struct net_device *dev)
1088 dev->base_addr = yp->iobase; 1072 dev->base_addr = yp->iobase;
1089 dev->irq = yp->irq; 1073 dev->irq = yp->irq;
1090 1074
1091 dev->open = yam_open;
1092 dev->stop = yam_close;
1093 dev->do_ioctl = yam_ioctl;
1094 dev->hard_start_xmit = yam_send_packet;
1095 dev->get_stats = yam_get_stats;
1096
1097 skb_queue_head_init(&yp->send_queue); 1075 skb_queue_head_init(&yp->send_queue);
1098 1076
1077 dev->netdev_ops = &yam_netdev_ops;
1099 dev->header_ops = &ax25_header_ops; 1078 dev->header_ops = &ax25_header_ops;
1100 1079
1101 dev->set_mac_address = yam_set_mac_address;
1102
1103 dev->type = ARPHRD_AX25; 1080 dev->type = ARPHRD_AX25;
1104 dev->hard_header_len = AX25_MAX_HEADER_LEN; 1081 dev->hard_header_len = AX25_MAX_HEADER_LEN;
1105 dev->mtu = AX25_MTU; 1082 dev->mtu = AX25_MTU;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index dfa6348ac1d..5c6315df86b 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1028,10 +1028,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1028 1028
1029 ibmveth_assert(lpar_rc == H_SUCCESS); 1029 ibmveth_assert(lpar_rc == H_SUCCESS);
1030 1030
1031 netif_rx_complete(napi); 1031 napi_complete(napi);
1032 1032
1033 if (ibmveth_rxq_pending_buffer(adapter) && 1033 if (ibmveth_rxq_pending_buffer(adapter) &&
1034 netif_rx_reschedule(napi)) { 1034 napi_reschedule(napi)) {
1035 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1035 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1036 VIO_IRQ_DISABLE); 1036 VIO_IRQ_DISABLE);
1037 goto restart_poll; 1037 goto restart_poll;
@@ -1047,11 +1047,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1047 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1047 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1048 unsigned long lpar_rc; 1048 unsigned long lpar_rc;
1049 1049
1050 if (netif_rx_schedule_prep(&adapter->napi)) { 1050 if (napi_schedule_prep(&adapter->napi)) {
1051 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1051 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1052 VIO_IRQ_DISABLE); 1052 VIO_IRQ_DISABLE);
1053 ibmveth_assert(lpar_rc == H_SUCCESS); 1053 ibmveth_assert(lpar_rc == H_SUCCESS);
1054 __netif_rx_schedule(&adapter->napi); 1054 __napi_schedule(&adapter->napi);
1055 } 1055 }
1056 return IRQ_HANDLED; 1056 return IRQ_HANDLED;
1057} 1057}
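
The ibmveth hunks show the NAPI interface rename in full: netif_rx_complete, netif_rx_schedule_prep, __netif_rx_schedule and netif_rx_reschedule become napi_complete, napi_schedule_prep, __napi_schedule and napi_reschedule, taking only the struct napi_struct now that the helpers no longer need the net_device. A condensed sketch of how the renamed calls fit together in an interrupt handler and poll routine (everything here is placeholder code, not the ibmveth logic):

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static struct napi_struct example_napi;

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	if (napi_schedule_prep(&example_napi)) {
		/* mask the device's interrupts here */
		__napi_schedule(&example_napi);
	}
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;			/* packets processed this pass */

	if (done < budget) {
		napi_complete(napi);
		/* re-enable the device's interrupts here */
	}
	return done;
}
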
diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
index 1927b3fd6f0..cda3ad51baf 100644
--- a/drivers/net/igb/Makefile
+++ b/drivers/net/igb/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 82575 PCI-Express Ethernet Linux driver 3# Intel 82575 PCI-Express Ethernet Linux driver
4# Copyright(c) 1999 - 2007 Intel Corporation. 4# Copyright(c) 1999 - 2009 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 13ca73f96ec..7f43e253c56 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 - 2008 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -62,17 +62,12 @@ static bool igb_sgmii_active_82575(struct e1000_hw *);
62static s32 igb_reset_init_script_82575(struct e1000_hw *); 62static s32 igb_reset_init_script_82575(struct e1000_hw *);
63static s32 igb_read_mac_addr_82575(struct e1000_hw *); 63static s32 igb_read_mac_addr_82575(struct e1000_hw *);
64 64
65
66struct e1000_dev_spec_82575 {
67 bool sgmii_active;
68};
69
70static s32 igb_get_invariants_82575(struct e1000_hw *hw) 65static s32 igb_get_invariants_82575(struct e1000_hw *hw)
71{ 66{
72 struct e1000_phy_info *phy = &hw->phy; 67 struct e1000_phy_info *phy = &hw->phy;
73 struct e1000_nvm_info *nvm = &hw->nvm; 68 struct e1000_nvm_info *nvm = &hw->nvm;
74 struct e1000_mac_info *mac = &hw->mac; 69 struct e1000_mac_info *mac = &hw->mac;
75 struct e1000_dev_spec_82575 *dev_spec; 70 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
76 u32 eecd; 71 u32 eecd;
77 s32 ret_val; 72 s32 ret_val;
78 u16 size; 73 u16 size;
@@ -94,17 +89,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
94 break; 89 break;
95 } 90 }
96 91
97 /* MAC initialization */
98 hw->dev_spec_size = sizeof(struct e1000_dev_spec_82575);
99
100 /* Device-specific structure allocation */
101 hw->dev_spec = kzalloc(hw->dev_spec_size, GFP_KERNEL);
102
103 if (!hw->dev_spec)
104 return -ENOMEM;
105
106 dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
107
108 /* Set media type */ 92 /* Set media type */
109 /* 93 /*
110 * The 82575 uses bits 22:23 for link mode. The mode can be changed 94 * The 82575 uses bits 22:23 for link mode. The mode can be changed
@@ -195,13 +179,13 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
195 179
196 /* PHY function pointers */ 180 /* PHY function pointers */
197 if (igb_sgmii_active_82575(hw)) { 181 if (igb_sgmii_active_82575(hw)) {
198 phy->ops.reset_phy = igb_phy_hw_reset_sgmii_82575; 182 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
199 phy->ops.read_phy_reg = igb_read_phy_reg_sgmii_82575; 183 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
200 phy->ops.write_phy_reg = igb_write_phy_reg_sgmii_82575; 184 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
201 } else { 185 } else {
202 phy->ops.reset_phy = igb_phy_hw_reset; 186 phy->ops.reset = igb_phy_hw_reset;
203 phy->ops.read_phy_reg = igb_read_phy_reg_igp; 187 phy->ops.read_reg = igb_read_phy_reg_igp;
204 phy->ops.write_phy_reg = igb_write_phy_reg_igp; 188 phy->ops.write_reg = igb_write_phy_reg_igp;
205 } 189 }
206 190
207 /* Set phy->phy_addr and phy->id. */ 191 /* Set phy->phy_addr and phy->id. */
@@ -451,7 +435,7 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
451 * SFP documentation requires the following to configure the SFP module 435 * SFP documentation requires the following to configure the SFP module
452 * to work on SGMII. No further documentation is given. 436 * to work on SGMII. No further documentation is given.
453 */ 437 */
454 ret_val = hw->phy.ops.write_phy_reg(hw, 0x1B, 0x8084); 438 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
455 if (ret_val) 439 if (ret_val)
456 goto out; 440 goto out;
457 441
@@ -480,28 +464,28 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
480 s32 ret_val; 464 s32 ret_val;
481 u16 data; 465 u16 data;
482 466
483 ret_val = phy->ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 467 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
484 if (ret_val) 468 if (ret_val)
485 goto out; 469 goto out;
486 470
487 if (active) { 471 if (active) {
488 data |= IGP02E1000_PM_D0_LPLU; 472 data |= IGP02E1000_PM_D0_LPLU;
489 ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 473 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
490 data); 474 data);
491 if (ret_val) 475 if (ret_val)
492 goto out; 476 goto out;
493 477
494 /* When LPLU is enabled, we should disable SmartSpeed */ 478 /* When LPLU is enabled, we should disable SmartSpeed */
495 ret_val = phy->ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 479 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
496 &data); 480 &data);
497 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 481 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
498 ret_val = phy->ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 482 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
499 data); 483 data);
500 if (ret_val) 484 if (ret_val)
501 goto out; 485 goto out;
502 } else { 486 } else {
503 data &= ~IGP02E1000_PM_D0_LPLU; 487 data &= ~IGP02E1000_PM_D0_LPLU;
504 ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 488 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
505 data); 489 data);
506 /* 490 /*
507 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 491 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
@@ -510,24 +494,24 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
510 * SmartSpeed, so performance is maintained. 494 * SmartSpeed, so performance is maintained.
511 */ 495 */
512 if (phy->smart_speed == e1000_smart_speed_on) { 496 if (phy->smart_speed == e1000_smart_speed_on) {
513 ret_val = phy->ops.read_phy_reg(hw, 497 ret_val = phy->ops.read_reg(hw,
514 IGP01E1000_PHY_PORT_CONFIG, &data); 498 IGP01E1000_PHY_PORT_CONFIG, &data);
515 if (ret_val) 499 if (ret_val)
516 goto out; 500 goto out;
517 501
518 data |= IGP01E1000_PSCFR_SMART_SPEED; 502 data |= IGP01E1000_PSCFR_SMART_SPEED;
519 ret_val = phy->ops.write_phy_reg(hw, 503 ret_val = phy->ops.write_reg(hw,
520 IGP01E1000_PHY_PORT_CONFIG, data); 504 IGP01E1000_PHY_PORT_CONFIG, data);
521 if (ret_val) 505 if (ret_val)
522 goto out; 506 goto out;
523 } else if (phy->smart_speed == e1000_smart_speed_off) { 507 } else if (phy->smart_speed == e1000_smart_speed_off) {
524 ret_val = phy->ops.read_phy_reg(hw, 508 ret_val = phy->ops.read_reg(hw,
525 IGP01E1000_PHY_PORT_CONFIG, &data); 509 IGP01E1000_PHY_PORT_CONFIG, &data);
526 if (ret_val) 510 if (ret_val)
527 goto out; 511 goto out;
528 512
529 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 513 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
530 ret_val = phy->ops.write_phy_reg(hw, 514 ret_val = phy->ops.write_reg(hw,
531 IGP01E1000_PHY_PORT_CONFIG, data); 515 IGP01E1000_PHY_PORT_CONFIG, data);
532 if (ret_val) 516 if (ret_val)
533 goto out; 517 goto out;
@@ -803,7 +787,7 @@ static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
803} 787}
804 788
805/** 789/**
806 * igb_update_mc_addr_list_82575 - Update Multicast addresses 790 * igb_update_mc_addr_list - Update Multicast addresses
807 * @hw: pointer to the HW structure 791 * @hw: pointer to the HW structure
808 * @mc_addr_list: array of multicast addresses to program 792 * @mc_addr_list: array of multicast addresses to program
809 * @mc_addr_count: number of multicast addresses to program 793 * @mc_addr_count: number of multicast addresses to program
@@ -815,9 +799,9 @@ static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
815 * The parameter rar_count will usually be hw->mac.rar_entry_count 799 * The parameter rar_count will usually be hw->mac.rar_entry_count
816 * unless there are workarounds that change this. 800 * unless there are workarounds that change this.
817 **/ 801 **/
818void igb_update_mc_addr_list_82575(struct e1000_hw *hw, 802void igb_update_mc_addr_list(struct e1000_hw *hw,
819 u8 *mc_addr_list, u32 mc_addr_count, 803 u8 *mc_addr_list, u32 mc_addr_count,
820 u32 rar_used_count, u32 rar_count) 804 u32 rar_used_count, u32 rar_count)
821{ 805{
822 u32 hash_value; 806 u32 hash_value;
823 u32 i; 807 u32 i;
@@ -1051,7 +1035,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1051 * depending on user settings. 1035 * depending on user settings.
1052 */ 1036 */
1053 hw_dbg("Forcing Speed and Duplex\n"); 1037 hw_dbg("Forcing Speed and Duplex\n");
1054 ret_val = igb_phy_force_speed_duplex(hw); 1038 ret_val = hw->phy.ops.force_speed_duplex(hw);
1055 if (ret_val) { 1039 if (ret_val) {
1056 hw_dbg("Error Forcing Speed and Duplex\n"); 1040 hw_dbg("Error Forcing Speed and Duplex\n");
1057 goto out; 1041 goto out;
@@ -1110,6 +1094,13 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1110 E1000_CTRL_SWDPIN1; 1094 E1000_CTRL_SWDPIN1;
1111 wr32(E1000_CTRL, reg); 1095 wr32(E1000_CTRL, reg);
1112 1096
1097 /* Power on phy for 82576 fiber adapters */
1098 if (hw->mac.type == e1000_82576) {
1099 reg = rd32(E1000_CTRL_EXT);
1100 reg &= ~E1000_CTRL_EXT_SDP7_DATA;
1101 wr32(E1000_CTRL_EXT, reg);
1102 }
1103
1113 /* Set switch control to serdes energy detect */ 1104 /* Set switch control to serdes energy detect */
1114 reg = rd32(E1000_CONNSW); 1105 reg = rd32(E1000_CONNSW);
1115 reg |= E1000_CONNSW_ENRGSRC; 1106 reg |= E1000_CONNSW_ENRGSRC;
@@ -1227,20 +1218,12 @@ out:
1227 **/ 1218 **/
1228static bool igb_sgmii_active_82575(struct e1000_hw *hw) 1219static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1229{ 1220{
1230 struct e1000_dev_spec_82575 *dev_spec; 1221 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1231 bool ret_val;
1232 1222
1233 if (hw->mac.type != e1000_82575) { 1223 if (hw->mac.type != e1000_82575 && hw->mac.type != e1000_82576)
1234 ret_val = false; 1224 return false;
1235 goto out;
1236 }
1237 1225
1238 dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec; 1226 return dev_spec->sgmii_active;
1239
1240 ret_val = dev_spec->sgmii_active;
1241
1242out:
1243 return ret_val;
1244} 1227}
1245 1228
1246/** 1229/**
@@ -1440,16 +1423,16 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
1440}; 1423};
1441 1424
1442static struct e1000_phy_operations e1000_phy_ops_82575 = { 1425static struct e1000_phy_operations e1000_phy_ops_82575 = {
1443 .acquire_phy = igb_acquire_phy_82575, 1426 .acquire = igb_acquire_phy_82575,
1444 .get_cfg_done = igb_get_cfg_done_82575, 1427 .get_cfg_done = igb_get_cfg_done_82575,
1445 .release_phy = igb_release_phy_82575, 1428 .release = igb_release_phy_82575,
1446}; 1429};
1447 1430
1448static struct e1000_nvm_operations e1000_nvm_ops_82575 = { 1431static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
1449 .acquire_nvm = igb_acquire_nvm_82575, 1432 .acquire = igb_acquire_nvm_82575,
1450 .read_nvm = igb_read_nvm_eerd, 1433 .read = igb_read_nvm_eerd,
1451 .release_nvm = igb_release_nvm_82575, 1434 .release = igb_release_nvm_82575,
1452 .write_nvm = igb_write_nvm_spi, 1435 .write = igb_write_nvm_spi,
1453}; 1436};
1454 1437
1455const struct e1000_info e1000_82575_info = { 1438const struct e1000_info e1000_82575_info = {
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index c1928b5efe1..dd50237c8cb 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 - 2008 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -28,7 +28,7 @@
28#ifndef _E1000_82575_H_ 28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); 31void igb_update_mc_addr_list(struct e1000_hw*, u8*, u32, u32, u32);
32extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); 32extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
33extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 33extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
34 34
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 40d03426c12..5342e231c1d 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 - 2008 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -413,6 +413,7 @@
413/* LAN connected device generates an interrupt */ 413/* LAN connected device generates an interrupt */
414#define E1000_ICR_PHYINT 0x00001000 414#define E1000_ICR_PHYINT 0x00001000
415#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ 415#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
416#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
416 417
417/* Extended Interrupt Cause Read */ 418/* Extended Interrupt Cause Read */
418#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ 419#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
@@ -441,7 +442,8 @@
441 E1000_IMS_TXDW | \ 442 E1000_IMS_TXDW | \
442 E1000_IMS_RXDMT0 | \ 443 E1000_IMS_RXDMT0 | \
443 E1000_IMS_RXSEQ | \ 444 E1000_IMS_RXSEQ | \
444 E1000_IMS_LSC) 445 E1000_IMS_LSC | \
446 E1000_IMS_DOUTSYNC)
445 447
446/* Interrupt Mask Set */ 448/* Interrupt Mask Set */
447#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 449#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -449,6 +451,7 @@
449#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 451#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
450#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 452#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
451#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 453#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
454#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
452 455
453/* Extended Interrupt Mask Set */ 456/* Extended Interrupt Mask Set */
454#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ 457#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
@@ -457,6 +460,7 @@
457/* Interrupt Cause Set */ 460/* Interrupt Cause Set */
458#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 461#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
459#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 462#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
463#define E1000_ICS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
460 464
461/* Extended Interrupt Cause Set */ 465/* Extended Interrupt Cause Set */
462 466
@@ -481,6 +485,8 @@
481 * manageability enabled, allowing us room for 15 multicast addresses. 485 * manageability enabled, allowing us room for 15 multicast addresses.
482 */ 486 */
483#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ 487#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
488#define E1000_RAL_MAC_ADDR_LEN 4
489#define E1000_RAH_MAC_ADDR_LEN 2
484 490
485/* Error Codes */ 491/* Error Codes */
486#define E1000_ERR_NVM 1 492#define E1000_ERR_NVM 1
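
The new E1000_ICR_DOUTSYNC cause bit is folded into the default enable mask and given IMS/ICS aliases above, and a matching doosync counter is added to e1000_hw_stats further down. A small standalone C sketch of how such a cause bit is masked in and then tested; the mask name and the handler logic are illustrative only, not the driver's exact interrupt path.

#include <stdint.h>
#include <stdio.h>

#define E1000_ICR_RXT0     0x00000080u  /* rx timer interrupt */
#define E1000_ICR_DOUTSYNC 0x10000000u  /* NIC DMA out of sync */
#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC

/* Hypothetical default enable mask, now including the DMA-out-of-sync cause. */
#define EXAMPLE_IMS_ENABLE_MASK (E1000_ICR_RXT0 | E1000_IMS_DOUTSYNC)

int main(void)
{
	uint32_t icr = E1000_ICR_DOUTSYNC;      /* pretend the hardware latched the cause */
	unsigned long doosync = 0;              /* counterpart of the new doosync statistic */

	if (icr & EXAMPLE_IMS_ENABLE_MASK & E1000_ICR_DOUTSYNC)
		doosync++;                      /* count DMA-out-of-sync events */

	printf("doosync events: %lu\n", doosync);
	return 0;
}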
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 99504a600a8..bd86cebed37 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -359,6 +359,7 @@ struct e1000_hw_stats {
359 u64 lenerrs; 359 u64 lenerrs;
360 u64 scvpc; 360 u64 scvpc;
361 u64 hrmpc; 361 u64 hrmpc;
362 u64 doosync;
362}; 363};
363 364
364struct e1000_phy_stats { 365struct e1000_phy_stats {
@@ -422,25 +423,25 @@ struct e1000_mac_operations {
422}; 423};
423 424
424struct e1000_phy_operations { 425struct e1000_phy_operations {
425 s32 (*acquire_phy)(struct e1000_hw *); 426 s32 (*acquire)(struct e1000_hw *);
426 s32 (*check_reset_block)(struct e1000_hw *); 427 s32 (*check_reset_block)(struct e1000_hw *);
427 s32 (*force_speed_duplex)(struct e1000_hw *); 428 s32 (*force_speed_duplex)(struct e1000_hw *);
428 s32 (*get_cfg_done)(struct e1000_hw *hw); 429 s32 (*get_cfg_done)(struct e1000_hw *hw);
429 s32 (*get_cable_length)(struct e1000_hw *); 430 s32 (*get_cable_length)(struct e1000_hw *);
430 s32 (*get_phy_info)(struct e1000_hw *); 431 s32 (*get_phy_info)(struct e1000_hw *);
431 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); 432 s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
432 void (*release_phy)(struct e1000_hw *); 433 void (*release)(struct e1000_hw *);
433 s32 (*reset_phy)(struct e1000_hw *); 434 s32 (*reset)(struct e1000_hw *);
434 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); 435 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
435 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); 436 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
436 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); 437 s32 (*write_reg)(struct e1000_hw *, u32, u16);
437}; 438};
438 439
439struct e1000_nvm_operations { 440struct e1000_nvm_operations {
440 s32 (*acquire_nvm)(struct e1000_hw *); 441 s32 (*acquire)(struct e1000_hw *);
441 s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); 442 s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
442 void (*release_nvm)(struct e1000_hw *); 443 void (*release)(struct e1000_hw *);
443 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); 444 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
444}; 445};
445 446
446struct e1000_info { 447struct e1000_info {
@@ -483,7 +484,6 @@ struct e1000_mac_info {
483 bool asf_firmware_present; 484 bool asf_firmware_present;
484 bool autoneg; 485 bool autoneg;
485 bool autoneg_failed; 486 bool autoneg_failed;
486 bool disable_av;
487 bool disable_hw_init_bits; 487 bool disable_hw_init_bits;
488 bool get_link_status; 488 bool get_link_status;
489 bool ifs_params_forced; 489 bool ifs_params_forced;
@@ -565,9 +565,12 @@ struct e1000_fc_info {
565 enum e1000_fc_type original_type; 565 enum e1000_fc_type original_type;
566}; 566};
567 567
568struct e1000_dev_spec_82575 {
569 bool sgmii_active;
570};
571
568struct e1000_hw { 572struct e1000_hw {
569 void *back; 573 void *back;
570 void *dev_spec;
571 574
572 u8 __iomem *hw_addr; 575 u8 __iomem *hw_addr;
573 u8 __iomem *flash_address; 576 u8 __iomem *flash_address;
@@ -580,7 +583,9 @@ struct e1000_hw {
580 struct e1000_bus_info bus; 583 struct e1000_bus_info bus;
581 struct e1000_host_mng_dhcp_cookie mng_cookie; 584 struct e1000_host_mng_dhcp_cookie mng_cookie;
582 585
583 u32 dev_spec_size; 586 union {
587 struct e1000_dev_spec_82575 _82575;
588 } dev_spec;
584 589
585 u16 device_id; 590 u16 device_id;
586 u16 subsystem_vendor_id; 591 u16 subsystem_vendor_id;
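
The e1000_hw.h hunks drop the kzalloc'd void *dev_spec (and its dev_spec_size) in favour of a union embedded directly in struct e1000_hw, which is why the -ENOMEM path in igb_get_invariants_82575() and the igb_remove_device() free go away; the phy/nvm ops members also lose their redundant _phy/_nvm suffixes. A standalone C sketch of the embedded-union pattern, with the types stripped down to the one field relevant here (the example_* names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct example_dev_spec_82575 {
	bool sgmii_active;
};

struct example_hw {
	union {
		struct example_dev_spec_82575 _82575;
	} dev_spec;			/* embedded: no separate allocation or free needed */
};

static bool example_sgmii_active(struct example_hw *hw)
{
	/* Direct access replaces the old cast of a heap-allocated void pointer. */
	return hw->dev_spec._82575.sgmii_active;
}

int main(void)
{
	struct example_hw hw = { .dev_spec = { ._82575 = { .sgmii_active = true } } };

	printf("sgmii active: %d\n", example_sgmii_active(&hw));
	return 0;
}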
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 97f0049a5d6..5c249e2ce93 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -37,19 +37,6 @@
37static s32 igb_set_default_fc(struct e1000_hw *hw); 37static s32 igb_set_default_fc(struct e1000_hw *hw);
38static s32 igb_set_fc_watermarks(struct e1000_hw *hw); 38static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39 39
40/**
41 * igb_remove_device - Free device specific structure
42 * @hw: pointer to the HW structure
43 *
44 * If a device specific structure was allocated, this function will
45 * free it.
46 **/
47void igb_remove_device(struct e1000_hw *hw)
48{
49 /* Freeing the dev_spec member of e1000_hw structure */
50 kfree(hw->dev_spec);
51}
52
53static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 40static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
54{ 41{
55 struct igb_adapter *adapter = hw->back; 42 struct igb_adapter *adapter = hw->back;
@@ -148,7 +135,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
148 u16 offset, nvm_alt_mac_addr_offset, nvm_data; 135 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
149 u8 alt_mac_addr[ETH_ALEN]; 136 u8 alt_mac_addr[ETH_ALEN];
150 137
151 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 138 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
152 &nvm_alt_mac_addr_offset); 139 &nvm_alt_mac_addr_offset);
153 if (ret_val) { 140 if (ret_val) {
154 hw_dbg("NVM Read Error\n"); 141 hw_dbg("NVM Read Error\n");
@@ -165,7 +152,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
165 152
166 for (i = 0; i < ETH_ALEN; i += 2) { 153 for (i = 0; i < ETH_ALEN; i += 2) {
167 offset = nvm_alt_mac_addr_offset + (i >> 1); 154 offset = nvm_alt_mac_addr_offset + (i >> 1);
168 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); 155 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
169 if (ret_val) { 156 if (ret_val) {
170 hw_dbg("NVM Read Error\n"); 157 hw_dbg("NVM Read Error\n");
171 goto out; 158 goto out;
@@ -213,7 +200,8 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
213 200
214 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 201 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
215 202
216 if (!hw->mac.disable_av) 203 /* If MAC address zero, no need to set the AV bit */
204 if (rar_low || rar_high)
217 rar_high |= E1000_RAH_AV; 205 rar_high |= E1000_RAH_AV;
218 206
219 wr32(E1000_RAL(index), rar_low); 207 wr32(E1000_RAL(index), rar_low);
@@ -588,8 +576,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
588 * control setting, then the variable hw->fc will 576 * control setting, then the variable hw->fc will
589 * be initialized based on a value in the EEPROM. 577 * be initialized based on a value in the EEPROM.
590 */ 578 */
591 ret_val = hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, 579 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
592 &nvm_data);
593 580
594 if (ret_val) { 581 if (ret_val) {
595 hw_dbg("NVM Read Error\n"); 582 hw_dbg("NVM Read Error\n");
@@ -720,11 +707,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
720 * has completed. We read this twice because this reg has 707 * has completed. We read this twice because this reg has
721 * some "sticky" (latched) bits. 708 * some "sticky" (latched) bits.
722 */ 709 */
723 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, 710 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
724 &mii_status_reg); 711 &mii_status_reg);
725 if (ret_val) 712 if (ret_val)
726 goto out; 713 goto out;
727 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, 714 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
728 &mii_status_reg); 715 &mii_status_reg);
729 if (ret_val) 716 if (ret_val)
730 goto out; 717 goto out;
@@ -742,11 +729,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
742 * Page Ability Register (Address 5) to determine how 729 * Page Ability Register (Address 5) to determine how
743 * flow control was negotiated. 730 * flow control was negotiated.
744 */ 731 */
745 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV, 732 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
746 &mii_nway_adv_reg); 733 &mii_nway_adv_reg);
747 if (ret_val) 734 if (ret_val)
748 goto out; 735 goto out;
749 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_LP_ABILITY, 736 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
750 &mii_nway_lp_ability_reg); 737 &mii_nway_lp_ability_reg);
751 if (ret_val) 738 if (ret_val)
752 goto out; 739 goto out;
@@ -1041,7 +1028,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1041{ 1028{
1042 s32 ret_val; 1029 s32 ret_val;
1043 1030
1044 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); 1031 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1045 if (ret_val) { 1032 if (ret_val) {
1046 hw_dbg("NVM Read Error\n"); 1033 hw_dbg("NVM Read Error\n");
1047 goto out; 1034 goto out;
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index cbee6af7d91..91461de083f 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -63,7 +63,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
63void igb_put_hw_semaphore(struct e1000_hw *hw); 63void igb_put_hw_semaphore(struct e1000_hw *hw);
64void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 64void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
65s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 65s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
66void igb_remove_device(struct e1000_hw *hw);
67void igb_reset_adaptive(struct e1000_hw *hw); 66void igb_reset_adaptive(struct e1000_hw *hw);
68void igb_update_adaptive(struct e1000_hw *hw); 67void igb_update_adaptive(struct e1000_hw *hw);
69void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); 68void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index a84e4e429fa..a88bfe2f1e8 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -419,7 +419,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
419 goto out; 419 goto out;
420 } 420 }
421 421
422 ret_val = hw->nvm.ops.acquire_nvm(hw); 422 ret_val = hw->nvm.ops.acquire(hw);
423 if (ret_val) 423 if (ret_val)
424 goto out; 424 goto out;
425 425
@@ -468,7 +468,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
468 468
469 msleep(10); 469 msleep(10);
470release: 470release:
471 hw->nvm.ops.release_nvm(hw); 471 hw->nvm.ops.release(hw);
472 472
473out: 473out:
474 return ret_val; 474 return ret_val;
@@ -487,14 +487,14 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
487 s32 ret_val; 487 s32 ret_val;
488 u16 nvm_data; 488 u16 nvm_data;
489 489
490 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); 490 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
491 if (ret_val) { 491 if (ret_val) {
492 hw_dbg("NVM Read Error\n"); 492 hw_dbg("NVM Read Error\n");
493 goto out; 493 goto out;
494 } 494 }
495 *part_num = (u32)(nvm_data << 16); 495 *part_num = (u32)(nvm_data << 16);
496 496
497 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 497 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
498 if (ret_val) { 498 if (ret_val) {
499 hw_dbg("NVM Read Error\n"); 499 hw_dbg("NVM Read Error\n");
500 goto out; 500 goto out;
@@ -515,29 +515,23 @@ out:
515 **/ 515 **/
516s32 igb_read_mac_addr(struct e1000_hw *hw) 516s32 igb_read_mac_addr(struct e1000_hw *hw)
517{ 517{
518 s32 ret_val = 0; 518 u32 rar_high;
519 u16 offset, nvm_data, i; 519 u32 rar_low;
520 u16 i;
520 521
521 for (i = 0; i < ETH_ALEN; i += 2) { 522 rar_high = rd32(E1000_RAH(0));
522 offset = i >> 1; 523 rar_low = rd32(E1000_RAL(0));
523 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); 524
524 if (ret_val) { 525 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
525 hw_dbg("NVM Read Error\n"); 526 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
526 goto out;
527 }
528 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
529 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
530 }
531 527
532 /* Flip last bit of mac address if we're on second port */ 528 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
533 if (hw->bus.func == E1000_FUNC_1) 529 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
534 hw->mac.perm_addr[5] ^= 1;
535 530
536 for (i = 0; i < ETH_ALEN; i++) 531 for (i = 0; i < ETH_ALEN; i++)
537 hw->mac.addr[i] = hw->mac.perm_addr[i]; 532 hw->mac.addr[i] = hw->mac.perm_addr[i];
538 533
539out: 534 return 0;
540 return ret_val;
541} 535}
542 536
543/** 537/**
@@ -554,7 +548,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
554 u16 i, nvm_data; 548 u16 i, nvm_data;
555 549
556 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 550 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
557 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); 551 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
558 if (ret_val) { 552 if (ret_val) {
559 hw_dbg("NVM Read Error\n"); 553 hw_dbg("NVM Read Error\n");
560 goto out; 554 goto out;
@@ -587,7 +581,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
587 u16 i, nvm_data; 581 u16 i, nvm_data;
588 582
589 for (i = 0; i < NVM_CHECKSUM_REG; i++) { 583 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
590 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); 584 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
591 if (ret_val) { 585 if (ret_val) {
592 hw_dbg("NVM Read Error while updating checksum.\n"); 586 hw_dbg("NVM Read Error while updating checksum.\n");
593 goto out; 587 goto out;
@@ -595,7 +589,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
595 checksum += nvm_data; 589 checksum += nvm_data;
596 } 590 }
597 checksum = (u16) NVM_SUM - checksum; 591 checksum = (u16) NVM_SUM - checksum;
598 ret_val = hw->nvm.ops.write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); 592 ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
599 if (ret_val) 593 if (ret_val)
600 hw_dbg("NVM Write Error while updating checksum.\n"); 594 hw_dbg("NVM Write Error while updating checksum.\n");
601 595
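
igb_read_mac_addr() above now recovers the permanent MAC address from the RAL(0)/RAH(0) receive-address registers (4 bytes from RAL, 2 from RAH) instead of re-reading the NVM word by word; the per-port "flip the last bit" fixup disappears along with the NVM reads, since the register is expected to already hold each function's own address. A standalone C sketch of the byte extraction, with made-up register values for illustration:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_RAL_MAC_ADDR_LEN 4	/* low 4 address bytes live in RAL */
#define EXAMPLE_RAH_MAC_ADDR_LEN 2	/* high 2 address bytes live in RAH */

int main(void)
{
	/* Pretend the hardware holds 00:1b:21:12:34:56 (illustrative values). */
	uint32_t rar_low  = 0x12211b00u;	/* bytes 0..3, byte 0 in the low 8 bits */
	uint32_t rar_high = 0x00005634u;	/* bytes 4..5 in the low 16 bits */
	uint8_t  addr[6];
	int i;

	for (i = 0; i < EXAMPLE_RAL_MAC_ADDR_LEN; i++)
		addr[i] = (uint8_t)(rar_low >> (i * 8));
	for (i = 0; i < EXAMPLE_RAH_MAC_ADDR_LEN; i++)
		addr[i + 4] = (uint8_t)(rar_high >> (i * 8));

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}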
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 17fddb91c9f..ff0050e5d0b 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,6 @@
31#include "e1000_mac.h" 31#include "e1000_mac.h"
32#include "e1000_phy.h" 32#include "e1000_phy.h"
33 33
34static s32 igb_get_phy_cfg_done(struct e1000_hw *hw);
35static void igb_release_phy(struct e1000_hw *hw);
36static s32 igb_acquire_phy(struct e1000_hw *hw);
37static s32 igb_phy_reset_dsp(struct e1000_hw *hw);
38static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); 34static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
39static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, 35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
40 u16 *phy_ctrl); 36 u16 *phy_ctrl);
@@ -91,13 +87,13 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
91 s32 ret_val = 0; 87 s32 ret_val = 0;
92 u16 phy_id; 88 u16 phy_id;
93 89
94 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID1, &phy_id); 90 ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
95 if (ret_val) 91 if (ret_val)
96 goto out; 92 goto out;
97 93
98 phy->id = (u32)(phy_id << 16); 94 phy->id = (u32)(phy_id << 16);
99 udelay(20); 95 udelay(20);
100 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID2, &phy_id); 96 ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
101 if (ret_val) 97 if (ret_val)
102 goto out; 98 goto out;
103 99
@@ -118,11 +114,11 @@ static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
118{ 114{
119 s32 ret_val; 115 s32 ret_val;
120 116
121 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); 117 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
122 if (ret_val) 118 if (ret_val)
123 goto out; 119 goto out;
124 120
125 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); 121 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
126 122
127out: 123out:
128 return ret_val; 124 return ret_val;
@@ -257,9 +253,12 @@ out:
257 **/ 253 **/
258s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) 254s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
259{ 255{
260 s32 ret_val; 256 s32 ret_val = 0;
257
258 if (!(hw->phy.ops.acquire))
259 goto out;
261 260
262 ret_val = igb_acquire_phy(hw); 261 ret_val = hw->phy.ops.acquire(hw);
263 if (ret_val) 262 if (ret_val)
264 goto out; 263 goto out;
265 264
@@ -268,16 +267,15 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
268 IGP01E1000_PHY_PAGE_SELECT, 267 IGP01E1000_PHY_PAGE_SELECT,
269 (u16)offset); 268 (u16)offset);
270 if (ret_val) { 269 if (ret_val) {
271 igb_release_phy(hw); 270 hw->phy.ops.release(hw);
272 goto out; 271 goto out;
273 } 272 }
274 } 273 }
275 274
276 ret_val = igb_read_phy_reg_mdic(hw, 275 ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
277 MAX_PHY_REG_ADDRESS & offset, 276 data);
278 data);
279 277
280 igb_release_phy(hw); 278 hw->phy.ops.release(hw);
281 279
282out: 280out:
283 return ret_val; 281 return ret_val;
@@ -294,9 +292,12 @@ out:
294 **/ 292 **/
295s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) 293s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
296{ 294{
297 s32 ret_val; 295 s32 ret_val = 0;
298 296
299 ret_val = igb_acquire_phy(hw); 297 if (!(hw->phy.ops.acquire))
298 goto out;
299
300 ret_val = hw->phy.ops.acquire(hw);
300 if (ret_val) 301 if (ret_val)
301 goto out; 302 goto out;
302 303
@@ -305,16 +306,15 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
305 IGP01E1000_PHY_PAGE_SELECT, 306 IGP01E1000_PHY_PAGE_SELECT,
306 (u16)offset); 307 (u16)offset);
307 if (ret_val) { 308 if (ret_val) {
308 igb_release_phy(hw); 309 hw->phy.ops.release(hw);
309 goto out; 310 goto out;
310 } 311 }
311 } 312 }
312 313
313 ret_val = igb_write_phy_reg_mdic(hw, 314 ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
314 MAX_PHY_REG_ADDRESS & offset,
315 data); 315 data);
316 316
317 igb_release_phy(hw); 317 hw->phy.ops.release(hw);
318 318
319out: 319out:
320 return ret_val; 320 return ret_val;
@@ -339,8 +339,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
339 } 339 }
340 340
341 /* Enable CRS on TX. This must be set for half-duplex operation. */ 341 /* Enable CRS on TX. This must be set for half-duplex operation. */
342 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 342 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
343 &phy_data);
344 if (ret_val) 343 if (ret_val)
345 goto out; 344 goto out;
346 345
@@ -383,8 +382,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
383 if (phy->disable_polarity_correction == 1) 382 if (phy->disable_polarity_correction == 1)
384 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 383 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
385 384
386 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 385 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
387 phy_data);
388 if (ret_val) 386 if (ret_val)
389 goto out; 387 goto out;
390 388
@@ -393,8 +391,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
393 * Force TX_CLK in the Extended PHY Specific Control Register 391 * Force TX_CLK in the Extended PHY Specific Control Register
394 * to 25MHz clock. 392 * to 25MHz clock.
395 */ 393 */
396 ret_val = hw->phy.ops.read_phy_reg(hw, 394 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
397 M88E1000_EXT_PHY_SPEC_CTRL,
398 &phy_data); 395 &phy_data);
399 if (ret_val) 396 if (ret_val)
400 goto out; 397 goto out;
@@ -413,8 +410,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
413 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 410 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
414 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 411 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
415 } 412 }
416 ret_val = hw->phy.ops.write_phy_reg(hw, 413 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
417 M88E1000_EXT_PHY_SPEC_CTRL,
418 phy_data); 414 phy_data);
419 if (ret_val) 415 if (ret_val)
420 goto out; 416 goto out;
@@ -449,7 +445,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
449 goto out; 445 goto out;
450 } 446 }
451 447
452 ret_val = hw->phy.ops.reset_phy(hw); 448 ret_val = phy->ops.reset(hw);
453 if (ret_val) { 449 if (ret_val) {
454 hw_dbg("Error resetting the PHY.\n"); 450 hw_dbg("Error resetting the PHY.\n");
455 goto out; 451 goto out;
@@ -464,8 +460,8 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
464 */ 460 */
465 if (phy->type == e1000_phy_igp) { 461 if (phy->type == e1000_phy_igp) {
466 /* disable lplu d3 during driver init */ 462 /* disable lplu d3 during driver init */
467 if (hw->phy.ops.set_d3_lplu_state) 463 if (phy->ops.set_d3_lplu_state)
468 ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); 464 ret_val = phy->ops.set_d3_lplu_state(hw, false);
469 if (ret_val) { 465 if (ret_val) {
470 hw_dbg("Error Disabling LPLU D3\n"); 466 hw_dbg("Error Disabling LPLU D3\n");
471 goto out; 467 goto out;
@@ -473,13 +469,13 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
473 } 469 }
474 470
475 /* disable lplu d0 during driver init */ 471 /* disable lplu d0 during driver init */
476 ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); 472 ret_val = phy->ops.set_d0_lplu_state(hw, false);
477 if (ret_val) { 473 if (ret_val) {
478 hw_dbg("Error Disabling LPLU D0\n"); 474 hw_dbg("Error Disabling LPLU D0\n");
479 goto out; 475 goto out;
480 } 476 }
481 /* Configure mdi-mdix settings */ 477 /* Configure mdi-mdix settings */
482 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); 478 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
483 if (ret_val) 479 if (ret_val)
484 goto out; 480 goto out;
485 481
@@ -497,7 +493,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
497 data |= IGP01E1000_PSCR_AUTO_MDIX; 493 data |= IGP01E1000_PSCR_AUTO_MDIX;
498 break; 494 break;
499 } 495 }
500 ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); 496 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
501 if (ret_val) 497 if (ret_val)
502 goto out; 498 goto out;
503 499
@@ -510,33 +506,31 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
510 */ 506 */
511 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { 507 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
512 /* Disable SmartSpeed */ 508 /* Disable SmartSpeed */
513 ret_val = hw->phy.ops.read_phy_reg(hw, 509 ret_val = phy->ops.read_reg(hw,
514 IGP01E1000_PHY_PORT_CONFIG, 510 IGP01E1000_PHY_PORT_CONFIG,
515 &data); 511 &data);
516 if (ret_val) 512 if (ret_val)
517 goto out; 513 goto out;
518 514
519 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 515 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
520 ret_val = hw->phy.ops.write_phy_reg(hw, 516 ret_val = phy->ops.write_reg(hw,
521 IGP01E1000_PHY_PORT_CONFIG, 517 IGP01E1000_PHY_PORT_CONFIG,
522 data); 518 data);
523 if (ret_val) 519 if (ret_val)
524 goto out; 520 goto out;
525 521
526 /* Set auto Master/Slave resolution process */ 522 /* Set auto Master/Slave resolution process */
527 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL, 523 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
528 &data);
529 if (ret_val) 524 if (ret_val)
530 goto out; 525 goto out;
531 526
532 data &= ~CR_1000T_MS_ENABLE; 527 data &= ~CR_1000T_MS_ENABLE;
533 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL, 528 ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
534 data);
535 if (ret_val) 529 if (ret_val)
536 goto out; 530 goto out;
537 } 531 }
538 532
539 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL, &data); 533 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
540 if (ret_val) 534 if (ret_val)
541 goto out; 535 goto out;
542 536
@@ -560,7 +554,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
560 default: 554 default:
561 break; 555 break;
562 } 556 }
563 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL, data); 557 ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
564 if (ret_val) 558 if (ret_val)
565 goto out; 559 goto out;
566 } 560 }
@@ -609,12 +603,12 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw)
609 * Restart auto-negotiation by setting the Auto Neg Enable bit and 603 * Restart auto-negotiation by setting the Auto Neg Enable bit and
610 * the Auto Neg Restart bit in the PHY control register. 604 * the Auto Neg Restart bit in the PHY control register.
611 */ 605 */
612 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 606 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
613 if (ret_val) 607 if (ret_val)
614 goto out; 608 goto out;
615 609
616 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 610 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
617 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 611 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
618 if (ret_val) 612 if (ret_val)
619 goto out; 613 goto out;
620 614
@@ -656,15 +650,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
656 phy->autoneg_advertised &= phy->autoneg_mask; 650 phy->autoneg_advertised &= phy->autoneg_mask;
657 651
658 /* Read the MII Auto-Neg Advertisement Register (Address 4). */ 652 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
659 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV, 653 ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
660 &mii_autoneg_adv_reg);
661 if (ret_val) 654 if (ret_val)
662 goto out; 655 goto out;
663 656
664 if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 657 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
665 /* Read the MII 1000Base-T Control Register (Address 9). */ 658 /* Read the MII 1000Base-T Control Register (Address 9). */
666 ret_val = hw->phy.ops.read_phy_reg(hw, 659 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
667 PHY_1000T_CTRL,
668 &mii_1000t_ctrl_reg); 660 &mii_1000t_ctrl_reg);
669 if (ret_val) 661 if (ret_val)
670 goto out; 662 goto out;
@@ -785,17 +777,16 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
785 goto out; 777 goto out;
786 } 778 }
787 779
788 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_AUTONEG_ADV, 780 ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
789 mii_autoneg_adv_reg);
790 if (ret_val) 781 if (ret_val)
791 goto out; 782 goto out;
792 783
793 hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 784 hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
794 785
795 if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 786 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
796 ret_val = hw->phy.ops.write_phy_reg(hw, 787 ret_val = phy->ops.write_reg(hw,
797 PHY_1000T_CTRL, 788 PHY_1000T_CTRL,
798 mii_1000t_ctrl_reg); 789 mii_1000t_ctrl_reg);
799 if (ret_val) 790 if (ret_val)
800 goto out; 791 goto out;
801 } 792 }
@@ -819,13 +810,13 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
819 u16 phy_data; 810 u16 phy_data;
820 bool link; 811 bool link;
821 812
822 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); 813 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
823 if (ret_val) 814 if (ret_val)
824 goto out; 815 goto out;
825 816
826 igb_phy_force_speed_duplex_setup(hw, &phy_data); 817 igb_phy_force_speed_duplex_setup(hw, &phy_data);
827 818
828 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data); 819 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
829 if (ret_val) 820 if (ret_val)
830 goto out; 821 goto out;
831 822
@@ -833,16 +824,14 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
833 * Clear Auto-Crossover to force MDI manually. IGP requires MDI 824 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
834 * forced whenever speed and duplex are forced. 825 * forced whenever speed and duplex are forced.
835 */ 826 */
836 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, 827 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
837 &phy_data);
838 if (ret_val) 828 if (ret_val)
839 goto out; 829 goto out;
840 830
841 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 831 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
842 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; 832 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
843 833
844 ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, 834 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
845 phy_data);
846 if (ret_val) 835 if (ret_val)
847 goto out; 836 goto out;
848 837
@@ -897,20 +886,18 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
897 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 886 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
898 * forced whenever speed and duplex are forced. 887 * forced whenever speed and duplex are forced.
899 */ 888 */
900 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 889 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
901 &phy_data);
902 if (ret_val) 890 if (ret_val)
903 goto out; 891 goto out;
904 892
905 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 893 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
906 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 894 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
907 phy_data);
908 if (ret_val) 895 if (ret_val)
909 goto out; 896 goto out;
910 897
911 hw_dbg("M88E1000 PSCR: %X\n", phy_data); 898 hw_dbg("M88E1000 PSCR: %X\n", phy_data);
912 899
913 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); 900 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
914 if (ret_val) 901 if (ret_val)
915 goto out; 902 goto out;
916 903
@@ -919,7 +906,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
919 /* Reset the phy to commit changes. */ 906 /* Reset the phy to commit changes. */
920 phy_data |= MII_CR_RESET; 907 phy_data |= MII_CR_RESET;
921 908
922 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data); 909 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
923 if (ret_val) 910 if (ret_val)
924 goto out; 911 goto out;
925 912
@@ -940,7 +927,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
940 * We didn't get link. 927 * We didn't get link.
941 * Reset the DSP and cross our fingers. 928 * Reset the DSP and cross our fingers.
942 */ 929 */
943 ret_val = hw->phy.ops.write_phy_reg(hw, 930 ret_val = phy->ops.write_reg(hw,
944 M88E1000_PHY_PAGE_SELECT, 931 M88E1000_PHY_PAGE_SELECT,
945 0x001d); 932 0x001d);
946 if (ret_val) 933 if (ret_val)
@@ -957,8 +944,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
957 goto out; 944 goto out;
958 } 945 }
959 946
960 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 947 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
961 &phy_data);
962 if (ret_val) 948 if (ret_val)
963 goto out; 949 goto out;
964 950
@@ -968,8 +954,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
968 * the reset value of 2.5MHz. 954 * the reset value of 2.5MHz.
969 */ 955 */
970 phy_data |= M88E1000_EPSCR_TX_CLK_25; 956 phy_data |= M88E1000_EPSCR_TX_CLK_25;
971 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 957 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
972 phy_data);
973 if (ret_val) 958 if (ret_val)
974 goto out; 959 goto out;
975 960
@@ -977,14 +962,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
977 * In addition, we must re-enable CRS on Tx for both half and full 962 * In addition, we must re-enable CRS on Tx for both half and full
978 * duplex. 963 * duplex.
979 */ 964 */
980 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 965 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
981 &phy_data);
982 if (ret_val) 966 if (ret_val)
983 goto out; 967 goto out;
984 968
985 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 969 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
986 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 970 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
987 phy_data);
988 971
989out: 972out:
990 return ret_val; 973 return ret_val;
@@ -1071,15 +1054,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1071 s32 ret_val; 1054 s32 ret_val;
1072 u16 data; 1055 u16 data;
1073 1056
1074 ret_val = hw->phy.ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 1057 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
1075 &data);
1076 if (ret_val) 1058 if (ret_val)
1077 goto out; 1059 goto out;
1078 1060
1079 if (!active) { 1061 if (!active) {
1080 data &= ~IGP02E1000_PM_D3_LPLU; 1062 data &= ~IGP02E1000_PM_D3_LPLU;
1081 ret_val = hw->phy.ops.write_phy_reg(hw, 1063 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1082 IGP02E1000_PHY_POWER_MGMT,
1083 data); 1064 data);
1084 if (ret_val) 1065 if (ret_val)
1085 goto out; 1066 goto out;
@@ -1090,27 +1071,27 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1090 * SmartSpeed, so performance is maintained. 1071 * SmartSpeed, so performance is maintained.
1091 */ 1072 */
1092 if (phy->smart_speed == e1000_smart_speed_on) { 1073 if (phy->smart_speed == e1000_smart_speed_on) {
1093 ret_val = hw->phy.ops.read_phy_reg(hw, 1074 ret_val = phy->ops.read_reg(hw,
1094 IGP01E1000_PHY_PORT_CONFIG, 1075 IGP01E1000_PHY_PORT_CONFIG,
1095 &data); 1076 &data);
1096 if (ret_val) 1077 if (ret_val)
1097 goto out; 1078 goto out;
1098 1079
1099 data |= IGP01E1000_PSCFR_SMART_SPEED; 1080 data |= IGP01E1000_PSCFR_SMART_SPEED;
1100 ret_val = hw->phy.ops.write_phy_reg(hw, 1081 ret_val = phy->ops.write_reg(hw,
1101 IGP01E1000_PHY_PORT_CONFIG, 1082 IGP01E1000_PHY_PORT_CONFIG,
1102 data); 1083 data);
1103 if (ret_val) 1084 if (ret_val)
1104 goto out; 1085 goto out;
1105 } else if (phy->smart_speed == e1000_smart_speed_off) { 1086 } else if (phy->smart_speed == e1000_smart_speed_off) {
1106 ret_val = hw->phy.ops.read_phy_reg(hw, 1087 ret_val = phy->ops.read_reg(hw,
1107 IGP01E1000_PHY_PORT_CONFIG, 1088 IGP01E1000_PHY_PORT_CONFIG,
1108 &data); 1089 &data);
1109 if (ret_val) 1090 if (ret_val)
1110 goto out; 1091 goto out;
1111 1092
1112 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1093 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1113 ret_val = hw->phy.ops.write_phy_reg(hw, 1094 ret_val = phy->ops.write_reg(hw,
1114 IGP01E1000_PHY_PORT_CONFIG, 1095 IGP01E1000_PHY_PORT_CONFIG,
1115 data); 1096 data);
1116 if (ret_val) 1097 if (ret_val)
@@ -1120,22 +1101,19 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1120 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 1101 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1121 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 1102 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1122 data |= IGP02E1000_PM_D3_LPLU; 1103 data |= IGP02E1000_PM_D3_LPLU;
1123 ret_val = hw->phy.ops.write_phy_reg(hw, 1104 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1124 IGP02E1000_PHY_POWER_MGMT,
1125 data); 1105 data);
1126 if (ret_val) 1106 if (ret_val)
1127 goto out; 1107 goto out;
1128 1108
1129 /* When LPLU is enabled, we should disable SmartSpeed */ 1109 /* When LPLU is enabled, we should disable SmartSpeed */
1130 ret_val = hw->phy.ops.read_phy_reg(hw, 1110 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1131 IGP01E1000_PHY_PORT_CONFIG,
1132 &data); 1111 &data);
1133 if (ret_val) 1112 if (ret_val)
1134 goto out; 1113 goto out;
1135 1114
1136 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1115 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1137 ret_val = hw->phy.ops.write_phy_reg(hw, 1116 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1138 IGP01E1000_PHY_PORT_CONFIG,
1139 data); 1117 data);
1140 } 1118 }
1141 1119
@@ -1176,7 +1154,7 @@ s32 igb_check_downshift(struct e1000_hw *hw)
1176 goto out; 1154 goto out;
1177 } 1155 }
1178 1156
1179 ret_val = hw->phy.ops.read_phy_reg(hw, offset, &phy_data); 1157 ret_val = phy->ops.read_reg(hw, offset, &phy_data);
1180 1158
1181 if (!ret_val) 1159 if (!ret_val)
1182 phy->speed_downgraded = (phy_data & mask) ? true : false; 1160 phy->speed_downgraded = (phy_data & mask) ? true : false;
@@ -1199,7 +1177,7 @@ static s32 igb_check_polarity_m88(struct e1000_hw *hw)
1199 s32 ret_val; 1177 s32 ret_val;
1200 u16 data; 1178 u16 data;
1201 1179
1202 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); 1180 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
1203 1181
1204 if (!ret_val) 1182 if (!ret_val)
1205 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) 1183 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
@@ -1228,8 +1206,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1228 * Polarity is determined based on the speed of 1206 * Polarity is determined based on the speed of
1229 * our connection. 1207 * our connection.
1230 */ 1208 */
1231 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 1209 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1232 &data);
1233 if (ret_val) 1210 if (ret_val)
1234 goto out; 1211 goto out;
1235 1212
@@ -1246,7 +1223,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1246 mask = IGP01E1000_PSSR_POLARITY_REVERSED; 1223 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1247 } 1224 }
1248 1225
1249 ret_val = hw->phy.ops.read_phy_reg(hw, offset, &data); 1226 ret_val = phy->ops.read_reg(hw, offset, &data);
1250 1227
1251 if (!ret_val) 1228 if (!ret_val)
1252 phy->cable_polarity = (data & mask) 1229 phy->cable_polarity = (data & mask)
@@ -1271,10 +1248,10 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
1271 1248
1272 /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ 1249 /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
1273 for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { 1250 for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
1274 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1251 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1275 if (ret_val) 1252 if (ret_val)
1276 break; 1253 break;
1277 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1254 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1278 if (ret_val) 1255 if (ret_val)
1279 break; 1256 break;
1280 if (phy_status & MII_SR_AUTONEG_COMPLETE) 1257 if (phy_status & MII_SR_AUTONEG_COMPLETE)
@@ -1310,10 +1287,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1310 * twice due to the link bit being sticky. No harm doing 1287 * twice due to the link bit being sticky. No harm doing
1311 * it across the board. 1288 * it across the board.
1312 */ 1289 */
1313 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1290 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1314 if (ret_val) 1291 if (ret_val)
1315 break; 1292 break;
1316 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1293 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1317 if (ret_val) 1294 if (ret_val)
1318 break; 1295 break;
1319 if (phy_status & MII_SR_LINK_STATUS) 1296 if (phy_status & MII_SR_LINK_STATUS)
@@ -1350,8 +1327,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
1350 s32 ret_val; 1327 s32 ret_val;
1351 u16 phy_data, index; 1328 u16 phy_data, index;
1352 1329
1353 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 1330 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1354 &phy_data);
1355 if (ret_val) 1331 if (ret_val)
1356 goto out; 1332 goto out;
1357 1333
@@ -1372,8 +1348,8 @@ out:
1372 * 1348 *
1373 * The automatic gain control (agc) normalizes the amplitude of the 1349 * The automatic gain control (agc) normalizes the amplitude of the
1374 * received signal, adjusting for the attenuation produced by the 1350 * received signal, adjusting for the attenuation produced by the
1375 * cable. By reading the AGC registers, which reperesent the 1351 * cable. By reading the AGC registers, which represent the
1376 * cobination of course and fine gain value, the value can be put 1352 * combination of coarse and fine gain value, the value can be put
1377 * into a lookup table to obtain the approximate cable length 1353 * into a lookup table to obtain the approximate cable length
1378 * for each channel. 1354 * for each channel.
1379 **/ 1355 **/
@@ -1392,14 +1368,13 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1392 1368
1393 /* Read the AGC registers for all channels */ 1369 /* Read the AGC registers for all channels */
1394 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { 1370 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
1395 ret_val = hw->phy.ops.read_phy_reg(hw, agc_reg_array[i], 1371 ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
1396 &phy_data);
1397 if (ret_val) 1372 if (ret_val)
1398 goto out; 1373 goto out;
1399 1374
1400 /* 1375 /*
1401 * Getting bits 15:9, which represent the combination of 1376 * Getting bits 15:9, which represent the combination of
1402 * course and fine gain values. The result is a number 1377 * coarse and fine gain values. The result is a number
1403 * that can be put into the lookup table to obtain the 1378 * that can be put into the lookup table to obtain the
1404 * approximate cable length. 1379 * approximate cable length.
1405 */ 1380 */
@@ -1456,7 +1431,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1456 u16 phy_data; 1431 u16 phy_data;
1457 bool link; 1432 bool link;
1458 1433
1459 if (hw->phy.media_type != e1000_media_type_copper) { 1434 if (phy->media_type != e1000_media_type_copper) {
1460 hw_dbg("Phy info is only valid for copper media\n"); 1435 hw_dbg("Phy info is only valid for copper media\n");
1461 ret_val = -E1000_ERR_CONFIG; 1436 ret_val = -E1000_ERR_CONFIG;
1462 goto out; 1437 goto out;
@@ -1472,33 +1447,29 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1472 goto out; 1447 goto out;
1473 } 1448 }
1474 1449
1475 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 1450 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1476 &phy_data);
1477 if (ret_val) 1451 if (ret_val)
1478 goto out; 1452 goto out;
1479 1453
1480 phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) 1454 phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
1481 ? true 1455 ? true : false;
1482 : false;
1483 1456
1484 ret_val = igb_check_polarity_m88(hw); 1457 ret_val = igb_check_polarity_m88(hw);
1485 if (ret_val) 1458 if (ret_val)
1486 goto out; 1459 goto out;
1487 1460
1488 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 1461 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1489 &phy_data);
1490 if (ret_val) 1462 if (ret_val)
1491 goto out; 1463 goto out;
1492 1464
1493 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; 1465 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
1494 1466
1495 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { 1467 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
1496 ret_val = hw->phy.ops.get_cable_length(hw); 1468 ret_val = phy->ops.get_cable_length(hw);
1497 if (ret_val) 1469 if (ret_val)
1498 goto out; 1470 goto out;
1499 1471
1500 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS, 1472 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
1501 &phy_data);
1502 if (ret_val) 1473 if (ret_val)
1503 goto out; 1474 goto out;
1504 1475
@@ -1552,8 +1523,7 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
1552 if (ret_val) 1523 if (ret_val)
1553 goto out; 1524 goto out;
1554 1525
1555 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 1526 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1556 &data);
1557 if (ret_val) 1527 if (ret_val)
1558 goto out; 1528 goto out;
1559 1529
@@ -1561,12 +1531,11 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
1561 1531
1562 if ((data & IGP01E1000_PSSR_SPEED_MASK) == 1532 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1563 IGP01E1000_PSSR_SPEED_1000MBPS) { 1533 IGP01E1000_PSSR_SPEED_1000MBPS) {
1564 ret_val = hw->phy.ops.get_cable_length(hw); 1534 ret_val = phy->ops.get_cable_length(hw);
1565 if (ret_val) 1535 if (ret_val)
1566 goto out; 1536 goto out;
1567 1537
1568 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS, 1538 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
1569 &data);
1570 if (ret_val) 1539 if (ret_val)
1571 goto out; 1540 goto out;
1572 1541
@@ -1599,12 +1568,12 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw)
1599 s32 ret_val; 1568 s32 ret_val;
1600 u16 phy_ctrl; 1569 u16 phy_ctrl;
1601 1570
1602 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 1571 ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
1603 if (ret_val) 1572 if (ret_val)
1604 goto out; 1573 goto out;
1605 1574
1606 phy_ctrl |= MII_CR_RESET; 1575 phy_ctrl |= MII_CR_RESET;
1607 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 1576 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
1608 if (ret_val) 1577 if (ret_val)
1609 goto out; 1578 goto out;
1610 1579
@@ -1635,7 +1604,7 @@ s32 igb_phy_hw_reset(struct e1000_hw *hw)
1635 goto out; 1604 goto out;
1636 } 1605 }
1637 1606
1638 ret_val = igb_acquire_phy(hw); 1607 ret_val = phy->ops.acquire(hw);
1639 if (ret_val) 1608 if (ret_val)
1640 goto out; 1609 goto out;
1641 1610
@@ -1650,74 +1619,14 @@ s32 igb_phy_hw_reset(struct e1000_hw *hw)
1650 1619
1651 udelay(150); 1620 udelay(150);
1652 1621
1653 igb_release_phy(hw); 1622 phy->ops.release(hw);
1654 1623
1655 ret_val = igb_get_phy_cfg_done(hw); 1624 ret_val = phy->ops.get_cfg_done(hw);
1656 1625
1657out: 1626out:
1658 return ret_val; 1627 return ret_val;
1659} 1628}
1660 1629
1661/* Internal function pointers */
1662
1663/**
1664 * igb_get_phy_cfg_done - Generic PHY configuration done
1665 * @hw: pointer to the HW structure
1666 *
1667 * Return success if silicon family did not implement a family specific
1668 * get_cfg_done function.
1669 **/
1670static s32 igb_get_phy_cfg_done(struct e1000_hw *hw)
1671{
1672 if (hw->phy.ops.get_cfg_done)
1673 return hw->phy.ops.get_cfg_done(hw);
1674
1675 return 0;
1676}
1677
1678/**
1679 * igb_release_phy - Generic release PHY
1680 * @hw: pointer to the HW structure
1681 *
1682 * Return if silicon family does not require a semaphore when accessing the
1683 * PHY.
1684 **/
1685static void igb_release_phy(struct e1000_hw *hw)
1686{
1687 if (hw->phy.ops.release_phy)
1688 hw->phy.ops.release_phy(hw);
1689}
1690
1691/**
1692 * igb_acquire_phy - Generic acquire PHY
1693 * @hw: pointer to the HW structure
1694 *
1695 * Return success if silicon family does not require a semaphore when
1696 * accessing the PHY.
1697 **/
1698static s32 igb_acquire_phy(struct e1000_hw *hw)
1699{
1700 if (hw->phy.ops.acquire_phy)
1701 return hw->phy.ops.acquire_phy(hw);
1702
1703 return 0;
1704}
1705
1706/**
1707 * igb_phy_force_speed_duplex - Generic force PHY speed/duplex
1708 * @hw: pointer to the HW structure
1709 *
1710 * When the silicon family has not implemented a forced speed/duplex
1711 * function for the PHY, simply return 0.
1712 **/
1713s32 igb_phy_force_speed_duplex(struct e1000_hw *hw)
1714{
1715 if (hw->phy.ops.force_speed_duplex)
1716 return hw->phy.ops.force_speed_duplex(hw);
1717
1718 return 0;
1719}
1720
1721/** 1630/**
1722 * igb_phy_init_script_igp3 - Inits the IGP3 PHY 1631 * igb_phy_init_script_igp3 - Inits the IGP3 PHY
1723 * @hw: pointer to the HW structure 1632 * @hw: pointer to the HW structure
@@ -1730,75 +1639,75 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
1730 1639
1731 /* PHY init IGP 3 */ 1640 /* PHY init IGP 3 */
1732 /* Enable rise/fall, 10-mode work in class-A */ 1641 /* Enable rise/fall, 10-mode work in class-A */
1733 hw->phy.ops.write_phy_reg(hw, 0x2F5B, 0x9018); 1642 hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
1734 /* Remove all caps from Replica path filter */ 1643 /* Remove all caps from Replica path filter */
1735 hw->phy.ops.write_phy_reg(hw, 0x2F52, 0x0000); 1644 hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
1736 /* Bias trimming for ADC, AFE and Driver (Default) */ 1645 /* Bias trimming for ADC, AFE and Driver (Default) */
1737 hw->phy.ops.write_phy_reg(hw, 0x2FB1, 0x8B24); 1646 hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
1738 /* Increase Hybrid poly bias */ 1647 /* Increase Hybrid poly bias */
1739 hw->phy.ops.write_phy_reg(hw, 0x2FB2, 0xF8F0); 1648 hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
1740 /* Add 4% to TX amplitude in Giga mode */ 1649 /* Add 4% to TX amplitude in Giga mode */
1741 hw->phy.ops.write_phy_reg(hw, 0x2010, 0x10B0); 1650 hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
1742 /* Disable trimming (TTT) */ 1651 /* Disable trimming (TTT) */
1743 hw->phy.ops.write_phy_reg(hw, 0x2011, 0x0000); 1652 hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
1744 /* Poly DC correction to 94.6% + 2% for all channels */ 1653 /* Poly DC correction to 94.6% + 2% for all channels */
1745 hw->phy.ops.write_phy_reg(hw, 0x20DD, 0x249A); 1654 hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
1746 /* ABS DC correction to 95.9% */ 1655 /* ABS DC correction to 95.9% */
1747 hw->phy.ops.write_phy_reg(hw, 0x20DE, 0x00D3); 1656 hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
1748 /* BG temp curve trim */ 1657 /* BG temp curve trim */
1749 hw->phy.ops.write_phy_reg(hw, 0x28B4, 0x04CE); 1658 hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
1750 /* Increasing ADC OPAMP stage 1 currents to max */ 1659 /* Increasing ADC OPAMP stage 1 currents to max */
1751 hw->phy.ops.write_phy_reg(hw, 0x2F70, 0x29E4); 1660 hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
1752 /* Force 1000 ( required for enabling PHY regs configuration) */ 1661 /* Force 1000 ( required for enabling PHY regs configuration) */
1753 hw->phy.ops.write_phy_reg(hw, 0x0000, 0x0140); 1662 hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
1754 /* Set upd_freq to 6 */ 1663 /* Set upd_freq to 6 */
1755 hw->phy.ops.write_phy_reg(hw, 0x1F30, 0x1606); 1664 hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
1756 /* Disable NPDFE */ 1665 /* Disable NPDFE */
1757 hw->phy.ops.write_phy_reg(hw, 0x1F31, 0xB814); 1666 hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
1758 /* Disable adaptive fixed FFE (Default) */ 1667 /* Disable adaptive fixed FFE (Default) */
1759 hw->phy.ops.write_phy_reg(hw, 0x1F35, 0x002A); 1668 hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
1760 /* Enable FFE hysteresis */ 1669 /* Enable FFE hysteresis */
1761 hw->phy.ops.write_phy_reg(hw, 0x1F3E, 0x0067); 1670 hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
1762 /* Fixed FFE for short cable lengths */ 1671 /* Fixed FFE for short cable lengths */
1763 hw->phy.ops.write_phy_reg(hw, 0x1F54, 0x0065); 1672 hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
1764 /* Fixed FFE for medium cable lengths */ 1673 /* Fixed FFE for medium cable lengths */
1765 hw->phy.ops.write_phy_reg(hw, 0x1F55, 0x002A); 1674 hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
1766 /* Fixed FFE for long cable lengths */ 1675 /* Fixed FFE for long cable lengths */
1767 hw->phy.ops.write_phy_reg(hw, 0x1F56, 0x002A); 1676 hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
1768 /* Enable Adaptive Clip Threshold */ 1677 /* Enable Adaptive Clip Threshold */
1769 hw->phy.ops.write_phy_reg(hw, 0x1F72, 0x3FB0); 1678 hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
1770 /* AHT reset limit to 1 */ 1679 /* AHT reset limit to 1 */
1771 hw->phy.ops.write_phy_reg(hw, 0x1F76, 0xC0FF); 1680 hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
1772 /* Set AHT master delay to 127 msec */ 1681 /* Set AHT master delay to 127 msec */
1773 hw->phy.ops.write_phy_reg(hw, 0x1F77, 0x1DEC); 1682 hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
1774 /* Set scan bits for AHT */ 1683 /* Set scan bits for AHT */
1775 hw->phy.ops.write_phy_reg(hw, 0x1F78, 0xF9EF); 1684 hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
1776 /* Set AHT Preset bits */ 1685 /* Set AHT Preset bits */
1777 hw->phy.ops.write_phy_reg(hw, 0x1F79, 0x0210); 1686 hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
1778 /* Change integ_factor of channel A to 3 */ 1687 /* Change integ_factor of channel A to 3 */
1779 hw->phy.ops.write_phy_reg(hw, 0x1895, 0x0003); 1688 hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
1780 /* Change prop_factor of channels BCD to 8 */ 1689 /* Change prop_factor of channels BCD to 8 */
1781 hw->phy.ops.write_phy_reg(hw, 0x1796, 0x0008); 1690 hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
1782 /* Change cg_icount + enable integbp for channels BCD */ 1691 /* Change cg_icount + enable integbp for channels BCD */
1783 hw->phy.ops.write_phy_reg(hw, 0x1798, 0xD008); 1692 hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
1784 /* 1693 /*
1785 * Change cg_icount + enable integbp + change prop_factor_master 1694 * Change cg_icount + enable integbp + change prop_factor_master
1786 * to 8 for channel A 1695 * to 8 for channel A
1787 */ 1696 */
1788 hw->phy.ops.write_phy_reg(hw, 0x1898, 0xD918); 1697 hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
1789 /* Disable AHT in Slave mode on channel A */ 1698 /* Disable AHT in Slave mode on channel A */
1790 hw->phy.ops.write_phy_reg(hw, 0x187A, 0x0800); 1699 hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
1791 /* 1700 /*
1792 * Enable LPLU and disable AN to 1000 in non-D0a states, 1701 * Enable LPLU and disable AN to 1000 in non-D0a states,
1793 * Enable SPD+B2B 1702 * Enable SPD+B2B
1794 */ 1703 */
1795 hw->phy.ops.write_phy_reg(hw, 0x0019, 0x008D); 1704 hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
1796 /* Enable restart AN on an1000_dis change */ 1705 /* Enable restart AN on an1000_dis change */
1797 hw->phy.ops.write_phy_reg(hw, 0x001B, 0x2080); 1706 hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
1798 /* Enable wh_fifo read clock in 10/100 modes */ 1707 /* Enable wh_fifo read clock in 10/100 modes */
1799 hw->phy.ops.write_phy_reg(hw, 0x0014, 0x0045); 1708 hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
1800 /* Restart AN, Speed selection is 1000 */ 1709 /* Restart AN, Speed selection is 1000 */
1801 hw->phy.ops.write_phy_reg(hw, 0x0000, 0x1340); 1710 hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
1802 1711
1803 return 0; 1712 return 0;
1804} 1713}
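
The e1000_phy.c hunks above all follow one pattern: the PHY accessor pointers lose their redundant suffixes (read_phy_reg becomes read_reg, acquire_phy becomes acquire, and so on) and the static null-checking wrappers igb_acquire_phy, igb_release_phy, igb_get_phy_cfg_done and igb_phy_force_speed_duplex are dropped in favour of calling the ops table directly. The sketch below is illustrative only: the authoritative table is struct e1000_phy_operations in e1000_hw.h, which is not part of this section, and the member list here is inferred solely from the call sites rewritten in this patch.

struct e1000_phy_operations {                           /* inferred sketch, not the real header */
        s32  (*acquire)(struct e1000_hw *);             /* was acquire_phy            */
        s32  (*get_cfg_done)(struct e1000_hw *);
        s32  (*force_speed_duplex)(struct e1000_hw *);
        s32  (*get_cable_length)(struct e1000_hw *);
        s32  (*read_reg)(struct e1000_hw *, u32, u16 *); /* was read_phy_reg          */
        void (*release)(struct e1000_hw *);             /* was release_phy            */
        s32  (*reset)(struct e1000_hw *);               /* was reset_phy              */
        s32  (*write_reg)(struct e1000_hw *, u32, u16); /* was write_phy_reg          */
};

Dropping the wrappers means a MAC family that left one of these pointers NULL would now fault instead of silently succeeding, so the change presumably relies on every supported part filling in the whole table; the null-checked inline helpers kept in igb.h (see the igb.h hunks further down) still cover the few callers outside the PHY code.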
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 8f8fe0a780d..3228a862031 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -44,7 +44,6 @@ enum e1000_smart_speed {
44s32 igb_check_downshift(struct e1000_hw *hw); 44s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw); 45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_autoneg(struct e1000_hw *hw); 46s32 igb_copper_link_autoneg(struct e1000_hw *hw);
47s32 igb_phy_force_speed_duplex(struct e1000_hw *hw);
48s32 igb_copper_link_setup_igp(struct e1000_hw *hw); 47s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
49s32 igb_copper_link_setup_m88(struct e1000_hw *hw); 48s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
50s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); 49s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index bdf5d839c4b..5038b73c78e 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index aebef8e48e7..e507449b3cc 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -36,12 +36,6 @@
36 36
37struct igb_adapter; 37struct igb_adapter;
38 38
39#ifdef CONFIG_IGB_LRO
40#include <linux/inet_lro.h>
41#define MAX_LRO_AGGR 32
42#define MAX_LRO_DESCRIPTORS 8
43#endif
44
45/* Interrupt defines */ 39/* Interrupt defines */
46#define IGB_MIN_DYN_ITR 3000 40#define IGB_MIN_DYN_ITR 3000
47#define IGB_MAX_DYN_ITR 96000 41#define IGB_MAX_DYN_ITR 96000
@@ -176,10 +170,6 @@ struct igb_ring {
176 struct napi_struct napi; 170 struct napi_struct napi;
177 int set_itr; 171 int set_itr;
178 struct igb_ring *buddy; 172 struct igb_ring *buddy;
179#ifdef CONFIG_IGB_LRO
180 struct net_lro_mgr lro_mgr;
181 bool lro_used;
182#endif
183 }; 173 };
184 }; 174 };
185 175
@@ -248,7 +238,6 @@ struct igb_adapter {
248 238
249 u64 hw_csum_err; 239 u64 hw_csum_err;
250 u64 hw_csum_good; 240 u64 hw_csum_good;
251 u64 rx_hdr_split;
252 u32 alloc_rx_buff_failed; 241 u32 alloc_rx_buff_failed;
253 bool rx_csum; 242 bool rx_csum;
254 u32 gorc; 243 u32 gorc;
@@ -283,17 +272,7 @@ struct igb_adapter {
283 unsigned int flags; 272 unsigned int flags;
284 u32 eeprom_wol; 273 u32 eeprom_wol;
285 274
286 /* for ioport free */
287 int bars;
288 int need_ioport;
289
290 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; 275 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
291#ifdef CONFIG_IGB_LRO
292 unsigned int lro_max_aggr;
293 unsigned int lro_aggregated;
294 unsigned int lro_flushed;
295 unsigned int lro_no_desc;
296#endif
297 unsigned int tx_ring_count; 276 unsigned int tx_ring_count;
298 unsigned int rx_ring_count; 277 unsigned int rx_ring_count;
299}; 278};
@@ -301,9 +280,8 @@ struct igb_adapter {
301#define IGB_FLAG_HAS_MSI (1 << 0) 280#define IGB_FLAG_HAS_MSI (1 << 0)
302#define IGB_FLAG_MSI_ENABLE (1 << 1) 281#define IGB_FLAG_MSI_ENABLE (1 << 1)
303#define IGB_FLAG_DCA_ENABLED (1 << 2) 282#define IGB_FLAG_DCA_ENABLED (1 << 2)
304#define IGB_FLAG_IN_NETPOLL (1 << 3) 283#define IGB_FLAG_QUAD_PORT_A (1 << 3)
305#define IGB_FLAG_QUAD_PORT_A (1 << 4) 284#define IGB_FLAG_NEED_CTX_IDX (1 << 4)
306#define IGB_FLAG_NEED_CTX_IDX (1 << 5)
307 285
308enum e1000_state_t { 286enum e1000_state_t {
309 __IGB_TESTING, 287 __IGB_TESTING,
@@ -333,24 +311,24 @@ extern void igb_set_ethtool_ops(struct net_device *);
333 311
334static inline s32 igb_reset_phy(struct e1000_hw *hw) 312static inline s32 igb_reset_phy(struct e1000_hw *hw)
335{ 313{
336 if (hw->phy.ops.reset_phy) 314 if (hw->phy.ops.reset)
337 return hw->phy.ops.reset_phy(hw); 315 return hw->phy.ops.reset(hw);
338 316
339 return 0; 317 return 0;
340} 318}
341 319
342static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) 320static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
343{ 321{
344 if (hw->phy.ops.read_phy_reg) 322 if (hw->phy.ops.read_reg)
345 return hw->phy.ops.read_phy_reg(hw, offset, data); 323 return hw->phy.ops.read_reg(hw, offset, data);
346 324
347 return 0; 325 return 0;
348} 326}
349 327
350static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) 328static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
351{ 329{
352 if (hw->phy.ops.write_phy_reg) 330 if (hw->phy.ops.write_reg)
353 return hw->phy.ops.write_phy_reg(hw, offset, data); 331 return hw->phy.ops.write_reg(hw, offset, data);
354 332
355 return 0; 333 return 0;
356} 334}
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 3c831f1472a..bd050b1dab7 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -88,16 +88,11 @@ static const struct igb_stats igb_gstrings_stats[] = {
88 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 88 { "rx_long_byte_count", IGB_STAT(stats.gorc) },
89 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, 89 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
90 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, 90 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
91 { "rx_header_split", IGB_STAT(rx_hdr_split) }, 91 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
92 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, 92 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
93 { "tx_smbus", IGB_STAT(stats.mgptc) }, 93 { "tx_smbus", IGB_STAT(stats.mgptc) },
94 { "rx_smbus", IGB_STAT(stats.mgprc) }, 94 { "rx_smbus", IGB_STAT(stats.mgprc) },
95 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 95 { "dropped_smbus", IGB_STAT(stats.mgpdc) },
96#ifdef CONFIG_IGB_LRO
97 { "lro_aggregated", IGB_STAT(lro_aggregated) },
98 { "lro_flushed", IGB_STAT(lro_flushed) },
99 { "lro_no_desc", IGB_STAT(lro_no_desc) },
100#endif
101}; 96};
102 97
103#define IGB_QUEUE_STATS_LEN \ 98#define IGB_QUEUE_STATS_LEN \
@@ -293,15 +288,15 @@ static int igb_set_rx_csum(struct net_device *netdev, u32 data)
293 288
294static u32 igb_get_tx_csum(struct net_device *netdev) 289static u32 igb_get_tx_csum(struct net_device *netdev)
295{ 290{
296 return (netdev->features & NETIF_F_HW_CSUM) != 0; 291 return (netdev->features & NETIF_F_IP_CSUM) != 0;
297} 292}
298 293
299static int igb_set_tx_csum(struct net_device *netdev, u32 data) 294static int igb_set_tx_csum(struct net_device *netdev, u32 data)
300{ 295{
301 if (data) 296 if (data)
302 netdev->features |= NETIF_F_HW_CSUM; 297 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
303 else 298 else
304 netdev->features &= ~NETIF_F_HW_CSUM; 299 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
305 300
306 return 0; 301 return 0;
307} 302}
@@ -310,15 +305,13 @@ static int igb_set_tso(struct net_device *netdev, u32 data)
310{ 305{
311 struct igb_adapter *adapter = netdev_priv(netdev); 306 struct igb_adapter *adapter = netdev_priv(netdev);
312 307
313 if (data) 308 if (data) {
314 netdev->features |= NETIF_F_TSO; 309 netdev->features |= NETIF_F_TSO;
315 else
316 netdev->features &= ~NETIF_F_TSO;
317
318 if (data)
319 netdev->features |= NETIF_F_TSO6; 310 netdev->features |= NETIF_F_TSO6;
320 else 311 } else {
312 netdev->features &= ~NETIF_F_TSO;
321 netdev->features &= ~NETIF_F_TSO6; 313 netdev->features &= ~NETIF_F_TSO6;
314 }
322 315
323 dev_info(&adapter->pdev->dev, "TSO is %s\n", 316 dev_info(&adapter->pdev->dev, "TSO is %s\n",
324 data ? "Enabled" : "Disabled"); 317 data ? "Enabled" : "Disabled");
@@ -598,12 +591,12 @@ static int igb_get_eeprom(struct net_device *netdev,
598 return -ENOMEM; 591 return -ENOMEM;
599 592
600 if (hw->nvm.type == e1000_nvm_eeprom_spi) 593 if (hw->nvm.type == e1000_nvm_eeprom_spi)
601 ret_val = hw->nvm.ops.read_nvm(hw, first_word, 594 ret_val = hw->nvm.ops.read(hw, first_word,
602 last_word - first_word + 1, 595 last_word - first_word + 1,
603 eeprom_buff); 596 eeprom_buff);
604 else { 597 else {
605 for (i = 0; i < last_word - first_word + 1; i++) { 598 for (i = 0; i < last_word - first_word + 1; i++) {
606 ret_val = hw->nvm.ops.read_nvm(hw, first_word + i, 1, 599 ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
607 &eeprom_buff[i]); 600 &eeprom_buff[i]);
608 if (ret_val) 601 if (ret_val)
609 break; 602 break;
@@ -650,14 +643,14 @@ static int igb_set_eeprom(struct net_device *netdev,
650 if (eeprom->offset & 1) { 643 if (eeprom->offset & 1) {
651 /* need read/modify/write of first changed EEPROM word */ 644 /* need read/modify/write of first changed EEPROM word */
652 /* only the second byte of the word is being modified */ 645 /* only the second byte of the word is being modified */
653 ret_val = hw->nvm.ops.read_nvm(hw, first_word, 1, 646 ret_val = hw->nvm.ops.read(hw, first_word, 1,
654 &eeprom_buff[0]); 647 &eeprom_buff[0]);
655 ptr++; 648 ptr++;
656 } 649 }
657 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 650 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
658 /* need read/modify/write of last changed EEPROM word */ 651 /* need read/modify/write of last changed EEPROM word */
659 /* only the first byte of the word is being modified */ 652 /* only the first byte of the word is being modified */
660 ret_val = hw->nvm.ops.read_nvm(hw, last_word, 1, 653 ret_val = hw->nvm.ops.read(hw, last_word, 1,
661 &eeprom_buff[last_word - first_word]); 654 &eeprom_buff[last_word - first_word]);
662 } 655 }
663 656
@@ -670,7 +663,7 @@ static int igb_set_eeprom(struct net_device *netdev,
670 for (i = 0; i < last_word - first_word + 1; i++) 663 for (i = 0; i < last_word - first_word + 1; i++)
671 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); 664 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
672 665
673 ret_val = hw->nvm.ops.write_nvm(hw, first_word, 666 ret_val = hw->nvm.ops.write(hw, first_word,
674 last_word - first_word + 1, eeprom_buff); 667 last_word - first_word + 1, eeprom_buff);
675 668
676 /* Update the checksum over the first part of the EEPROM if needed 669 /* Update the checksum over the first part of the EEPROM if needed
@@ -694,7 +687,7 @@ static void igb_get_drvinfo(struct net_device *netdev,
694 687
695 /* EEPROM image version # is reported as firmware version # for 688 /* EEPROM image version # is reported as firmware version # for
696 * 82575 controllers */ 689 * 82575 controllers */
697 adapter->hw.nvm.ops.read_nvm(&adapter->hw, 5, 1, &eeprom_data); 690 adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
698 sprintf(firmware_version, "%d.%d-%d", 691 sprintf(firmware_version, "%d.%d-%d",
699 (eeprom_data & 0xF000) >> 12, 692 (eeprom_data & 0xF000) >> 12,
700 (eeprom_data & 0x0FF0) >> 4, 693 (eeprom_data & 0x0FF0) >> 4,
@@ -863,23 +856,26 @@ static struct igb_reg_test reg_test_82576[] = {
863 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 856 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
864 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 857 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
865 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 858 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
866 { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 859 { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
867 { E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 860 { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
868 { E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 861 { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
869 /* Enable all four RX queues before testing. */ 862 /* Enable all RX queues before testing. */
870 { E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 863 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
864 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
871 /* RDH is read-only for 82576, only test RDT. */ 865 /* RDH is read-only for 82576, only test RDT. */
872 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 866 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
867 { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
873 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 868 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
869 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
874 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 870 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
875 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 871 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
876 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 872 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
877 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 873 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
878 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 874 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
879 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 875 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
880 { E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 876 { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
881 { E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 877 { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
882 { E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 878 { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
883 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 879 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
884 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 880 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
885 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 881 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
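
Each row of reg_test_82576 is consumed by the loop in igb_reg_test() further down, so the columns can be decoded from those uses: base register, stride between per-queue instances, number of instances, test type, read-back mask and write pattern. A sketch of the row type, with field names inferred from that loop (the real definition sits earlier in igb_ethtool.c and is not shown in this diff):

struct igb_reg_test {
        u16 reg;                /* base register offset                        */
        u16 reg_offset;         /* stride added per instance (i * reg_offset)  */
        u16 array_len;          /* instances to exercise, e.g. 12 RX queues    */
        u16 test_type;          /* PATTERN_TEST, SET_READ_TEST, WRITE_NO_TEST  */
        u32 mask;               /* bits expected to stick on read-back         */
        u32 write;              /* value written during the test               */
};

Bumping array_len from 8 to 12 and adding the E1000_RXDCTL(4) rows extends the self-test to all sixteen 82576 queues (four at stride 0x100 plus twelve at stride 0x40) instead of stopping at the first twelve.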
@@ -926,12 +922,13 @@ static struct igb_reg_test reg_test_82575[] = {
926static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, 922static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
927 int reg, u32 mask, u32 write) 923 int reg, u32 mask, u32 write)
928{ 924{
925 struct e1000_hw *hw = &adapter->hw;
929 u32 pat, val; 926 u32 pat, val;
930 u32 _test[] = 927 u32 _test[] =
931 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 928 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
932 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 929 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
933 writel((_test[pat] & write), (adapter->hw.hw_addr + reg)); 930 wr32(reg, (_test[pat] & write));
934 val = readl(adapter->hw.hw_addr + reg); 931 val = rd32(reg);
935 if (val != (_test[pat] & write & mask)) { 932 if (val != (_test[pat] & write & mask)) {
936 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 933 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
937 "failed: got 0x%08X expected 0x%08X\n", 934 "failed: got 0x%08X expected 0x%08X\n",
@@ -946,9 +943,10 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
946static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, 943static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
947 int reg, u32 mask, u32 write) 944 int reg, u32 mask, u32 write)
948{ 945{
946 struct e1000_hw *hw = &adapter->hw;
949 u32 val; 947 u32 val;
950 writel((write & mask), (adapter->hw.hw_addr + reg)); 948 wr32(reg, write & mask);
951 val = readl(adapter->hw.hw_addr + reg); 949 val = rd32(reg);
952 if ((write & mask) != (val & mask)) { 950 if ((write & mask) != (val & mask)) {
953 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" 951 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
954 " got 0x%08X expected 0x%08X\n", reg, 952 " got 0x%08X expected 0x%08X\n", reg,
@@ -1014,12 +1012,14 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1014 for (i = 0; i < test->array_len; i++) { 1012 for (i = 0; i < test->array_len; i++) {
1015 switch (test->test_type) { 1013 switch (test->test_type) {
1016 case PATTERN_TEST: 1014 case PATTERN_TEST:
1017 REG_PATTERN_TEST(test->reg + (i * test->reg_offset), 1015 REG_PATTERN_TEST(test->reg +
1016 (i * test->reg_offset),
1018 test->mask, 1017 test->mask,
1019 test->write); 1018 test->write);
1020 break; 1019 break;
1021 case SET_READ_TEST: 1020 case SET_READ_TEST:
1022 REG_SET_AND_CHECK(test->reg + (i * test->reg_offset), 1021 REG_SET_AND_CHECK(test->reg +
1022 (i * test->reg_offset),
1023 test->mask, 1023 test->mask,
1024 test->write); 1024 test->write);
1025 break; 1025 break;
@@ -1061,7 +1061,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1061 *data = 0; 1061 *data = 0;
1062 /* Read and add up the contents of the EEPROM */ 1062 /* Read and add up the contents of the EEPROM */
1063 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1063 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1064 if ((adapter->hw.nvm.ops.read_nvm(&adapter->hw, i, 1, &temp)) 1064 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
1065 < 0) { 1065 < 0) {
1066 *data = 1; 1066 *data = 1;
1067 break; 1067 break;
@@ -1091,16 +1091,17 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1091{ 1091{
1092 struct e1000_hw *hw = &adapter->hw; 1092 struct e1000_hw *hw = &adapter->hw;
1093 struct net_device *netdev = adapter->netdev; 1093 struct net_device *netdev = adapter->netdev;
1094 u32 mask, i = 0, shared_int = true; 1094 u32 mask, ics_mask, i = 0, shared_int = true;
1095 u32 irq = adapter->pdev->irq; 1095 u32 irq = adapter->pdev->irq;
1096 1096
1097 *data = 0; 1097 *data = 0;
1098 1098
1099 /* Hook up test interrupt handler just for this test */ 1099 /* Hook up test interrupt handler just for this test */
1100 if (adapter->msix_entries) { 1100 if (adapter->msix_entries)
1101 /* NOTE: we don't test MSI-X interrupts here, yet */ 1101 /* NOTE: we don't test MSI-X interrupts here, yet */
1102 return 0; 1102 return 0;
1103 } else if (adapter->flags & IGB_FLAG_HAS_MSI) { 1103
1104 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1104 shared_int = false; 1105 shared_int = false;
1105 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1106 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
1106 *data = 1; 1107 *data = 1;
@@ -1116,16 +1117,31 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1116 } 1117 }
1117 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1118 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1118 (shared_int ? "shared" : "unshared")); 1119 (shared_int ? "shared" : "unshared"));
1119
1120 /* Disable all the interrupts */ 1120 /* Disable all the interrupts */
1121 wr32(E1000_IMC, 0xFFFFFFFF); 1121 wr32(E1000_IMC, 0xFFFFFFFF);
1122 msleep(10); 1122 msleep(10);
1123 1123
1124 /* Define all writable bits for ICS */
1125 switch(hw->mac.type) {
1126 case e1000_82575:
1127 ics_mask = 0x37F47EDD;
1128 break;
1129 case e1000_82576:
1130 ics_mask = 0x77D4FBFD;
1131 break;
1132 default:
1133 ics_mask = 0x7FFFFFFF;
1134 break;
1135 }
1136
1124 /* Test each interrupt */ 1137 /* Test each interrupt */
1125 for (; i < 10; i++) { 1138 for (; i < 31; i++) {
1126 /* Interrupt to test */ 1139 /* Interrupt to test */
1127 mask = 1 << i; 1140 mask = 1 << i;
1128 1141
1142 if (!(mask & ics_mask))
1143 continue;
1144
1129 if (!shared_int) { 1145 if (!shared_int) {
1130 /* Disable the interrupt to be reported in 1146 /* Disable the interrupt to be reported in
1131 * the cause register and then force the same 1147 * the cause register and then force the same
@@ -1134,8 +1150,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1134 * test failed. 1150 * test failed.
1135 */ 1151 */
1136 adapter->test_icr = 0; 1152 adapter->test_icr = 0;
1137 wr32(E1000_IMC, ~mask & 0x00007FFF); 1153
1138 wr32(E1000_ICS, ~mask & 0x00007FFF); 1154 /* Flush any pending interrupts */
1155 wr32(E1000_ICR, ~0);
1156
1157 wr32(E1000_IMC, mask);
1158 wr32(E1000_ICS, mask);
1139 msleep(10); 1159 msleep(10);
1140 1160
1141 if (adapter->test_icr & mask) { 1161 if (adapter->test_icr & mask) {
@@ -1151,6 +1171,10 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1151 * test failed. 1171 * test failed.
1152 */ 1172 */
1153 adapter->test_icr = 0; 1173 adapter->test_icr = 0;
1174
1175 /* Flush any pending interrupts */
1176 wr32(E1000_ICR, ~0);
1177
1154 wr32(E1000_IMS, mask); 1178 wr32(E1000_IMS, mask);
1155 wr32(E1000_ICS, mask); 1179 wr32(E1000_ICS, mask);
1156 msleep(10); 1180 msleep(10);
@@ -1168,11 +1192,15 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1168 * test failed. 1192 * test failed.
1169 */ 1193 */
1170 adapter->test_icr = 0; 1194 adapter->test_icr = 0;
1171 wr32(E1000_IMC, ~mask & 0x00007FFF); 1195
1172 wr32(E1000_ICS, ~mask & 0x00007FFF); 1196 /* Flush any pending interrupts */
1197 wr32(E1000_ICR, ~0);
1198
1199 wr32(E1000_IMC, ~mask);
1200 wr32(E1000_ICS, ~mask);
1173 msleep(10); 1201 msleep(10);
1174 1202
1175 if (adapter->test_icr) { 1203 if (adapter->test_icr & mask) {
1176 *data = 5; 1204 *data = 5;
1177 break; 1205 break;
1178 } 1206 }
@@ -1180,7 +1208,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1180 } 1208 }
1181 1209
1182 /* Disable all the interrupts */ 1210 /* Disable all the interrupts */
1183 wr32(E1000_IMC, 0xFFFFFFFF); 1211 wr32(E1000_IMC, ~0);
1184 msleep(10); 1212 msleep(10);
1185 1213
1186 /* Unhook test interrupt handler */ 1214 /* Unhook test interrupt handler */
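
The interrupt self-test now walks ICS bits 0 through 30 rather than the first ten, consults a per-MAC mask of which cause bits are actually writable and skips the rest, and flushes stale causes through ICR before arming each case. As a worked example of the skip test, using the 82576 mask added above: bit 1 of 0x77D4FBFD is clear (0xFD ends in binary 1101), so mask = 1 << 1 fails the (mask & ics_mask) check and that cause is never driven. A minimal sketch of the same check in isolation:

/* Illustrative helper, not part of the patch: is cause bit i exercisable? */
static inline bool igb_ics_bit_testable(u32 ics_mask, unsigned int i)
{
        return ((1u << i) & ics_mask) != 0;     /* e.g. i == 1 on 82576 -> false */
}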
@@ -1458,7 +1486,7 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1458 E1000_CTRL_TFCE | 1486 E1000_CTRL_TFCE |
1459 E1000_CTRL_LRST); 1487 E1000_CTRL_LRST);
1460 reg |= E1000_CTRL_SLU | 1488 reg |= E1000_CTRL_SLU |
1461 E1000_CTRL_FD; 1489 E1000_CTRL_FD;
1462 wr32(E1000_CTRL, reg); 1490 wr32(E1000_CTRL, reg);
1463 1491
1464 /* Unset switch control to serdes energy detect */ 1492 /* Unset switch control to serdes energy detect */
@@ -1921,18 +1949,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1921 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); 1949 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
1922 int j; 1950 int j;
1923 int i; 1951 int i;
1924#ifdef CONFIG_IGB_LRO
1925 int aggregated = 0, flushed = 0, no_desc = 0;
1926
1927 for (i = 0; i < adapter->num_rx_queues; i++) {
1928 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
1929 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
1930 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
1931 }
1932 adapter->lro_aggregated = aggregated;
1933 adapter->lro_flushed = flushed;
1934 adapter->lro_no_desc = no_desc;
1935#endif
1936 1952
1937 igb_update_stats(adapter); 1953 igb_update_stats(adapter);
1938 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1954 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index a50db5398fa..f8c2919bcec 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -48,12 +48,12 @@
48#endif 48#endif
49#include "igb.h" 49#include "igb.h"
50 50
51#define DRV_VERSION "1.2.45-k2" 51#define DRV_VERSION "1.3.16-k2"
52char igb_driver_name[] = "igb"; 52char igb_driver_name[] = "igb";
53char igb_driver_version[] = DRV_VERSION; 53char igb_driver_version[] = DRV_VERSION;
54static const char igb_driver_string[] = 54static const char igb_driver_string[] =
55 "Intel(R) Gigabit Ethernet Network Driver"; 55 "Intel(R) Gigabit Ethernet Network Driver";
56static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation."; 56static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
57 57
58static const struct e1000_info *igb_info_tbl[] = { 58static const struct e1000_info *igb_info_tbl[] = {
59 [board_82575] = &e1000_82575_info, 59 [board_82575] = &e1000_82575_info,
@@ -115,9 +115,6 @@ static bool igb_clean_tx_irq(struct igb_ring *);
115static int igb_poll(struct napi_struct *, int); 115static int igb_poll(struct napi_struct *, int);
116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
117static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); 117static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
118#ifdef CONFIG_IGB_LRO
119static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
120#endif
121static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 118static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
122static void igb_tx_timeout(struct net_device *); 119static void igb_tx_timeout(struct net_device *);
123static void igb_reset_task(struct work_struct *); 120static void igb_reset_task(struct work_struct *);
@@ -407,7 +404,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
407 /* Turn on MSI-X capability first, or our settings 404 /* Turn on MSI-X capability first, or our settings
408 * won't stick. And it will take days to debug. */ 405 * won't stick. And it will take days to debug. */
409 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 406 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
410 E1000_GPIE_PBA | E1000_GPIE_EIAME | 407 E1000_GPIE_PBA | E1000_GPIE_EIAME |
411 E1000_GPIE_NSICR); 408 E1000_GPIE_NSICR);
412 409
413 for (i = 0; i < adapter->num_tx_queues; i++) { 410 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -546,6 +543,11 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
546 int err; 543 int err;
547 int numvecs, i; 544 int numvecs, i;
548 545
546 /* Number of supported queues. */
547 /* Having more queues than CPUs doesn't make sense. */
548 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
549 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
550
549 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; 551 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
550 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 552 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
551 GFP_KERNEL); 553 GFP_KERNEL);
@@ -687,7 +689,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
687 wr32(E1000_EIAC, adapter->eims_enable_mask); 689 wr32(E1000_EIAC, adapter->eims_enable_mask);
688 wr32(E1000_EIAM, adapter->eims_enable_mask); 690 wr32(E1000_EIAM, adapter->eims_enable_mask);
689 wr32(E1000_EIMS, adapter->eims_enable_mask); 691 wr32(E1000_EIMS, adapter->eims_enable_mask);
690 wr32(E1000_IMS, E1000_IMS_LSC); 692 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
691 } else { 693 } else {
692 wr32(E1000_IMS, IMS_ENABLE_MASK); 694 wr32(E1000_IMS, IMS_ENABLE_MASK);
693 wr32(E1000_IAM, IMS_ENABLE_MASK); 695 wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -856,6 +858,10 @@ void igb_down(struct igb_adapter *adapter)
856 858
857 netdev->tx_queue_len = adapter->tx_queue_len; 859 netdev->tx_queue_len = adapter->tx_queue_len;
858 netif_carrier_off(netdev); 860 netif_carrier_off(netdev);
861
862 /* record the stats before reset*/
863 igb_update_stats(adapter);
864
859 adapter->link_speed = 0; 865 adapter->link_speed = 0;
860 adapter->link_duplex = 0; 866 adapter->link_duplex = 0;
861 867
@@ -886,11 +892,14 @@ void igb_reset(struct igb_adapter *adapter)
886 /* Repartition Pba for greater than 9k mtu 892 /* Repartition Pba for greater than 9k mtu
887 * To take effect CTRL.RST is required. 893 * To take effect CTRL.RST is required.
888 */ 894 */
889 if (mac->type != e1000_82576) { 895 switch (mac->type) {
890 pba = E1000_PBA_34K; 896 case e1000_82576:
891 }
892 else {
893 pba = E1000_PBA_64K; 897 pba = E1000_PBA_64K;
898 break;
899 case e1000_82575:
900 default:
901 pba = E1000_PBA_34K;
902 break;
894 } 903 }
895 904
896 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && 905 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -972,21 +981,6 @@ void igb_reset(struct igb_adapter *adapter)
972 igb_get_phy_info(&adapter->hw); 981 igb_get_phy_info(&adapter->hw);
973} 982}
974 983
975/**
976 * igb_is_need_ioport - determine if an adapter needs ioport resources or not
977 * @pdev: PCI device information struct
978 *
979 * Returns true if an adapter needs ioport resources
980 **/
981static int igb_is_need_ioport(struct pci_dev *pdev)
982{
983 switch (pdev->device) {
984 /* Currently there are no adapters that need ioport resources */
985 default:
986 return false;
987 }
988}
989
990static const struct net_device_ops igb_netdev_ops = { 984static const struct net_device_ops igb_netdev_ops = {
991 .ndo_open = igb_open, 985 .ndo_open = igb_open,
992 .ndo_stop = igb_close, 986 .ndo_stop = igb_close,
@@ -1026,21 +1020,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1026 struct pci_dev *us_dev; 1020 struct pci_dev *us_dev;
1027 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1021 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1028 unsigned long mmio_start, mmio_len; 1022 unsigned long mmio_start, mmio_len;
1029 int i, err, pci_using_dac, pos; 1023 int err, pci_using_dac, pos;
1030 u16 eeprom_data = 0, state = 0; 1024 u16 eeprom_data = 0, state = 0;
1031 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1025 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1032 u32 part_num; 1026 u32 part_num;
1033 int bars, need_ioport;
1034 1027
1035 /* do not allocate ioport bars when not needed */ 1028 err = pci_enable_device_mem(pdev);
1036 need_ioport = igb_is_need_ioport(pdev);
1037 if (need_ioport) {
1038 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1039 err = pci_enable_device(pdev);
1040 } else {
1041 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1042 err = pci_enable_device_mem(pdev);
1043 }
1044 if (err) 1029 if (err)
1045 return err; 1030 return err;
1046 1031
@@ -1083,7 +1068,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1083 break; 1068 break;
1084 } 1069 }
1085 1070
1086 err = pci_request_selected_regions(pdev, bars, igb_driver_name); 1071 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1072 IORESOURCE_MEM),
1073 igb_driver_name);
1087 if (err) 1074 if (err)
1088 goto err_pci_reg; 1075 goto err_pci_reg;
1089 1076
@@ -1111,15 +1098,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1111 hw = &adapter->hw; 1098 hw = &adapter->hw;
1112 hw->back = adapter; 1099 hw->back = adapter;
1113 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE; 1100 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1114 adapter->bars = bars;
1115 adapter->need_ioport = need_ioport;
1116 1101
1117 mmio_start = pci_resource_start(pdev, 0); 1102 mmio_start = pci_resource_start(pdev, 0);
1118 mmio_len = pci_resource_len(pdev, 0); 1103 mmio_len = pci_resource_len(pdev, 0);
1119 1104
1120 err = -EIO; 1105 err = -EIO;
1121 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 1106 hw->hw_addr = ioremap(mmio_start, mmio_len);
1122 if (!adapter->hw.hw_addr) 1107 if (!hw->hw_addr)
1123 goto err_ioremap; 1108 goto err_ioremap;
1124 1109
1125 netdev->netdev_ops = &igb_netdev_ops; 1110 netdev->netdev_ops = &igb_netdev_ops;
@@ -1147,8 +1132,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1147 /* Initialize skew-specific constants */ 1132 /* Initialize skew-specific constants */
1148 err = ei->get_invariants(hw); 1133 err = ei->get_invariants(hw);
1149 if (err) 1134 if (err)
1150 goto err_hw_init; 1135 goto err_sw_init;
1151 1136
1137 /* setup the private structure */
1152 err = igb_sw_init(adapter); 1138 err = igb_sw_init(adapter);
1153 if (err) 1139 if (err)
1154 goto err_sw_init; 1140 goto err_sw_init;
@@ -1180,27 +1166,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1180 "PHY reset is blocked due to SOL/IDER session.\n"); 1166 "PHY reset is blocked due to SOL/IDER session.\n");
1181 1167
1182 netdev->features = NETIF_F_SG | 1168 netdev->features = NETIF_F_SG |
1183 NETIF_F_HW_CSUM | 1169 NETIF_F_IP_CSUM |
1184 NETIF_F_HW_VLAN_TX | 1170 NETIF_F_HW_VLAN_TX |
1185 NETIF_F_HW_VLAN_RX | 1171 NETIF_F_HW_VLAN_RX |
1186 NETIF_F_HW_VLAN_FILTER; 1172 NETIF_F_HW_VLAN_FILTER;
1187 1173
1174 netdev->features |= NETIF_F_IPV6_CSUM;
1188 netdev->features |= NETIF_F_TSO; 1175 netdev->features |= NETIF_F_TSO;
1189 netdev->features |= NETIF_F_TSO6; 1176 netdev->features |= NETIF_F_TSO6;
1190 1177
1191#ifdef CONFIG_IGB_LRO 1178#ifdef CONFIG_IGB_LRO
1192 netdev->features |= NETIF_F_LRO; 1179 netdev->features |= NETIF_F_GRO;
1193#endif 1180#endif
1194 1181
1195 netdev->vlan_features |= NETIF_F_TSO; 1182 netdev->vlan_features |= NETIF_F_TSO;
1196 netdev->vlan_features |= NETIF_F_TSO6; 1183 netdev->vlan_features |= NETIF_F_TSO6;
1197 netdev->vlan_features |= NETIF_F_HW_CSUM; 1184 netdev->vlan_features |= NETIF_F_IP_CSUM;
1198 netdev->vlan_features |= NETIF_F_SG; 1185 netdev->vlan_features |= NETIF_F_SG;
1199 1186
1200 if (pci_using_dac) 1187 if (pci_using_dac)
1201 netdev->features |= NETIF_F_HIGHDMA; 1188 netdev->features |= NETIF_F_HIGHDMA;
1202 1189
1203 netdev->features |= NETIF_F_LLTX;
1204 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1190 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1205 1191
1206 /* before reading the NVM, reset the controller to put the device in a 1192 /* before reading the NVM, reset the controller to put the device in a
@@ -1238,14 +1224,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1238 INIT_WORK(&adapter->reset_task, igb_reset_task); 1224 INIT_WORK(&adapter->reset_task, igb_reset_task);
1239 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); 1225 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1240 1226
1241 /* Initialize link & ring properties that are user-changeable */ 1227 /* Initialize link properties that are user-changeable */
1242 adapter->tx_ring->count = 256;
1243 for (i = 0; i < adapter->num_tx_queues; i++)
1244 adapter->tx_ring[i].count = adapter->tx_ring->count;
1245 adapter->rx_ring->count = 256;
1246 for (i = 0; i < adapter->num_rx_queues; i++)
1247 adapter->rx_ring[i].count = adapter->rx_ring->count;
1248
1249 adapter->fc_autoneg = true; 1228 adapter->fc_autoneg = true;
1250 hw->mac.autoneg = true; 1229 hw->mac.autoneg = true;
1251 hw->phy.autoneg_advertised = 0x2f; 1230 hw->phy.autoneg_advertised = 0x2f;
@@ -1266,8 +1245,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1266 1245
1267 if (hw->bus.func == 0 || 1246 if (hw->bus.func == 0 ||
1268 hw->device_id == E1000_DEV_ID_82575EB_COPPER) 1247 hw->device_id == E1000_DEV_ID_82575EB_COPPER)
1269 hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, 1248 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1270 &eeprom_data);
1271 1249
1272 if (eeprom_data & eeprom_apme_mask) 1250 if (eeprom_data & eeprom_apme_mask)
1273 adapter->eeprom_wol |= E1000_WUFC_MAG; 1251 adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -1352,15 +1330,14 @@ err_eeprom:
1352 if (hw->flash_address) 1330 if (hw->flash_address)
1353 iounmap(hw->flash_address); 1331 iounmap(hw->flash_address);
1354 1332
1355 igb_remove_device(hw);
1356 igb_free_queues(adapter); 1333 igb_free_queues(adapter);
1357err_sw_init: 1334err_sw_init:
1358err_hw_init:
1359 iounmap(hw->hw_addr); 1335 iounmap(hw->hw_addr);
1360err_ioremap: 1336err_ioremap:
1361 free_netdev(netdev); 1337 free_netdev(netdev);
1362err_alloc_etherdev: 1338err_alloc_etherdev:
1363 pci_release_selected_regions(pdev, bars); 1339 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1340 IORESOURCE_MEM));
1364err_pci_reg: 1341err_pci_reg:
1365err_dma: 1342err_dma:
1366 pci_disable_device(pdev); 1343 pci_disable_device(pdev);
@@ -1380,9 +1357,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1380{ 1357{
1381 struct net_device *netdev = pci_get_drvdata(pdev); 1358 struct net_device *netdev = pci_get_drvdata(pdev);
1382 struct igb_adapter *adapter = netdev_priv(netdev); 1359 struct igb_adapter *adapter = netdev_priv(netdev);
1383#ifdef CONFIG_IGB_DCA
1384 struct e1000_hw *hw = &adapter->hw; 1360 struct e1000_hw *hw = &adapter->hw;
1385#endif
1386 int err; 1361 int err;
1387 1362
1388 /* flush_scheduled work may reschedule our watchdog task, so 1363 /* flush_scheduled work may reschedule our watchdog task, so
@@ -1411,15 +1386,15 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1411 if (!igb_check_reset_block(&adapter->hw)) 1386 if (!igb_check_reset_block(&adapter->hw))
1412 igb_reset_phy(&adapter->hw); 1387 igb_reset_phy(&adapter->hw);
1413 1388
1414 igb_remove_device(&adapter->hw);
1415 igb_reset_interrupt_capability(adapter); 1389 igb_reset_interrupt_capability(adapter);
1416 1390
1417 igb_free_queues(adapter); 1391 igb_free_queues(adapter);
1418 1392
1419 iounmap(adapter->hw.hw_addr); 1393 iounmap(hw->hw_addr);
1420 if (adapter->hw.flash_address) 1394 if (hw->flash_address)
1421 iounmap(adapter->hw.flash_address); 1395 iounmap(hw->flash_address);
1422 pci_release_selected_regions(pdev, adapter->bars); 1396 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1397 IORESOURCE_MEM));
1423 1398
1424 free_netdev(netdev); 1399 free_netdev(netdev);
1425 1400
@@ -1454,11 +1429,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1454 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1429 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1455 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1430 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1456 1431
1457 /* Number of supported queues. */
1458 /* Having more queues than CPUs doesn't make sense. */
1459 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1460 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
1461
1462 /* This call may decrease the number of queues depending on 1432 /* This call may decrease the number of queues depending on
1463 * interrupt mode. */ 1433 * interrupt mode. */
1464 igb_set_interrupt_capability(adapter); 1434 igb_set_interrupt_capability(adapter);
@@ -1657,7 +1627,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1657 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 1627 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1658 r_idx = i % adapter->num_tx_queues; 1628 r_idx = i % adapter->num_tx_queues;
1659 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 1629 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1660 } 1630 }
1661 return err; 1631 return err;
1662} 1632}
1663 1633
@@ -1738,14 +1708,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1738 struct pci_dev *pdev = adapter->pdev; 1708 struct pci_dev *pdev = adapter->pdev;
1739 int size, desc_len; 1709 int size, desc_len;
1740 1710
1741#ifdef CONFIG_IGB_LRO
1742 size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
1743 rx_ring->lro_mgr.lro_arr = vmalloc(size);
1744 if (!rx_ring->lro_mgr.lro_arr)
1745 goto err;
1746 memset(rx_ring->lro_mgr.lro_arr, 0, size);
1747#endif
1748
1749 size = sizeof(struct igb_buffer) * rx_ring->count; 1711 size = sizeof(struct igb_buffer) * rx_ring->count;
1750 rx_ring->buffer_info = vmalloc(size); 1712 rx_ring->buffer_info = vmalloc(size);
1751 if (!rx_ring->buffer_info) 1713 if (!rx_ring->buffer_info)
@@ -1772,10 +1734,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1772 return 0; 1734 return 0;
1773 1735
1774err: 1736err:
1775#ifdef CONFIG_IGB_LRO
1776 vfree(rx_ring->lro_mgr.lro_arr);
1777 rx_ring->lro_mgr.lro_arr = NULL;
1778#endif
1779 vfree(rx_ring->buffer_info); 1737 vfree(rx_ring->buffer_info);
1780 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 1738 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1781 "the receive descriptor ring\n"); 1739 "the receive descriptor ring\n");
@@ -1824,7 +1782,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1824 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1782 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1825 1783
1826 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 1784 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1827 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1785 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1828 1786
1829 /* 1787 /*
1830 * enable stripping of CRC. It's unlikely this will break BMC 1788 * enable stripping of CRC. It's unlikely this will break BMC
@@ -1929,16 +1887,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1929 rxdctl |= IGB_RX_HTHRESH << 8; 1887 rxdctl |= IGB_RX_HTHRESH << 8;
1930 rxdctl |= IGB_RX_WTHRESH << 16; 1888 rxdctl |= IGB_RX_WTHRESH << 16;
1931 wr32(E1000_RXDCTL(j), rxdctl); 1889 wr32(E1000_RXDCTL(j), rxdctl);
1932#ifdef CONFIG_IGB_LRO
1933 /* Intitial LRO Settings */
1934 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
1935 ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1936 ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
1937 ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1938 ring->lro_mgr.dev = adapter->netdev;
1939 ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1940 ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1941#endif
1942 } 1890 }
1943 1891
1944 if (adapter->num_rx_queues > 1) { 1892 if (adapter->num_rx_queues > 1) {
@@ -2127,11 +2075,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
2127 vfree(rx_ring->buffer_info); 2075 vfree(rx_ring->buffer_info);
2128 rx_ring->buffer_info = NULL; 2076 rx_ring->buffer_info = NULL;
2129 2077
2130#ifdef CONFIG_IGB_LRO
2131 vfree(rx_ring->lro_mgr.lro_arr);
2132 rx_ring->lro_mgr.lro_arr = NULL;
2133#endif
2134
2135 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2078 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2136 2079
2137 rx_ring->desc = NULL; 2080 rx_ring->desc = NULL;
@@ -2231,15 +2174,16 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2231static int igb_set_mac(struct net_device *netdev, void *p) 2174static int igb_set_mac(struct net_device *netdev, void *p)
2232{ 2175{
2233 struct igb_adapter *adapter = netdev_priv(netdev); 2176 struct igb_adapter *adapter = netdev_priv(netdev);
2177 struct e1000_hw *hw = &adapter->hw;
2234 struct sockaddr *addr = p; 2178 struct sockaddr *addr = p;
2235 2179
2236 if (!is_valid_ether_addr(addr->sa_data)) 2180 if (!is_valid_ether_addr(addr->sa_data))
2237 return -EADDRNOTAVAIL; 2181 return -EADDRNOTAVAIL;
2238 2182
2239 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2183 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2240 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 2184 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2241 2185
2242 adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 2186 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
2243 2187
2244 return 0; 2188 return 0;
2245} 2189}
@@ -2282,8 +2226,8 @@ static void igb_set_multi(struct net_device *netdev)
2282 2226
2283 if (!netdev->mc_count) { 2227 if (!netdev->mc_count) {
2284 /* nothing to program, so clear mc list */ 2228 /* nothing to program, so clear mc list */
2285 igb_update_mc_addr_list_82575(hw, NULL, 0, 1, 2229 igb_update_mc_addr_list(hw, NULL, 0, 1,
2286 mac->rar_entry_count); 2230 mac->rar_entry_count);
2287 return; 2231 return;
2288 } 2232 }
2289 2233
@@ -2300,8 +2244,7 @@ static void igb_set_multi(struct net_device *netdev)
2300 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); 2244 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2301 mc_ptr = mc_ptr->next; 2245 mc_ptr = mc_ptr->next;
2302 } 2246 }
2303 igb_update_mc_addr_list_82575(hw, mta_list, i, 1, 2247 igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
2304 mac->rar_entry_count);
2305 kfree(mta_list); 2248 kfree(mta_list);
2306} 2249}
2307 2250
@@ -2314,6 +2257,46 @@ static void igb_update_phy_info(unsigned long data)
2314} 2257}
2315 2258
2316/** 2259/**
2260 * igb_has_link - check shared code for link and determine up/down
2261 * @adapter: pointer to driver private info
2262 **/
2263static bool igb_has_link(struct igb_adapter *adapter)
2264{
2265 struct e1000_hw *hw = &adapter->hw;
2266 bool link_active = false;
2267 s32 ret_val = 0;
2268
2269 /* get_link_status is set on LSC (link status) interrupt or
2270 * rx sequence error interrupt. get_link_status will stay
2271 * false until the e1000_check_for_link establishes link
2272 * for copper adapters ONLY
2273 */
2274 switch (hw->phy.media_type) {
2275 case e1000_media_type_copper:
2276 if (hw->mac.get_link_status) {
2277 ret_val = hw->mac.ops.check_for_link(hw);
2278 link_active = !hw->mac.get_link_status;
2279 } else {
2280 link_active = true;
2281 }
2282 break;
2283 case e1000_media_type_fiber:
2284 ret_val = hw->mac.ops.check_for_link(hw);
2285 link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
2286 break;
2287 case e1000_media_type_internal_serdes:
2288 ret_val = hw->mac.ops.check_for_link(hw);
2289 link_active = hw->mac.serdes_has_link;
2290 break;
2291 default:
2292 case e1000_media_type_unknown:
2293 break;
2294 }
2295
2296 return link_active;
2297}
2298
2299/**
2317 * igb_watchdog - Timer Call-back 2300 * igb_watchdog - Timer Call-back
2318 * @data: pointer to adapter cast into an unsigned long 2301 * @data: pointer to adapter cast into an unsigned long
2319 **/ 2302 **/
@@ -2329,34 +2312,16 @@ static void igb_watchdog_task(struct work_struct *work)
2329 struct igb_adapter *adapter = container_of(work, 2312 struct igb_adapter *adapter = container_of(work,
2330 struct igb_adapter, watchdog_task); 2313 struct igb_adapter, watchdog_task);
2331 struct e1000_hw *hw = &adapter->hw; 2314 struct e1000_hw *hw = &adapter->hw;
2332
2333 struct net_device *netdev = adapter->netdev; 2315 struct net_device *netdev = adapter->netdev;
2334 struct igb_ring *tx_ring = adapter->tx_ring; 2316 struct igb_ring *tx_ring = adapter->tx_ring;
2335 struct e1000_mac_info *mac = &adapter->hw.mac;
2336 u32 link; 2317 u32 link;
2337 u32 eics = 0; 2318 u32 eics = 0;
2338 s32 ret_val;
2339 int i; 2319 int i;
2340 2320
2341 if ((netif_carrier_ok(netdev)) && 2321 link = igb_has_link(adapter);
2342 (rd32(E1000_STATUS) & E1000_STATUS_LU)) 2322 if ((netif_carrier_ok(netdev)) && link)
2343 goto link_up; 2323 goto link_up;
2344 2324
2345 ret_val = hw->mac.ops.check_for_link(&adapter->hw);
2346 if ((ret_val == E1000_ERR_PHY) &&
2347 (hw->phy.type == e1000_phy_igp_3) &&
2348 (rd32(E1000_CTRL) &
2349 E1000_PHY_CTRL_GBE_DISABLE))
2350 dev_info(&adapter->pdev->dev,
2351 "Gigabit has been disabled, downgrading speed\n");
2352
2353 if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
2354 !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
2355 link = mac->serdes_has_link;
2356 else
2357 link = rd32(E1000_STATUS) &
2358 E1000_STATUS_LU;
2359
2360 if (link) { 2325 if (link) {
2361 if (!netif_carrier_ok(netdev)) { 2326 if (!netif_carrier_ok(netdev)) {
2362 u32 ctrl; 2327 u32 ctrl;
@@ -2395,6 +2360,7 @@ static void igb_watchdog_task(struct work_struct *work)
2395 netif_carrier_on(netdev); 2360 netif_carrier_on(netdev);
2396 netif_tx_wake_all_queues(netdev); 2361 netif_tx_wake_all_queues(netdev);
2397 2362
2363 /* link state has changed, schedule phy info update */
2398 if (!test_bit(__IGB_DOWN, &adapter->state)) 2364 if (!test_bit(__IGB_DOWN, &adapter->state))
2399 mod_timer(&adapter->phy_info_timer, 2365 mod_timer(&adapter->phy_info_timer,
2400 round_jiffies(jiffies + 2 * HZ)); 2366 round_jiffies(jiffies + 2 * HZ));
@@ -2408,6 +2374,8 @@ static void igb_watchdog_task(struct work_struct *work)
2408 netdev->name); 2374 netdev->name);
2409 netif_carrier_off(netdev); 2375 netif_carrier_off(netdev);
2410 netif_tx_stop_all_queues(netdev); 2376 netif_tx_stop_all_queues(netdev);
2377
2378 /* link state has changed, schedule phy info update */
2411 if (!test_bit(__IGB_DOWN, &adapter->state)) 2379 if (!test_bit(__IGB_DOWN, &adapter->state))
2412 mod_timer(&adapter->phy_info_timer, 2380 mod_timer(&adapter->phy_info_timer,
2413 round_jiffies(jiffies + 2 * HZ)); 2381 round_jiffies(jiffies + 2 * HZ));
@@ -2417,9 +2385,9 @@ static void igb_watchdog_task(struct work_struct *work)
2417link_up: 2385link_up:
2418 igb_update_stats(adapter); 2386 igb_update_stats(adapter);
2419 2387
2420 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2388 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2421 adapter->tpt_old = adapter->stats.tpt; 2389 adapter->tpt_old = adapter->stats.tpt;
2422 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 2390 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
2423 adapter->colc_old = adapter->stats.colc; 2391 adapter->colc_old = adapter->stats.colc;
2424 2392
2425 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 2393 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
@@ -2779,12 +2747,12 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2779 2747
2780 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2748 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2781 switch (skb->protocol) { 2749 switch (skb->protocol) {
2782 case __constant_htons(ETH_P_IP): 2750 case cpu_to_be16(ETH_P_IP):
2783 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2751 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2784 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2752 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2785 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 2753 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2786 break; 2754 break;
2787 case __constant_htons(ETH_P_IPV6): 2755 case cpu_to_be16(ETH_P_IPV6):
2788 /* XXX what about other V6 headers?? */ 2756 /* XXX what about other V6 headers?? */
2789 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2757 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2790 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 2758 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -2803,6 +2771,8 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2803 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 2771 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2804 context_desc->mss_l4len_idx = 2772 context_desc->mss_l4len_idx =
2805 cpu_to_le32(tx_ring->queue_index << 4); 2773 cpu_to_le32(tx_ring->queue_index << 4);
2774 else
2775 context_desc->mss_l4len_idx = 0;
2806 2776
2807 buffer_info->time_stamp = jiffies; 2777 buffer_info->time_stamp = jiffies;
2808 buffer_info->next_to_watch = i; 2778 buffer_info->next_to_watch = i;
@@ -2981,12 +2951,9 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2981 struct igb_adapter *adapter = netdev_priv(netdev); 2951 struct igb_adapter *adapter = netdev_priv(netdev);
2982 unsigned int first; 2952 unsigned int first;
2983 unsigned int tx_flags = 0; 2953 unsigned int tx_flags = 0;
2984 unsigned int len;
2985 u8 hdr_len = 0; 2954 u8 hdr_len = 0;
2986 int tso = 0; 2955 int tso = 0;
2987 2956
2988 len = skb_headlen(skb);
2989
2990 if (test_bit(__IGB_DOWN, &adapter->state)) { 2957 if (test_bit(__IGB_DOWN, &adapter->state)) {
2991 dev_kfree_skb_any(skb); 2958 dev_kfree_skb_any(skb);
2992 return NETDEV_TX_OK; 2959 return NETDEV_TX_OK;
@@ -3072,8 +3039,8 @@ static void igb_tx_timeout(struct net_device *netdev)
3072 /* Do the reset outside of interrupt context */ 3039 /* Do the reset outside of interrupt context */
3073 adapter->tx_timeout_count++; 3040 adapter->tx_timeout_count++;
3074 schedule_work(&adapter->reset_task); 3041 schedule_work(&adapter->reset_task);
3075 wr32(E1000_EICS, adapter->eims_enable_mask & 3042 wr32(E1000_EICS,
3076 ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER)); 3043 (adapter->eims_enable_mask & ~adapter->eims_other));
3077} 3044}
3078 3045
3079static void igb_reset_task(struct work_struct *work) 3046static void igb_reset_task(struct work_struct *work)
@@ -3317,15 +3284,20 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3317 u32 icr = rd32(E1000_ICR); 3284 u32 icr = rd32(E1000_ICR);
3318 3285
3319 /* reading ICR causes bit 31 of EICR to be cleared */ 3286 /* reading ICR causes bit 31 of EICR to be cleared */
3287
3288 if(icr & E1000_ICR_DOUTSYNC) {
3289 /* HW is reporting DMA is out of sync */
3290 adapter->stats.doosync++;
3291 }
3320 if (!(icr & E1000_ICR_LSC)) 3292 if (!(icr & E1000_ICR_LSC))
3321 goto no_link_interrupt; 3293 goto no_link_interrupt;
3322 hw->mac.get_link_status = 1; 3294 hw->mac.get_link_status = 1;
3323 /* guard against interrupt when we're going down */ 3295 /* guard against interrupt when we're going down */
3324 if (!test_bit(__IGB_DOWN, &adapter->state)) 3296 if (!test_bit(__IGB_DOWN, &adapter->state))
3325 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3297 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3326 3298
3327no_link_interrupt: 3299no_link_interrupt:
3328 wr32(E1000_IMS, E1000_IMS_LSC); 3300 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
3329 wr32(E1000_EIMS, adapter->eims_other); 3301 wr32(E1000_EIMS, adapter->eims_other);
3330 3302
3331 return IRQ_HANDLED; 3303 return IRQ_HANDLED;
@@ -3385,8 +3357,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
3385 3357
3386 igb_write_itr(rx_ring); 3358 igb_write_itr(rx_ring);
3387 3359
3388 if (netif_rx_schedule_prep(&rx_ring->napi)) 3360 if (napi_schedule_prep(&rx_ring->napi))
3389 __netif_rx_schedule(&rx_ring->napi); 3361 __napi_schedule(&rx_ring->napi);
3390 3362
3391#ifdef CONFIG_IGB_DCA 3363#ifdef CONFIG_IGB_DCA
3392 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 3364 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -3529,19 +3501,24 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
3529 3501
3530 igb_write_itr(adapter->rx_ring); 3502 igb_write_itr(adapter->rx_ring);
3531 3503
3504 if(icr & E1000_ICR_DOUTSYNC) {
3505 /* HW is reporting DMA is out of sync */
3506 adapter->stats.doosync++;
3507 }
3508
3532 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3509 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3533 hw->mac.get_link_status = 1; 3510 hw->mac.get_link_status = 1;
3534 if (!test_bit(__IGB_DOWN, &adapter->state)) 3511 if (!test_bit(__IGB_DOWN, &adapter->state))
3535 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3512 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3536 } 3513 }
3537 3514
3538 netif_rx_schedule(&adapter->rx_ring[0].napi); 3515 napi_schedule(&adapter->rx_ring[0].napi);
3539 3516
3540 return IRQ_HANDLED; 3517 return IRQ_HANDLED;
3541} 3518}
3542 3519
3543/** 3520/**
3544 * igb_intr - Interrupt Handler 3521 * igb_intr - Legacy Interrupt Handler
3545 * @irq: interrupt number 3522 * @irq: interrupt number
3546 * @data: pointer to a network interface device structure 3523 * @data: pointer to a network interface device structure
3547 **/ 3524 **/
@@ -3553,7 +3530,6 @@ static irqreturn_t igb_intr(int irq, void *data)
3553 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 3530 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3554 * need for the IMC write */ 3531 * need for the IMC write */
3555 u32 icr = rd32(E1000_ICR); 3532 u32 icr = rd32(E1000_ICR);
3556 u32 eicr = 0;
3557 if (!icr) 3533 if (!icr)
3558 return IRQ_NONE; /* Not our interrupt */ 3534 return IRQ_NONE; /* Not our interrupt */
3559 3535
@@ -3564,7 +3540,10 @@ static irqreturn_t igb_intr(int irq, void *data)
3564 if (!(icr & E1000_ICR_INT_ASSERTED)) 3540 if (!(icr & E1000_ICR_INT_ASSERTED))
3565 return IRQ_NONE; 3541 return IRQ_NONE;
3566 3542
3567 eicr = rd32(E1000_EICR); 3543 if(icr & E1000_ICR_DOUTSYNC) {
3544 /* HW is reporting DMA is out of sync */
3545 adapter->stats.doosync++;
3546 }
3568 3547
3569 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3548 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3570 hw->mac.get_link_status = 1; 3549 hw->mac.get_link_status = 1;
@@ -3573,7 +3552,7 @@ static irqreturn_t igb_intr(int irq, void *data)
3573 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3552 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3574 } 3553 }
3575 3554
3576 netif_rx_schedule(&adapter->rx_ring[0].napi); 3555 napi_schedule(&adapter->rx_ring[0].napi);
3577 3556
3578 return IRQ_HANDLED; 3557 return IRQ_HANDLED;
3579} 3558}
@@ -3608,7 +3587,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
3608 !netif_running(netdev)) { 3587 !netif_running(netdev)) {
3609 if (adapter->itr_setting & 3) 3588 if (adapter->itr_setting & 3)
3610 igb_set_itr(adapter); 3589 igb_set_itr(adapter);
3611 netif_rx_complete(napi); 3590 napi_complete(napi);
3612 if (!test_bit(__IGB_DOWN, &adapter->state)) 3591 if (!test_bit(__IGB_DOWN, &adapter->state))
3613 igb_irq_enable(adapter); 3592 igb_irq_enable(adapter);
3614 return 0; 3593 return 0;
@@ -3634,7 +3613,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3634 3613
3635 /* If not enough Rx work done, exit the polling mode */ 3614 /* If not enough Rx work done, exit the polling mode */
3636 if ((work_done == 0) || !netif_running(netdev)) { 3615 if ((work_done == 0) || !netif_running(netdev)) {
3637 netif_rx_complete(napi); 3616 napi_complete(napi);
3638 3617
3639 if (adapter->itr_setting & 3) { 3618 if (adapter->itr_setting & 3) {
3640 if (adapter->num_rx_queues == 1) 3619 if (adapter->num_rx_queues == 1)
@@ -3764,42 +3743,9 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3764 return (count < tx_ring->count); 3743 return (count < tx_ring->count);
3765} 3744}
3766 3745
3767#ifdef CONFIG_IGB_LRO
3768 /**
3769 * igb_get_skb_hdr - helper function for LRO header processing
3770 * @skb: pointer to sk_buff to be added to LRO packet
3771 * @iphdr: pointer to ip header structure
3772 * @tcph: pointer to tcp header structure
3773 * @hdr_flags: pointer to header flags
3774 * @priv: pointer to the receive descriptor for the current sk_buff
3775 **/
3776static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
3777 u64 *hdr_flags, void *priv)
3778{
3779 union e1000_adv_rx_desc *rx_desc = priv;
3780 u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
3781 (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
3782
3783 /* Verify that this is a valid IPv4 TCP packet */
3784 if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
3785 E1000_RXDADV_PKTTYPE_TCP))
3786 return -1;
3787
3788 /* Set network headers */
3789 skb_reset_network_header(skb);
3790 skb_set_transport_header(skb, ip_hdrlen(skb));
3791 *iphdr = ip_hdr(skb);
3792 *tcph = tcp_hdr(skb);
3793 *hdr_flags = LRO_IPV4 | LRO_TCP;
3794
3795 return 0;
3796
3797}
3798#endif /* CONFIG_IGB_LRO */
3799
3800/** 3746/**
3801 * igb_receive_skb - helper function to handle rx indications 3747 * igb_receive_skb - helper function to handle rx indications
3802 * @ring: pointer to receive ring receiving this packet 3748 * @ring: pointer to receive ring receiving this packet
3803 * @status: descriptor status field as written by hardware 3749 * @status: descriptor status field as written by hardware
3804 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3750 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3805 * @skb: pointer to sk_buff to be indicated to stack 3751 * @skb: pointer to sk_buff to be indicated to stack
@@ -3811,28 +3757,21 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
3811 struct igb_adapter * adapter = ring->adapter; 3757 struct igb_adapter * adapter = ring->adapter;
3812 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 3758 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3813 3759
3814#ifdef CONFIG_IGB_LRO 3760 skb_record_rx_queue(skb, ring->queue_index);
3815 if (adapter->netdev->features & NETIF_F_LRO && 3761 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3816 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3817 if (vlan_extracted) 3762 if (vlan_extracted)
3818 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 3763 vlan_gro_receive(&ring->napi, adapter->vlgrp,
3819 adapter->vlgrp, 3764 le16_to_cpu(rx_desc->wb.upper.vlan),
3820 le16_to_cpu(rx_desc->wb.upper.vlan), 3765 skb);
3821 rx_desc);
3822 else 3766 else
3823 lro_receive_skb(&ring->lro_mgr,skb, rx_desc); 3767 napi_gro_receive(&ring->napi, skb);
3824 ring->lro_used = 1;
3825 } else { 3768 } else {
3826#endif
3827 if (vlan_extracted) 3769 if (vlan_extracted)
3828 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3770 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3829 le16_to_cpu(rx_desc->wb.upper.vlan)); 3771 le16_to_cpu(rx_desc->wb.upper.vlan));
3830 else 3772 else
3831
3832 netif_receive_skb(skb); 3773 netif_receive_skb(skb);
3833#ifdef CONFIG_IGB_LRO
3834 } 3774 }
3835#endif
3836} 3775}
3837 3776
3838 3777
@@ -3874,6 +3813,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3874 unsigned int total_bytes = 0, total_packets = 0; 3813 unsigned int total_bytes = 0, total_packets = 0;
3875 3814
3876 i = rx_ring->next_to_clean; 3815 i = rx_ring->next_to_clean;
3816 buffer_info = &rx_ring->buffer_info[i];
3877 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 3817 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3878 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 3818 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3879 3819
@@ -3881,25 +3821,22 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3881 if (*work_done >= budget) 3821 if (*work_done >= budget)
3882 break; 3822 break;
3883 (*work_done)++; 3823 (*work_done)++;
3884 buffer_info = &rx_ring->buffer_info[i];
3885 3824
3886 /* HW will not DMA in data larger than the given buffer, even 3825 skb = buffer_info->skb;
3887 * if it parses the (NFS, of course) header to be larger. In 3826 prefetch(skb->data - NET_IP_ALIGN);
3888 * that case, it fills the header buffer and spills the rest 3827 buffer_info->skb = NULL;
3889 * into the page. 3828
3890 */ 3829 i++;
3891 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 3830 if (i == rx_ring->count)
3892 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 3831 i = 0;
3893 if (hlen > adapter->rx_ps_hdr_size) 3832 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3894 hlen = adapter->rx_ps_hdr_size; 3833 prefetch(next_rxd);
3834 next_buffer = &rx_ring->buffer_info[i];
3895 3835
3896 length = le16_to_cpu(rx_desc->wb.upper.length); 3836 length = le16_to_cpu(rx_desc->wb.upper.length);
3897 cleaned = true; 3837 cleaned = true;
3898 cleaned_count++; 3838 cleaned_count++;
3899 3839
3900 skb = buffer_info->skb;
3901 prefetch(skb->data - NET_IP_ALIGN);
3902 buffer_info->skb = NULL;
3903 if (!adapter->rx_ps_hdr_size) { 3840 if (!adapter->rx_ps_hdr_size) {
3904 pci_unmap_single(pdev, buffer_info->dma, 3841 pci_unmap_single(pdev, buffer_info->dma,
3905 adapter->rx_buffer_len + 3842 adapter->rx_buffer_len +
@@ -3909,6 +3846,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3909 goto send_up; 3846 goto send_up;
3910 } 3847 }
3911 3848
3849 /* HW will not DMA in data larger than the given buffer, even
3850 * if it parses the (NFS, of course) header to be larger. In
3851 * that case, it fills the header buffer and spills the rest
3852 * into the page.
3853 */
3854 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
3855 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
3856 if (hlen > adapter->rx_ps_hdr_size)
3857 hlen = adapter->rx_ps_hdr_size;
3858
3912 if (!skb_shinfo(skb)->nr_frags) { 3859 if (!skb_shinfo(skb)->nr_frags) {
3913 pci_unmap_single(pdev, buffer_info->dma, 3860 pci_unmap_single(pdev, buffer_info->dma,
3914 adapter->rx_ps_hdr_size + 3861 adapter->rx_ps_hdr_size +
@@ -3938,13 +3885,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3938 3885
3939 skb->truesize += length; 3886 skb->truesize += length;
3940 } 3887 }
3941send_up:
3942 i++;
3943 if (i == rx_ring->count)
3944 i = 0;
3945 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3946 prefetch(next_rxd);
3947 next_buffer = &rx_ring->buffer_info[i];
3948 3888
3949 if (!(staterr & E1000_RXD_STAT_EOP)) { 3889 if (!(staterr & E1000_RXD_STAT_EOP)) {
3950 buffer_info->skb = next_buffer->skb; 3890 buffer_info->skb = next_buffer->skb;
@@ -3953,7 +3893,7 @@ send_up:
3953 next_buffer->dma = 0; 3893 next_buffer->dma = 0;
3954 goto next_desc; 3894 goto next_desc;
3955 } 3895 }
3956 3896send_up:
3957 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 3897 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3958 dev_kfree_skb_irq(skb); 3898 dev_kfree_skb_irq(skb);
3959 goto next_desc; 3899 goto next_desc;
@@ -3980,20 +3920,12 @@ next_desc:
3980 /* use prefetched values */ 3920 /* use prefetched values */
3981 rx_desc = next_rxd; 3921 rx_desc = next_rxd;
3982 buffer_info = next_buffer; 3922 buffer_info = next_buffer;
3983
3984 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 3923 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3985 } 3924 }
3986 3925
3987 rx_ring->next_to_clean = i; 3926 rx_ring->next_to_clean = i;
3988 cleaned_count = IGB_DESC_UNUSED(rx_ring); 3927 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3989 3928
3990#ifdef CONFIG_IGB_LRO
3991 if (rx_ring->lro_used) {
3992 lro_flush_all(&rx_ring->lro_mgr);
3993 rx_ring->lro_used = 0;
3994 }
3995#endif
3996
3997 if (cleaned_count) 3929 if (cleaned_count)
3998 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); 3930 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3999 3931
@@ -4021,10 +3953,17 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4021 struct igb_buffer *buffer_info; 3953 struct igb_buffer *buffer_info;
4022 struct sk_buff *skb; 3954 struct sk_buff *skb;
4023 unsigned int i; 3955 unsigned int i;
3956 int bufsz;
4024 3957
4025 i = rx_ring->next_to_use; 3958 i = rx_ring->next_to_use;
4026 buffer_info = &rx_ring->buffer_info[i]; 3959 buffer_info = &rx_ring->buffer_info[i];
4027 3960
3961 if (adapter->rx_ps_hdr_size)
3962 bufsz = adapter->rx_ps_hdr_size;
3963 else
3964 bufsz = adapter->rx_buffer_len;
3965 bufsz += NET_IP_ALIGN;
3966
4028 while (cleaned_count--) { 3967 while (cleaned_count--) {
4029 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 3968 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4030 3969
@@ -4040,23 +3979,14 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4040 buffer_info->page_offset ^= PAGE_SIZE / 2; 3979 buffer_info->page_offset ^= PAGE_SIZE / 2;
4041 } 3980 }
4042 buffer_info->page_dma = 3981 buffer_info->page_dma =
4043 pci_map_page(pdev, 3982 pci_map_page(pdev, buffer_info->page,
4044 buffer_info->page,
4045 buffer_info->page_offset, 3983 buffer_info->page_offset,
4046 PAGE_SIZE / 2, 3984 PAGE_SIZE / 2,
4047 PCI_DMA_FROMDEVICE); 3985 PCI_DMA_FROMDEVICE);
4048 } 3986 }
4049 3987
4050 if (!buffer_info->skb) { 3988 if (!buffer_info->skb) {
4051 int bufsz;
4052
4053 if (adapter->rx_ps_hdr_size)
4054 bufsz = adapter->rx_ps_hdr_size;
4055 else
4056 bufsz = adapter->rx_buffer_len;
4057 bufsz += NET_IP_ALIGN;
4058 skb = netdev_alloc_skb(netdev, bufsz); 3989 skb = netdev_alloc_skb(netdev, bufsz);
4059
4060 if (!skb) { 3990 if (!skb) {
4061 adapter->alloc_rx_buff_failed++; 3991 adapter->alloc_rx_buff_failed++;
4062 goto no_buffers; 3992 goto no_buffers;
@@ -4072,7 +4002,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4072 buffer_info->dma = pci_map_single(pdev, skb->data, 4002 buffer_info->dma = pci_map_single(pdev, skb->data,
4073 bufsz, 4003 bufsz,
4074 PCI_DMA_FROMDEVICE); 4004 PCI_DMA_FROMDEVICE);
4075
4076 } 4005 }
4077 /* Refresh the desc even if buffer_addrs didn't change because 4006 /* Refresh the desc even if buffer_addrs didn't change because
4078 * each write-back erases this info. */ 4007 * each write-back erases this info. */
@@ -4206,7 +4135,7 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4206 struct e1000_hw *hw = &adapter->hw; 4135 struct e1000_hw *hw = &adapter->hw;
4207 u32 vfta, index; 4136 u32 vfta, index;
4208 4137
4209 if ((adapter->hw.mng_cookie.status & 4138 if ((hw->mng_cookie.status &
4210 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 4139 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4211 (vid == adapter->mng_vlan_id)) 4140 (vid == adapter->mng_vlan_id))
4212 return; 4141 return;
@@ -4390,10 +4319,7 @@ static int igb_resume(struct pci_dev *pdev)
4390 pci_set_power_state(pdev, PCI_D0); 4319 pci_set_power_state(pdev, PCI_D0);
4391 pci_restore_state(pdev); 4320 pci_restore_state(pdev);
4392 4321
4393 if (adapter->need_ioport) 4322 err = pci_enable_device_mem(pdev);
4394 err = pci_enable_device(pdev);
4395 else
4396 err = pci_enable_device_mem(pdev);
4397 if (err) { 4323 if (err) {
4398 dev_err(&pdev->dev, 4324 dev_err(&pdev->dev,
4399 "igb: Cannot enable PCI device from suspend\n"); 4325 "igb: Cannot enable PCI device from suspend\n");
@@ -4414,6 +4340,11 @@ static int igb_resume(struct pci_dev *pdev)
4414 /* e1000_power_up_phy(adapter); */ 4340 /* e1000_power_up_phy(adapter); */
4415 4341
4416 igb_reset(adapter); 4342 igb_reset(adapter);
4343
4344 /* let the f/w know that the h/w is now under the control of the
4345 * driver. */
4346 igb_get_hw_control(adapter);
4347
4417 wr32(E1000_WUS, ~0); 4348 wr32(E1000_WUS, ~0);
4418 4349
4419 if (netif_running(netdev)) { 4350 if (netif_running(netdev)) {
@@ -4424,10 +4355,6 @@ static int igb_resume(struct pci_dev *pdev)
4424 4355
4425 netif_device_attach(netdev); 4356 netif_device_attach(netdev);
4426 4357
4427 /* let the f/w know that the h/w is now under the control of the
4428 * driver. */
4429 igb_get_hw_control(adapter);
4430
4431 return 0; 4358 return 0;
4432} 4359}
4433#endif 4360#endif
@@ -4446,22 +4373,27 @@ static void igb_shutdown(struct pci_dev *pdev)
4446static void igb_netpoll(struct net_device *netdev) 4373static void igb_netpoll(struct net_device *netdev)
4447{ 4374{
4448 struct igb_adapter *adapter = netdev_priv(netdev); 4375 struct igb_adapter *adapter = netdev_priv(netdev);
4376 struct e1000_hw *hw = &adapter->hw;
4449 int i; 4377 int i;
4450 int work_done = 0;
4451 4378
4452 igb_irq_disable(adapter); 4379 if (!adapter->msix_entries) {
4453 adapter->flags |= IGB_FLAG_IN_NETPOLL; 4380 igb_irq_disable(adapter);
4454 4381 napi_schedule(&adapter->rx_ring[0].napi);
4455 for (i = 0; i < adapter->num_tx_queues; i++) 4382 return;
4456 igb_clean_tx_irq(&adapter->tx_ring[i]); 4383 }
4457 4384
4458 for (i = 0; i < adapter->num_rx_queues; i++) 4385 for (i = 0; i < adapter->num_tx_queues; i++) {
4459 igb_clean_rx_irq_adv(&adapter->rx_ring[i], 4386 struct igb_ring *tx_ring = &adapter->tx_ring[i];
4460 &work_done, 4387 wr32(E1000_EIMC, tx_ring->eims_value);
4461 adapter->rx_ring[i].napi.weight); 4388 igb_clean_tx_irq(tx_ring);
4389 wr32(E1000_EIMS, tx_ring->eims_value);
4390 }
4462 4391
4463 adapter->flags &= ~IGB_FLAG_IN_NETPOLL; 4392 for (i = 0; i < adapter->num_rx_queues; i++) {
4464 igb_irq_enable(adapter); 4393 struct igb_ring *rx_ring = &adapter->rx_ring[i];
4394 wr32(E1000_EIMC, rx_ring->eims_value);
4395 napi_schedule(&rx_ring->napi);
4396 }
4465} 4397}
4466#endif /* CONFIG_NET_POLL_CONTROLLER */ 4398#endif /* CONFIG_NET_POLL_CONTROLLER */
4467 4399
@@ -4504,12 +4436,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4504 pci_ers_result_t result; 4436 pci_ers_result_t result;
4505 int err; 4437 int err;
4506 4438
4507 if (adapter->need_ioport) 4439 if (pci_enable_device_mem(pdev)) {
4508 err = pci_enable_device(pdev);
4509 else
4510 err = pci_enable_device_mem(pdev);
4511
4512 if (err) {
4513 dev_err(&pdev->dev, 4440 dev_err(&pdev->dev,
4514 "Cannot re-enable PCI device after reset.\n"); 4441 "Cannot re-enable PCI device after reset.\n");
4515 result = PCI_ERS_RESULT_DISCONNECT; 4442 result = PCI_ERS_RESULT_DISCONNECT;
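
The igb hunks above remove the driver-private inet_lro state and hand receive aggregation to the networking core via GRO. A minimal sketch of that delivery pattern, assuming a NAPI context and a registered VLAN group; my_receive and its parameter names are illustrative, not taken from this commit:

static void my_receive(struct napi_struct *napi, struct vlan_group *vlgrp,
                       struct sk_buff *skb, bool vlan_tagged, u16 vlan_tci)
{
	/* Only hand checksum-verified frames to GRO; everything else
	 * goes up the ordinary receive path. */
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (vlan_tagged)
			vlan_gro_receive(napi, vlgrp, vlan_tci, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (vlan_tagged)
			vlan_hwaccel_receive_skb(skb, vlgrp, vlan_tci);
		else
			netif_receive_skb(skb);
	}
}

Unlike the old lro_mgr setup, no per-ring descriptor array or explicit lro_flush_all() call is needed; pending GRO state is flushed by the NAPI core when the poll completes.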
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 5b5862499de..c23d211758a 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -753,7 +753,8 @@ static int sirdev_alloc_buffers(struct sir_dev *dev)
753 dev->rx_buff.truesize = IRDA_SKB_MAX_MTU; 753 dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;
754 754
755 /* Bootstrap ZeroCopy Rx */ 755 /* Bootstrap ZeroCopy Rx */
756 dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL); 756 dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
757 GFP_KERNEL);
757 if (dev->rx_buff.skb == NULL) 758 if (dev->rx_buff.skb == NULL)
758 return -ENOMEM; 759 return -ENOMEM;
759 skb_reserve(dev->rx_buff.skb, 1); 760 skb_reserve(dev->rx_buff.skb, 1);
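
The sir_dev hunk above switches the bootstrap Rx buffer to __netdev_alloc_skb() so the skb is associated with its owning net_device at allocation time. A small sketch of the same idiom under GFP_KERNEL; my_rx_bootstrap is an illustrative name, and the 1-byte reserve simply mirrors the driver above:

static int my_rx_bootstrap(struct net_device *netdev, struct sk_buff **slot,
                           unsigned int truesize)
{
	/* Tie the buffer to its net_device up front. */
	struct sk_buff *skb = __netdev_alloc_skb(netdev, truesize, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, 1);	/* same 1-byte headroom as the driver above */
	*slot = skb;
	return 0;
}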
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index eee28d39568..e2ef16b2970 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1721,14 +1721,14 @@ ixgb_intr(int irq, void *data)
1721 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1721 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1722 mod_timer(&adapter->watchdog_timer, jiffies); 1722 mod_timer(&adapter->watchdog_timer, jiffies);
1723 1723
1724 if (netif_rx_schedule_prep(&adapter->napi)) { 1724 if (napi_schedule_prep(&adapter->napi)) {
1725 1725
1726 /* Disable interrupts and register for poll. The flush 1726 /* Disable interrupts and register for poll. The flush
1727 of the posted write is intentionally left out. 1727 of the posted write is intentionally left out.
1728 */ 1728 */
1729 1729
1730 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 1730 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1731 __netif_rx_schedule(&adapter->napi); 1731 __napi_schedule(&adapter->napi);
1732 } 1732 }
1733 return IRQ_HANDLED; 1733 return IRQ_HANDLED;
1734} 1734}
@@ -1749,7 +1749,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
1749 1749
1750 /* If budget not fully consumed, exit the polling mode */ 1750 /* If budget not fully consumed, exit the polling mode */
1751 if (work_done < budget) { 1751 if (work_done < budget) {
1752 netif_rx_complete(napi); 1752 napi_complete(napi);
1753 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1753 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1754 ixgb_irq_enable(adapter); 1754 ixgb_irq_enable(adapter);
1755 } 1755 }
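
The ixgb hunk above (like several igb hunks earlier) is a mechanical rename from the old netif_rx_* scheduling helpers to the napi_* ones. A minimal sketch of the interrupt/poll pairing using those calls; my_isr, my_poll, my_dev and the IRQ helpers are illustrative names, not from this commit:

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *dev = data;

	/* Claim the NAPI context, mask the device IRQ, then schedule polling. */
	if (napi_schedule_prep(&dev->napi)) {		/* was netif_rx_schedule_prep() */
		my_disable_irq(dev);			/* hypothetical helper */
		__napi_schedule(&dev->napi);		/* was __netif_rx_schedule() */
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *dev = container_of(napi, struct my_dev, napi);
	int work_done = my_clean_rx(dev, budget);	/* hypothetical helper */

	/* Finishing under budget means the ring is drained: leave polling
	 * mode and re-enable the interrupt. */
	if (work_done < budget) {
		napi_complete(napi);			/* was netif_rx_complete() */
		my_enable_irq(dev);
	}
	return work_done;
}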
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 6e7ef765bcd..f6061950f5d 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 10 Gigabit PCI Express Linux driver 3# Intel 10 Gigabit PCI Express Linux driver
4# Copyright(c) 1999 - 2007 Intel Corporation. 4# Copyright(c) 1999 - 2009 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e112008f39c..e98ace8c578 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,6 @@
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/inet_lro.h>
35#include <linux/aer.h> 34#include <linux/aer.h>
36 35
37#include "ixgbe_type.h" 36#include "ixgbe_type.h"
@@ -88,9 +87,6 @@
88#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 87#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
89#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 88#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
90 89
91#define IXGBE_MAX_LRO_DESCRIPTORS 8
92#define IXGBE_MAX_LRO_AGGREGATE 32
93
94/* wrapper around a pointer to a socket buffer, 90/* wrapper around a pointer to a socket buffer,
95 * so a DMA handle can be stored along with the buffer */ 91 * so a DMA handle can be stored along with the buffer */
96struct ixgbe_tx_buffer { 92struct ixgbe_tx_buffer {
@@ -142,8 +138,6 @@ struct ixgbe_ring {
142 /* cpu for tx queue */ 138 /* cpu for tx queue */
143 int cpu; 139 int cpu;
144#endif 140#endif
145 struct net_lro_mgr lro_mgr;
146 bool lro_used;
147 struct ixgbe_queue_stats stats; 141 struct ixgbe_queue_stats stats;
148 u16 v_idx; /* maps directly to the index for this ring in the hardware 142 u16 v_idx; /* maps directly to the index for this ring in the hardware
149 * vector array, can also be used for finding the bit in EICR 143 * vector array, can also be used for finding the bit in EICR
@@ -210,9 +204,13 @@ struct ixgbe_q_vector {
210#define OTHER_VECTOR 1 204#define OTHER_VECTOR 1
211#define NON_Q_VECTORS (OTHER_VECTOR) 205#define NON_Q_VECTORS (OTHER_VECTOR)
212 206
213#define MAX_MSIX_Q_VECTORS 16 207#define MAX_MSIX_VECTORS_82598 18
208#define MAX_MSIX_Q_VECTORS_82598 16
209
210#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82598
211#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82598
212
214#define MIN_MSIX_Q_VECTORS 2 213#define MIN_MSIX_Q_VECTORS 2
215#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS)
216#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) 214#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
217 215
218/* board specific private data structure */ 216/* board specific private data structure */
@@ -250,6 +248,7 @@ struct ixgbe_adapter {
250 u64 hw_csum_rx_good; 248 u64 hw_csum_rx_good;
251 u64 non_eop_descs; 249 u64 non_eop_descs;
252 int num_msix_vectors; 250 int num_msix_vectors;
251 int max_msix_q_vectors; /* true count of q_vectors for device */
253 struct ixgbe_ring_feature ring_feature[3]; 252 struct ixgbe_ring_feature ring_feature[3];
254 struct msix_entry *msix_entries; 253 struct msix_entry *msix_entries;
255 254
@@ -301,9 +300,6 @@ struct ixgbe_adapter {
301 300
302 unsigned long state; 301 unsigned long state;
303 u64 tx_busy; 302 u64 tx_busy;
304 u64 lro_aggregated;
305 u64 lro_flushed;
306 u64 lro_no_desc;
307 unsigned int tx_ring_count; 303 unsigned int tx_ring_count;
308 unsigned int rx_ring_count; 304 unsigned int rx_ring_count;
309 305
@@ -314,6 +310,8 @@ struct ixgbe_adapter {
314 struct work_struct watchdog_task; 310 struct work_struct watchdog_task;
315 struct work_struct sfp_task; 311 struct work_struct sfp_task;
316 struct timer_list sfp_timer; 312 struct timer_list sfp_timer;
313
314 u16 eeprom_version;
317}; 315};
318 316
319enum ixbge_state_t { 317enum ixbge_state_t {
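
The ixgbe.h hunks above replace the single hard-coded MSI-X ceiling with 82598-specific limits plus a per-adapter max_msix_q_vectors, so the vector budget can be capped by what the device actually advertises. A hedged sketch of how such a cap might be applied; my_msix_budget is a hypothetical helper, not part of this commit:

static int my_msix_budget(struct ixgbe_adapter *adapter)
{
	/* One vector per online CPU for queues, plus the non-queue vector. */
	int want = num_online_cpus() + NON_Q_VECTORS;
	/* Table size the device advertises, e.g. as read from PCI config space. */
	int cap = adapter->hw.mac.max_msix_vectors;

	if (want > MAX_MSIX_COUNT)
		want = MAX_MSIX_COUNT;
	return min(want, cap);
}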
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index ad5699d9ab0..525bd87fea5 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -50,6 +50,27 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
50 u8 *eeprom_data); 50 u8 *eeprom_data);
51 51
52/** 52/**
53 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
54 * @hw: pointer to hardware structure
55 *
56 * Read PCIe configuration space, and get the MSI-X vector count from
57 * the capabilities table.
58 **/
59u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
60{
61 struct ixgbe_adapter *adapter = hw->back;
62 u16 msix_count;
63 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
64 &msix_count);
65 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
66
67 /* MSI-X count is zero-based in HW, so increment to give proper value */
68 msix_count++;
69
70 return msix_count;
71}
72
73/**
53 */ 74 */
54static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 75static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
55{ 76{
@@ -106,6 +127,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
106 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; 127 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
107 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 128 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
108 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 129 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
130 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
109 131
110out: 132out:
111 return ret_val; 133 return ret_val;
@@ -124,18 +146,12 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
124 bool *autoneg) 146 bool *autoneg)
125{ 147{
126 s32 status = 0; 148 s32 status = 0;
127 s32 autoc_reg;
128
129 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
130 149
131 if (hw->mac.link_settings_loaded) { 150 /*
132 autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE; 151 * Determine link capabilities based on the stored value of AUTOC,
133 autoc_reg &= ~IXGBE_AUTOC_LMS_MASK; 152 * which represents EEPROM defaults.
134 autoc_reg |= hw->mac.link_attach_type; 153 */
135 autoc_reg |= hw->mac.link_mode_select; 154 switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
136 }
137
138 switch (autoc_reg & IXGBE_AUTOC_LMS_MASK) {
139 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 155 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
140 *speed = IXGBE_LINK_SPEED_1GB_FULL; 156 *speed = IXGBE_LINK_SPEED_1GB_FULL;
141 *autoneg = false; 157 *autoneg = false;
@@ -154,9 +170,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
154 case IXGBE_AUTOC_LMS_KX4_AN: 170 case IXGBE_AUTOC_LMS_KX4_AN:
155 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: 171 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
156 *speed = IXGBE_LINK_SPEED_UNKNOWN; 172 *speed = IXGBE_LINK_SPEED_UNKNOWN;
157 if (autoc_reg & IXGBE_AUTOC_KX4_SUPP) 173 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
158 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 174 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
159 if (autoc_reg & IXGBE_AUTOC_KX_SUPP) 175 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
160 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 176 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
161 *autoneg = true; 177 *autoneg = true;
162 break; 178 break;
@@ -213,6 +229,10 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
213 229
214 /* Media type for I82598 is based on device ID */ 230 /* Media type for I82598 is based on device ID */
215 switch (hw->device_id) { 231 switch (hw->device_id) {
232 case IXGBE_DEV_ID_82598:
233 case IXGBE_DEV_ID_82598_BX:
234 media_type = ixgbe_media_type_backplane;
235 break;
216 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 236 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
217 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 237 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
218 case IXGBE_DEV_ID_82598EB_CX4: 238 case IXGBE_DEV_ID_82598EB_CX4:
@@ -235,104 +255,75 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
235} 255}
236 256
237/** 257/**
238 * ixgbe_setup_fc_82598 - Configure flow control settings 258 * ixgbe_fc_enable_82598 - Enable flow control
239 * @hw: pointer to hardware structure 259 * @hw: pointer to hardware structure
240 * @packetbuf_num: packet buffer number (0-7) 260 * @packetbuf_num: packet buffer number (0-7)
241 * 261 *
242 * Configures the flow control settings based on SW configuration. This 262 * Enable flow control according to the current settings.
243 * function is used for 802.3x flow control configuration only.
244 **/ 263 **/
245static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) 264static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
246{ 265{
247 u32 frctl_reg; 266 s32 ret_val = 0;
267 u32 fctrl_reg;
248 u32 rmcs_reg; 268 u32 rmcs_reg;
269 u32 reg;
249 270
250 if (packetbuf_num < 0 || packetbuf_num > 7) { 271 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
251 hw_dbg(hw, "Invalid packet buffer number [%d], expected range is" 272 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
252 " 0-7\n", packetbuf_num);
253 }
254
255 frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
256 frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
257 273
258 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 274 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
259 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); 275 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
260 276
261 /* 277 /*
262 * 10 gig parts do not have a word in the EEPROM to determine the 278 * The possible values of fc.current_mode are:
263 * default flow control setting, so we explicitly set it to full.
264 */
265 if (hw->fc.type == ixgbe_fc_default)
266 hw->fc.type = ixgbe_fc_full;
267
268 /*
269 * We want to save off the original Flow Control configuration just in
270 * case we get disconnected and then reconnected into a different hub
271 * or switch with different Flow Control capabilities.
272 */
273 hw->fc.original_type = hw->fc.type;
274
275 /*
276 * The possible values of the "flow_control" parameter are:
277 * 0: Flow control is completely disabled 279 * 0: Flow control is completely disabled
278 * 1: Rx flow control is enabled (we can receive pause frames but not 280 * 1: Rx flow control is enabled (we can receive pause frames,
279 * send pause frames). 281 * but not send pause frames).
280 * 2: Tx flow control is enabled (we can send pause frames but we do not 282 * 2: Tx flow control is enabled (we can send pause frames but
281 * support receiving pause frames) 283 * we do not support receiving pause frames).
282 * 3: Both Rx and Tx flow control (symmetric) are enabled. 284 * 3: Both Rx and Tx flow control (symmetric) are enabled.
283 * other: Invalid. 285 * other: Invalid.
284 */ 286 */
285 switch (hw->fc.type) { 287 switch (hw->fc.current_mode) {
286 case ixgbe_fc_none: 288 case ixgbe_fc_none:
289 /* Flow control completely disabled by software override. */
287 break; 290 break;
288 case ixgbe_fc_rx_pause: 291 case ixgbe_fc_rx_pause:
289 /* 292 /*
290 * Rx Flow control is enabled, 293 * Rx Flow control is enabled and Tx Flow control is
291 * and Tx Flow control is disabled. 294 * disabled by software override. Since there really
295 * isn't a way to advertise that we are capable of RX
296 * Pause ONLY, we will advertise that we support both
297 * symmetric and asymmetric Rx PAUSE. Later, we will
298 * disable the adapter's ability to send PAUSE frames.
292 */ 299 */
293 frctl_reg |= IXGBE_FCTRL_RFCE; 300 fctrl_reg |= IXGBE_FCTRL_RFCE;
294 break; 301 break;
295 case ixgbe_fc_tx_pause: 302 case ixgbe_fc_tx_pause:
296 /* 303 /*
297 * Tx Flow control is enabled, and Rx Flow control is disabled, 304 * Tx Flow control is enabled, and Rx Flow control is
298 * by a software over-ride. 305 * disabled by software override.
299 */ 306 */
300 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 307 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
301 break; 308 break;
302 case ixgbe_fc_full: 309 case ixgbe_fc_full:
303 /* 310 /* Flow control (both Rx and Tx) is enabled by SW override. */
304 * Flow control (both Rx and Tx) is enabled by a software 311 fctrl_reg |= IXGBE_FCTRL_RFCE;
305 * over-ride.
306 */
307 frctl_reg |= IXGBE_FCTRL_RFCE;
308 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 312 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
309 break; 313 break;
310 default: 314 default:
311 /* We should never get here. The value should be 0-3. */
312 hw_dbg(hw, "Flow control param set incorrectly\n"); 315 hw_dbg(hw, "Flow control param set incorrectly\n");
316 ret_val = -IXGBE_ERR_CONFIG;
317 goto out;
313 break; 318 break;
314 } 319 }
315 320
316 /* Enable 802.3x based flow control settings. */ 321 /* Enable 802.3x based flow control settings. */
317 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); 322 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
318 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 323 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
319 324
320 /* 325 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
321 * Check for invalid software configuration, zeros are completely 326 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
322 * invalid for all parameters used past this point, and if we enable
323 * flow control with zero water marks, we blast flow control packets.
324 */
325 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
326 hw_dbg(hw, "Flow control structure initialized incorrectly\n");
327 return IXGBE_ERR_INVALID_LINK_SETTINGS;
328 }
329
330 /*
331 * We need to set up the Receive Threshold high and low water
332 * marks as well as (optionally) enabling the transmission of
333 * XON frames.
334 */
335 if (hw->fc.type & ixgbe_fc_tx_pause) {
336 if (hw->fc.send_xon) { 327 if (hw->fc.send_xon) {
337 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 328 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
338 (hw->fc.low_water | IXGBE_FCRTL_XONE)); 329 (hw->fc.low_water | IXGBE_FCRTL_XONE));
@@ -340,14 +331,93 @@ static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
340 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 331 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
341 hw->fc.low_water); 332 hw->fc.low_water);
342 } 333 }
334
343 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), 335 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
344 (hw->fc.high_water)|IXGBE_FCRTH_FCEN); 336 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
345 } 337 }
346 338
347 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); 339 /* Configure pause time (2 TCs per register) */
340 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
341 if ((packetbuf_num & 1) == 0)
342 reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
343 else
344 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
345 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
346
348 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 347 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
349 348
350 return 0; 349out:
350 return ret_val;
351}
352
353/**
354 * ixgbe_setup_fc_82598 - Configure flow control settings
355 * @hw: pointer to hardware structure
356 * @packetbuf_num: packet buffer number (0-7)
357 *
358 * Configures the flow control settings based on SW configuration. This
359 * function is used for 802.3x flow control configuration only.
360 **/
361static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
362{
363 s32 ret_val = 0;
364 ixgbe_link_speed speed;
365 bool link_up;
366
367 /* Validate the packetbuf configuration */
368 if (packetbuf_num < 0 || packetbuf_num > 7) {
369 hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
370 " 0-7\n", packetbuf_num);
371 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
372 goto out;
373 }
374
375 /*
376 * Validate the water mark configuration. Zero water marks are invalid
377 * because it causes the controller to just blast out fc packets.
378 */
379 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
380 hw_dbg(hw, "Invalid water mark configuration\n");
381 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
382 goto out;
383 }
384
385 /*
386 * Validate the requested mode. Strict IEEE mode does not allow
387 * ixgbe_fc_rx_pause because it will cause testing anomalies.
388 */
389 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
390 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
391 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
392 goto out;
393 }
394
395 /*
396 * 10gig parts do not have a word in the EEPROM to determine the
397 * default flow control setting, so we explicitly set it to full.
398 */
399 if (hw->fc.requested_mode == ixgbe_fc_default)
400 hw->fc.requested_mode = ixgbe_fc_full;
401
402 /*
403 * Save off the requested flow control mode for use later. Depending
404 * on the link partner's capabilities, we may or may not use this mode.
405 */
406
407 hw->fc.current_mode = hw->fc.requested_mode;
408
409 /* Decide whether to use autoneg or not. */
410 hw->mac.ops.check_link(hw, &speed, &link_up, false);
411 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
412 ret_val = ixgbe_fc_autoneg(hw);
413
414 if (ret_val)
415 goto out;
416
417 ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
418
419out:
420 return ret_val;
351} 421}
352 422
353/** 423/**
@@ -364,27 +434,17 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
364 u32 i; 434 u32 i;
365 s32 status = 0; 435 s32 status = 0;
366 436
367 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
368
369 if (hw->mac.link_settings_loaded) {
370 autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
371 autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
372 autoc_reg |= hw->mac.link_attach_type;
373 autoc_reg |= hw->mac.link_mode_select;
374
375 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
376 IXGBE_WRITE_FLUSH(hw);
377 msleep(50);
378 }
379
380 /* Restart link */ 437 /* Restart link */
438 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
381 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 439 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
382 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 440 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
383 441
384 /* Only poll for autoneg to complete if specified to do so */ 442 /* Only poll for autoneg to complete if specified to do so */
385 if (hw->phy.autoneg_wait_to_complete) { 443 if (hw->phy.autoneg_wait_to_complete) {
386 if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN || 444 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
387 hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { 445 IXGBE_AUTOC_LMS_KX4_AN ||
446 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
447 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
388 links_reg = 0; /* Just in case Autoneg time = 0 */ 448 links_reg = 0; /* Just in case Autoneg time = 0 */
389 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 449 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
390 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 450 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
@@ -404,7 +464,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
404 * case we get disconnected and then reconnected into a different hub 464 * case we get disconnected and then reconnected into a different hub
405 * or switch with different Flow Control capabilities. 465 * or switch with different Flow Control capabilities.
406 */ 466 */
407 hw->fc.original_type = hw->fc.type;
408 ixgbe_setup_fc_82598(hw, 0); 467 ixgbe_setup_fc_82598(hw, 0);
409 468
410 /* Add delay to filter out noises during initial link setup */ 469 /* Add delay to filter out noises during initial link setup */
@@ -508,37 +567,43 @@ out:
508 * Set the link speed in the AUTOC register and restarts link. 567 * Set the link speed in the AUTOC register and restarts link.
509 **/ 568 **/
510static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, 569static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
511 ixgbe_link_speed speed, bool autoneg, 570 ixgbe_link_speed speed, bool autoneg,
512 bool autoneg_wait_to_complete) 571 bool autoneg_wait_to_complete)
513{ 572{
514 s32 status = 0; 573 s32 status = 0;
574 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
575 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
576 u32 autoc = curr_autoc;
577 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
515 578
516 /* If speed is 10G, then check for CX4 or XAUI. */ 579 /* Check to see if speed passed in is supported. */
517 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 580 ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
518 (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) { 581 speed &= link_capabilities;
519 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 582
520 } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) { 583 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
521 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
522 } else if (autoneg) {
523 /* BX mode - Autonegotiate 1G */
524 if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
525 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
526 else /* KX/KX4 mode */
527 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN_1G_AN;
528 } else {
529 status = IXGBE_ERR_LINK_SETUP; 584 status = IXGBE_ERR_LINK_SETUP;
585
586 /* Set KX4/KX support according to speed requested */
587 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
588 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
589 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
590 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
591 autoc |= IXGBE_AUTOC_KX4_SUPP;
592 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
593 autoc |= IXGBE_AUTOC_KX_SUPP;
594 if (autoc != curr_autoc)
595 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
530 } 596 }
531 597
532 if (status == 0) { 598 if (status == 0) {
533 hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete; 599 hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete;
534 600
535 hw->mac.link_settings_loaded = true;
536 /* 601 /*
537 * Setup and restart the link based on the new values in 602 * Setup and restart the link based on the new values in
538 * ixgbe_hw This will write the AUTOC register based on the new 603 * ixgbe_hw This will write the AUTOC register based on the new
539 * stored values 604 * stored values
540 */ 605 */
541 ixgbe_setup_mac_link_82598(hw); 606 status = ixgbe_setup_mac_link_82598(hw);
542 } 607 }
543 608
544 return status; 609 return status;
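
The reworked setup_mac_link_speed_82598 no longer keeps link_attach_type/link_mode_select in software; it masks the requested speed against the reported link capabilities and flips the KX/KX4 support bits directly in AUTOC. A standalone sketch of that masking follows; the bit values are invented for illustration and do not match the real IXGBE_* definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit values only; the real IXGBE_* constants differ. */
#define LINK_SPEED_1GB   0x1u
#define LINK_SPEED_10GB  0x2u
#define AUTOC_KX_SUPP    0x4u
#define AUTOC_KX4_SUPP   0x8u

static uint32_t apply_speed(uint32_t autoc, uint32_t requested,
                            uint32_t supported)
{
        uint32_t speed = requested & supported;  /* drop unsupported speeds */

        autoc &= ~(AUTOC_KX_SUPP | AUTOC_KX4_SUPP);
        if (speed & LINK_SPEED_10GB)
                autoc |= AUTOC_KX4_SUPP;         /* 10G backplane -> KX4 */
        if (speed & LINK_SPEED_1GB)
                autoc |= AUTOC_KX_SUPP;          /* 1G backplane  -> KX  */
        return autoc;
}

int main(void)
{
        uint32_t autoc = apply_speed(0, LINK_SPEED_10GB | LINK_SPEED_1GB,
                                     LINK_SPEED_10GB);

        printf("autoc=0x%x\n", (unsigned)autoc); /* only KX4_SUPP is set */
        return 0;
}
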
@@ -561,10 +626,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
561 /* Restart autonegotiation on PHY */ 626 /* Restart autonegotiation on PHY */
562 status = hw->phy.ops.setup_link(hw); 627 status = hw->phy.ops.setup_link(hw);
563 628
564 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
565 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
566 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
567
568 /* Set up MAC */ 629 /* Set up MAC */
569 ixgbe_setup_mac_link_82598(hw); 630 ixgbe_setup_mac_link_82598(hw);
570 631
@@ -591,10 +652,6 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
591 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 652 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
592 autoneg_wait_to_complete); 653 autoneg_wait_to_complete);
593 654
594 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
595 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
596 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
597
598 /* Set up MAC */ 655 /* Set up MAC */
599 ixgbe_setup_mac_link_82598(hw); 656 ixgbe_setup_mac_link_82598(hw);
600 657
@@ -694,24 +751,16 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
694 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); 751 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
695 752
696 /* 753 /*
697 * AUTOC register which stores link settings gets cleared 754 * Store the original AUTOC value if it has not been
698 * and reloaded from EEPROM after reset. We need to restore 755 * stored off yet. Otherwise restore the stored original
699 * our stored value from init in case SW changed the attach 756 * AUTOC value since the reset operation sets it back to defaults.
700 * type or speed. If this is the first time and link settings
701 * have not been stored, store default settings from AUTOC.
702 */ 757 */
703 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 758 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
704 if (hw->mac.link_settings_loaded) { 759 if (hw->mac.orig_link_settings_stored == false) {
705 autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE); 760 hw->mac.orig_autoc = autoc;
706 autoc &= ~(IXGBE_AUTOC_LMS_MASK); 761 hw->mac.orig_link_settings_stored = true;
707 autoc |= hw->mac.link_attach_type; 762 } else if (autoc != hw->mac.orig_autoc) {
708 autoc |= hw->mac.link_mode_select; 763 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
709 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
710 } else {
711 hw->mac.link_attach_type =
712 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
713 hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
714 hw->mac.link_settings_loaded = true;
715 } 764 }
716 765
717 /* Store the permanent mac address */ 766 /* Store the permanent mac address */
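
The reset path now uses a save-once/restore-later pattern for AUTOC: the first reset records the EEPROM-provided value in mac.orig_autoc, and later resets write it back if the register has drifted. A generic sketch of that pattern, with register access stubbed out and field names borrowed loosely from the hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for register access; a static variable plays the AUTOC register. */
static uint32_t fake_autoc = 0x1234;
static uint32_t read_autoc(void)    { return fake_autoc; }
static void write_autoc(uint32_t v) { fake_autoc = v; }

struct mac_info {
        uint32_t orig_autoc;
        bool orig_link_settings_stored;
};

static void restore_autoc_after_reset(struct mac_info *mac)
{
        uint32_t autoc = read_autoc();

        if (!mac->orig_link_settings_stored) {
                /* First reset: remember the EEPROM default. */
                mac->orig_autoc = autoc;
                mac->orig_link_settings_stored = true;
        } else if (autoc != mac->orig_autoc) {
                /* Later resets: undo whatever the reset clobbered. */
                write_autoc(mac->orig_autoc);
        }
}

int main(void)
{
        struct mac_info mac = { 0 };

        restore_autoc_after_reset(&mac);   /* stores 0x1234 */
        fake_autoc = 0;                    /* simulate reset clearing it */
        restore_autoc_after_reset(&mac);   /* writes 0x1234 back */
        printf("autoc=0x%x\n", (unsigned)read_autoc());
        return 0;
}
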
@@ -1002,6 +1051,13 @@ static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1002 s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1051 s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1003 1052
1004 switch (hw->device_id) { 1053 switch (hw->device_id) {
1054 case IXGBE_DEV_ID_82598:
1055 /* Default device ID is mezzanine card KX/KX4 */
1056 physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
1057 IXGBE_PHYSICAL_LAYER_1000BASE_KX);
1058 break;
1059 case IXGBE_DEV_ID_82598_BX:
1060 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1005 case IXGBE_DEV_ID_82598EB_CX4: 1061 case IXGBE_DEV_ID_82598EB_CX4:
1006 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 1062 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
1007 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 1063 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index f67c68404bb..5ae93989784 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -80,9 +80,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
80 /* Clear the VLAN filter table */ 80 /* Clear the VLAN filter table */
81 hw->mac.ops.clear_vfta(hw); 81 hw->mac.ops.clear_vfta(hw);
82 82
83 /* Set up link */
84 hw->mac.ops.setup_link(hw);
85
86 /* Clear statistics registers */ 83 /* Clear statistics registers */
87 hw->mac.ops.clear_hw_cntrs(hw); 84 hw->mac.ops.clear_hw_cntrs(hw);
88 85
@@ -1490,6 +1487,144 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1490} 1487}
1491 1488
1492/** 1489/**
1490 * ixgbe_fc_autoneg - Configure flow control
1491 * @hw: pointer to hardware structure
1492 *
1493 * Negotiates flow control capabilities with link partner using autoneg and
1494 * applies the results.
1495 **/
1496s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1497{
1498 s32 ret_val = 0;
1499 u32 i, reg, pcs_anadv_reg, pcs_lpab_reg;
1500
1501 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1502
1503 /*
1504 * The possible values of fc.current_mode are:
1505 * 0: Flow control is completely disabled
1506 * 1: Rx flow control is enabled (we can receive pause frames,
1507 * but not send pause frames).
1508 * 2: Tx flow control is enabled (we can send pause frames but
1509 * we do not support receiving pause frames).
1510 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1511 * other: Invalid.
1512 */
1513 switch (hw->fc.current_mode) {
1514 case ixgbe_fc_none:
1515 /* Flow control completely disabled by software override. */
1516 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1517 break;
1518 case ixgbe_fc_rx_pause:
1519 /*
1520 * Rx Flow control is enabled and Tx Flow control is
1521 * disabled by software override. Since there really
1522 * isn't a way to advertise that we are capable of RX
1523 * Pause ONLY, we will advertise that we support both
1524 * symmetric and asymmetric Rx PAUSE. Later, we will
1525 * disable the adapter's ability to send PAUSE frames.
1526 */
1527 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1528 break;
1529 case ixgbe_fc_tx_pause:
1530 /*
1531 * Tx Flow control is enabled, and Rx Flow control is
1532 * disabled by software override.
1533 */
1534 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
1535 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
1536 break;
1537 case ixgbe_fc_full:
1538 /* Flow control (both Rx and Tx) is enabled by SW override. */
1539 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1540 break;
1541 default:
1542 hw_dbg(hw, "Flow control param set incorrectly\n");
1543 ret_val = -IXGBE_ERR_CONFIG;
1544 goto out;
1545 break;
1546 }
1547
1548 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1549 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
1550
1551 /* Set PCS register for autoneg */
1552 /* Enable and restart autoneg */
1553 reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
1554
1555 /* Disable AN timeout */
1556 if (hw->fc.strict_ieee)
1557 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
1558
1559 hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
1560 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
1561
1562 /* See if autonegotiation has succeeded */
1563 hw->mac.autoneg_succeeded = 0;
1564 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
1565 msleep(10);
1566 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1567 if ((reg & (IXGBE_PCS1GLSTA_LINK_OK |
1568 IXGBE_PCS1GLSTA_AN_COMPLETE)) ==
1569 (IXGBE_PCS1GLSTA_LINK_OK |
1570 IXGBE_PCS1GLSTA_AN_COMPLETE)) {
1571 if (!(reg & IXGBE_PCS1GLSTA_AN_TIMED_OUT))
1572 hw->mac.autoneg_succeeded = 1;
1573 break;
1574 }
1575 }
1576
1577 if (!hw->mac.autoneg_succeeded) {
1578 /* Autoneg failed to achieve a link, so we turn fc off */
1579 hw->fc.current_mode = ixgbe_fc_none;
1580 hw_dbg(hw, "Flow Control = NONE.\n");
1581 goto out;
1582 }
1583
1584 /*
1585 * Read the AN advertisement and LP ability registers and resolve
1586 * local flow control settings accordingly
1587 */
1588 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1589 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1590 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1591 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1592 /*
1593 * Now we need to check if the user selected Rx ONLY
1594 * of pause frames. In this case, we had to advertise
1595 * FULL flow control because we could not advertise RX
1596 * ONLY. Hence, we must now check to see if we need to
1597 * turn OFF the TRANSMISSION of PAUSE frames.
1598 */
1599 if (hw->fc.requested_mode == ixgbe_fc_full) {
1600 hw->fc.current_mode = ixgbe_fc_full;
1601 hw_dbg(hw, "Flow Control = FULL.\n");
1602 } else {
1603 hw->fc.current_mode = ixgbe_fc_rx_pause;
1604 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1605 }
1606 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1607 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1608 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1609 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1610 hw->fc.current_mode = ixgbe_fc_tx_pause;
1611 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1612 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1613 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1614 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1615 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1616 hw->fc.current_mode = ixgbe_fc_rx_pause;
1617 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1618 } else {
1619 hw->fc.current_mode = ixgbe_fc_none;
1620 hw_dbg(hw, "Flow Control = NONE.\n");
1621 }
1622
1623out:
1624 return ret_val;
1625}
1626
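
The tail of the new ixgbe_fc_autoneg is the usual IEEE 802.3 pause resolution: combine the local SYM/ASM advertisement with the link partner's ability bits and pick a flow-control mode. A compact standalone restatement of that table; the boolean parameters stand in for the PCS1GANA/PCS1GANLP register bits rather than reproducing their layout.

#include <stdio.h>
#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* l_* are the locally advertised symmetric/asymmetric pause bits,
 * p_* the link partner's ability bits. */
static enum fc_mode resolve_fc(bool l_sym, bool l_asm, bool p_sym, bool p_asm,
                               enum fc_mode requested)
{
        if (l_sym && p_sym)
                /* Both sides do symmetric pause; honour an RX-only request. */
                return requested == FC_FULL ? FC_FULL : FC_RX_PAUSE;
        if (!l_sym && l_asm && p_sym && p_asm)
                return FC_TX_PAUSE;  /* we only send pause, partner honours it */
        if (l_sym && l_asm && !p_sym && p_asm)
                return FC_RX_PAUSE;  /* partner only sends pause, we honour it */
        return FC_NONE;
}

int main(void)
{
        /* Local full advertisement, partner asymmetric only -> RX PAUSE. */
        printf("%d\n", resolve_fc(true, true, false, true, FC_FULL));
        return 0;
}
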
1627/**
1493 * ixgbe_disable_pcie_master - Disable PCI-express master access 1628 * ixgbe_disable_pcie_master - Disable PCI-express master access
1494 * @hw: pointer to hardware structure 1629 * @hw: pointer to hardware structure
1495 * 1630 *
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 192f8d01291..c63021261e5 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -61,6 +61,9 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
61 u32 addr_count, ixgbe_mc_addr_itr func); 61 u32 addr_count, ixgbe_mc_addr_itr func);
62s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 62s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
63s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 63s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
64s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
 65s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
66s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
64 67
65s32 ixgbe_validate_mac_addr(u8 *mac_addr); 68s32 ixgbe_validate_mac_addr(u8 *mac_addr);
66s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 69s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index e2e28ac63de..2a60c89ab34 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 75f6efe1e36..0da5c6d5bca 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 2c046b0b5d2..df359554d49 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -298,7 +298,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
298 reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 298 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
299 reg &= ~IXGBE_RMCS_TFCE_802_3X; 299 reg &= ~IXGBE_RMCS_TFCE_802_3X;
300 /* correct the reporting of our flow control status */ 300 /* correct the reporting of our flow control status */
301 hw->fc.type = ixgbe_fc_none; 301 hw->fc.current_mode = ixgbe_fc_none;
302 reg |= IXGBE_RMCS_TFCE_PRIORITY; 302 reg |= IXGBE_RMCS_TFCE_PRIORITY;
303 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); 303 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
304 304
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index 1e6a313719d..ebbe53c352a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 4129976953f..dd9d1d63a59 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 67f87a79154..cec2f4e8c61 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -89,8 +89,6 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
89 {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, 89 {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
90 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 90 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
91 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 91 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
92 {"lro_aggregated", IXGBE_STAT(lro_aggregated)},
93 {"lro_flushed", IXGBE_STAT(lro_flushed)},
94}; 92};
95 93
96#define IXGBE_QUEUE_STATS_LEN \ 94#define IXGBE_QUEUE_STATS_LEN \
@@ -132,6 +130,26 @@ static int ixgbe_get_settings(struct net_device *netdev,
132 ecmd->advertising |= ADVERTISED_1000baseT_Full; 130 ecmd->advertising |= ADVERTISED_1000baseT_Full;
133 131
134 ecmd->port = PORT_TP; 132 ecmd->port = PORT_TP;
133 } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
134 /* Set as FIBRE until SERDES defined in kernel */
135 switch (hw->device_id) {
136 case IXGBE_DEV_ID_82598:
137 ecmd->supported |= (SUPPORTED_1000baseT_Full |
138 SUPPORTED_FIBRE);
139 ecmd->advertising = (ADVERTISED_10000baseT_Full |
140 ADVERTISED_1000baseT_Full |
141 ADVERTISED_FIBRE);
142 ecmd->port = PORT_FIBRE;
143 break;
144 case IXGBE_DEV_ID_82598_BX:
145 ecmd->supported = (SUPPORTED_1000baseT_Full |
146 SUPPORTED_FIBRE);
147 ecmd->advertising = (ADVERTISED_1000baseT_Full |
148 ADVERTISED_FIBRE);
149 ecmd->port = PORT_FIBRE;
150 ecmd->autoneg = AUTONEG_DISABLE;
151 break;
152 }
135 } else { 153 } else {
136 ecmd->supported |= SUPPORTED_FIBRE; 154 ecmd->supported |= SUPPORTED_FIBRE;
137 ecmd->advertising = (ADVERTISED_10000baseT_Full | 155 ecmd->advertising = (ADVERTISED_10000baseT_Full |
@@ -206,13 +224,13 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
206 struct ixgbe_adapter *adapter = netdev_priv(netdev); 224 struct ixgbe_adapter *adapter = netdev_priv(netdev);
207 struct ixgbe_hw *hw = &adapter->hw; 225 struct ixgbe_hw *hw = &adapter->hw;
208 226
209 pause->autoneg = (hw->fc.type == ixgbe_fc_full ? 1 : 0); 227 pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 1 : 0);
210 228
211 if (hw->fc.type == ixgbe_fc_rx_pause) { 229 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
212 pause->rx_pause = 1; 230 pause->rx_pause = 1;
213 } else if (hw->fc.type == ixgbe_fc_tx_pause) { 231 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
214 pause->tx_pause = 1; 232 pause->tx_pause = 1;
215 } else if (hw->fc.type == ixgbe_fc_full) { 233 } else if (hw->fc.current_mode == ixgbe_fc_full) {
216 pause->rx_pause = 1; 234 pause->rx_pause = 1;
217 pause->tx_pause = 1; 235 pause->tx_pause = 1;
218 } 236 }
@@ -226,22 +244,17 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
226 244
227 if ((pause->autoneg == AUTONEG_ENABLE) || 245 if ((pause->autoneg == AUTONEG_ENABLE) ||
228 (pause->rx_pause && pause->tx_pause)) 246 (pause->rx_pause && pause->tx_pause))
229 hw->fc.type = ixgbe_fc_full; 247 hw->fc.requested_mode = ixgbe_fc_full;
230 else if (pause->rx_pause && !pause->tx_pause) 248 else if (pause->rx_pause && !pause->tx_pause)
231 hw->fc.type = ixgbe_fc_rx_pause; 249 hw->fc.requested_mode = ixgbe_fc_rx_pause;
232 else if (!pause->rx_pause && pause->tx_pause) 250 else if (!pause->rx_pause && pause->tx_pause)
233 hw->fc.type = ixgbe_fc_tx_pause; 251 hw->fc.requested_mode = ixgbe_fc_tx_pause;
234 else if (!pause->rx_pause && !pause->tx_pause) 252 else if (!pause->rx_pause && !pause->tx_pause)
235 hw->fc.type = ixgbe_fc_none; 253 hw->fc.requested_mode = ixgbe_fc_none;
236 else 254 else
237 return -EINVAL; 255 return -EINVAL;
238 256
239 hw->fc.original_type = hw->fc.type; 257 hw->mac.ops.setup_fc(hw, 0);
240
241 if (netif_running(netdev))
242 ixgbe_reinit_locked(adapter);
243 else
244 ixgbe_reset(adapter);
245 258
246 return 0; 259 return 0;
247} 260}
@@ -661,10 +674,17 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
661 struct ethtool_drvinfo *drvinfo) 674 struct ethtool_drvinfo *drvinfo)
662{ 675{
663 struct ixgbe_adapter *adapter = netdev_priv(netdev); 676 struct ixgbe_adapter *adapter = netdev_priv(netdev);
677 char firmware_version[32];
664 678
665 strncpy(drvinfo->driver, ixgbe_driver_name, 32); 679 strncpy(drvinfo->driver, ixgbe_driver_name, 32);
666 strncpy(drvinfo->version, ixgbe_driver_version, 32); 680 strncpy(drvinfo->version, ixgbe_driver_version, 32);
667 strncpy(drvinfo->fw_version, "N/A", 32); 681
682 sprintf(firmware_version, "%d.%d-%d",
683 (adapter->eeprom_version & 0xF000) >> 12,
684 (adapter->eeprom_version & 0x0FF0) >> 4,
685 adapter->eeprom_version & 0x000F);
686
687 strncpy(drvinfo->fw_version, firmware_version, 32);
668 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 688 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
669 drvinfo->n_stats = IXGBE_STATS_LEN; 689 drvinfo->n_stats = IXGBE_STATS_LEN;
670 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 690 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
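
The drvinfo change formats the adapter's packed 16-bit EEPROM version as major.minor-build using the 0xF000, 0x0FF0 and 0x000F fields. A tiny standalone decoder with the same bit split; the sample value is made up.

#include <stdio.h>
#include <stdint.h>

static void format_eeprom_version(uint16_t ver, char *buf, size_t len)
{
        /* 4-bit major, 8-bit minor, 4-bit build, matching the sprintf above. */
        snprintf(buf, len, "%d.%d-%d",
                 (ver & 0xF000) >> 12,
                 (ver & 0x0FF0) >> 4,
                 ver & 0x000F);
}

int main(void)
{
        char buf[32];

        format_eeprom_version(0x1234, buf, sizeof(buf));
        printf("%s\n", buf);   /* prints "1.35-4" */
        return 0;
}
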
@@ -808,15 +828,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
808 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); 828 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
809 int j, k; 829 int j, k;
810 int i; 830 int i;
811 u64 aggregated = 0, flushed = 0, no_desc = 0;
812 for (i = 0; i < adapter->num_rx_queues; i++) {
813 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
814 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
815 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
816 }
817 adapter->lro_aggregated = aggregated;
818 adapter->lro_flushed = flushed;
819 adapter->lro_no_desc = no_desc;
820 831
821 ixgbe_update_stats(adapter); 832 ixgbe_update_stats(adapter);
822 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 833 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d2f4d5f508b..8c32c18f569 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -47,9 +47,9 @@ char ixgbe_driver_name[] = "ixgbe";
47static const char ixgbe_driver_string[] = 47static const char ixgbe_driver_string[] =
48 "Intel(R) 10 Gigabit PCI Express Network Driver"; 48 "Intel(R) 10 Gigabit PCI Express Network Driver";
49 49
50#define DRV_VERSION "1.3.30-k2" 50#define DRV_VERSION "1.3.56-k2"
51const char ixgbe_driver_version[] = DRV_VERSION; 51const char ixgbe_driver_version[] = DRV_VERSION;
52static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; 52static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
53 53
54static const struct ixgbe_info *ixgbe_info_tbl[] = { 54static const struct ixgbe_info *ixgbe_info_tbl[] = {
55 [board_82598] = &ixgbe_82598_info, 55 [board_82598] = &ixgbe_82598_info,
@@ -64,6 +64,8 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
64 * Class, Class Mask, private data (not used) } 64 * Class, Class Mask, private data (not used) }
65 */ 65 */
66static struct pci_device_id ixgbe_pci_tbl[] = { 66static struct pci_device_id ixgbe_pci_tbl[] = {
67 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
68 board_82598 },
67 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
68 board_82598 }, 70 board_82598 },
69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), 71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
@@ -82,6 +84,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
82 board_82598 }, 84 board_82598 },
83 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), 85 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
84 board_82598 }, 86 board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
88 board_82598 },
85 89
86 /* required last entry */ 90 /* required last entry */
87 {0, } 91 {0, }
@@ -200,9 +204,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
200#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 204#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
201 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 205 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
202 206
203#define GET_TX_HEAD_FROM_RING(ring) (\
204 *(volatile u32 *) \
205 ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
206static void ixgbe_tx_timeout(struct net_device *netdev); 207static void ixgbe_tx_timeout(struct net_device *netdev);
207 208
208/** 209/**
@@ -213,26 +214,27 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
213static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, 214static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
214 struct ixgbe_ring *tx_ring) 215 struct ixgbe_ring *tx_ring)
215{ 216{
216 union ixgbe_adv_tx_desc *tx_desc;
217 struct ixgbe_tx_buffer *tx_buffer_info;
218 struct net_device *netdev = adapter->netdev; 217 struct net_device *netdev = adapter->netdev;
219 struct sk_buff *skb; 218 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
220 unsigned int i; 219 struct ixgbe_tx_buffer *tx_buffer_info;
221 u32 head, oldhead; 220 unsigned int i, eop, count = 0;
222 unsigned int count = 0;
223 unsigned int total_bytes = 0, total_packets = 0; 221 unsigned int total_bytes = 0, total_packets = 0;
224 222
225 rmb();
226 head = GET_TX_HEAD_FROM_RING(tx_ring);
227 head = le32_to_cpu(head);
228 i = tx_ring->next_to_clean; 223 i = tx_ring->next_to_clean;
229 while (1) { 224 eop = tx_ring->tx_buffer_info[i].next_to_watch;
230 while (i != head) { 225 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
226
227 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
228 (count < tx_ring->count)) {
229 bool cleaned = false;
230 for ( ; !cleaned; count++) {
231 struct sk_buff *skb;
231 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 232 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
232 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 233 tx_buffer_info = &tx_ring->tx_buffer_info[i];
234 cleaned = (i == eop);
233 skb = tx_buffer_info->skb; 235 skb = tx_buffer_info->skb;
234 236
235 if (skb) { 237 if (cleaned && skb) {
236 unsigned int segs, bytecount; 238 unsigned int segs, bytecount;
237 239
238 /* gso_segs is currently only valid for tcp */ 240 /* gso_segs is currently only valid for tcp */
@@ -247,23 +249,17 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
247 ixgbe_unmap_and_free_tx_resource(adapter, 249 ixgbe_unmap_and_free_tx_resource(adapter,
248 tx_buffer_info); 250 tx_buffer_info);
249 251
252 tx_desc->wb.status = 0;
253
250 i++; 254 i++;
251 if (i == tx_ring->count) 255 if (i == tx_ring->count)
252 i = 0; 256 i = 0;
253
254 count++;
255 if (count == tx_ring->count)
256 goto done_cleaning;
257 } 257 }
258 oldhead = head; 258
259 rmb(); 259 eop = tx_ring->tx_buffer_info[i].next_to_watch;
260 head = GET_TX_HEAD_FROM_RING(tx_ring); 260 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
261 head = le32_to_cpu(head); 261 }
262 if (head == oldhead) 262
263 goto done_cleaning;
264 } /* while (1) */
265
266done_cleaning:
267 tx_ring->next_to_clean = i; 263 tx_ring->next_to_clean = i;
268 264
269#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 265#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -297,8 +293,8 @@ done_cleaning:
297 293
298 tx_ring->total_bytes += total_bytes; 294 tx_ring->total_bytes += total_bytes;
299 tx_ring->total_packets += total_packets; 295 tx_ring->total_packets += total_packets;
300 tx_ring->stats.bytes += total_bytes;
301 tx_ring->stats.packets += total_packets; 296 tx_ring->stats.packets += total_packets;
297 tx_ring->stats.bytes += total_bytes;
302 adapter->net_stats.tx_bytes += total_bytes; 298 adapter->net_stats.tx_bytes += total_bytes;
303 adapter->net_stats.tx_packets += total_packets; 299 adapter->net_stats.tx_packets += total_packets;
304 return (total_packets ? true : false); 300 return (total_packets ? true : false);
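
The transmit-clean rework above drops head write-back polling (the removed GET_TX_HEAD_FROM_RING path) and instead walks descriptors while their Descriptor Done status is set. A minimal standalone model of that loop with a toy ring; it omits the per-packet EOP bookkeeping the driver performs.

#include <stdio.h>

#define RING_SIZE 8
#define STAT_DD   0x1u     /* illustrative "descriptor done" flag */

struct desc { unsigned int status; };

struct ring {
        struct desc desc[RING_SIZE];
        unsigned int next_to_clean;
};

/* Advance while hardware has marked descriptors done, then stop. */
static unsigned int clean_tx(struct ring *r)
{
        unsigned int i = r->next_to_clean, cleaned = 0;

        while (cleaned < RING_SIZE && (r->desc[i].status & STAT_DD)) {
                r->desc[i].status = 0;           /* hand descriptor back */
                i = (i + 1) % RING_SIZE;
                cleaned++;
        }
        r->next_to_clean = i;
        return cleaned;
}

int main(void)
{
        struct ring r = { 0 };

        r.desc[0].status = STAT_DD;              /* pretend hw completed two */
        r.desc[1].status = STAT_DD;
        printf("cleaned %u\n", clean_tx(&r));    /* prints 2 */
        return 0;
}
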
@@ -403,23 +399,21 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
403 * @rx_ring: rx descriptor ring (for a specific queue) to setup 399 * @rx_ring: rx descriptor ring (for a specific queue) to setup
404 * @rx_desc: rx descriptor 400 * @rx_desc: rx descriptor
405 **/ 401 **/
406static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, 402static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
407 struct sk_buff *skb, u8 status, 403 struct sk_buff *skb, u8 status,
408 struct ixgbe_ring *ring,
409 union ixgbe_adv_rx_desc *rx_desc) 404 union ixgbe_adv_rx_desc *rx_desc)
410{ 405{
406 struct ixgbe_adapter *adapter = q_vector->adapter;
407 struct napi_struct *napi = &q_vector->napi;
411 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 408 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
412 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 409 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
413 410
414 if (adapter->netdev->features & NETIF_F_LRO && 411 skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]);
415 skb->ip_summed == CHECKSUM_UNNECESSARY) { 412 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
416 if (adapter->vlgrp && is_vlan && (tag != 0)) 413 if (adapter->vlgrp && is_vlan && (tag != 0))
417 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 414 vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
418 adapter->vlgrp, tag,
419 rx_desc);
420 else 415 else
421 lro_receive_skb(&ring->lro_mgr, skb, rx_desc); 416 napi_gro_receive(napi, skb);
422 ring->lro_used = true;
423 } else { 417 } else {
424 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 418 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
425 if (adapter->vlgrp && is_vlan && (tag != 0)) 419 if (adapter->vlgrp && is_vlan && (tag != 0))
@@ -574,10 +568,11 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
574 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 568 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
575} 569}
576 570
577static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, 571static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
578 struct ixgbe_ring *rx_ring, 572 struct ixgbe_ring *rx_ring,
579 int *work_done, int work_to_do) 573 int *work_done, int work_to_do)
580{ 574{
575 struct ixgbe_adapter *adapter = q_vector->adapter;
581 struct pci_dev *pdev = adapter->pdev; 576 struct pci_dev *pdev = adapter->pdev;
582 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 577 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
583 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 578 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -678,7 +673,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
678 total_rx_packets++; 673 total_rx_packets++;
679 674
680 skb->protocol = eth_type_trans(skb, adapter->netdev); 675 skb->protocol = eth_type_trans(skb, adapter->netdev);
681 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); 676 ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
682 677
683next_desc: 678next_desc:
684 rx_desc->wb.upper.status_error = 0; 679 rx_desc->wb.upper.status_error = 0;
@@ -696,11 +691,6 @@ next_desc:
696 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 691 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
697 } 692 }
698 693
699 if (rx_ring->lro_used) {
700 lro_flush_all(&rx_ring->lro_mgr);
701 rx_ring->lro_used = false;
702 }
703
704 rx_ring->next_to_clean = i; 694 rx_ring->next_to_clean = i;
705 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 695 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
706 696
@@ -1015,7 +1005,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1015 rx_ring = &(adapter->rx_ring[r_idx]); 1005 rx_ring = &(adapter->rx_ring[r_idx]);
1016 /* disable interrupts on this vector only */ 1006 /* disable interrupts on this vector only */
1017 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); 1007 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
1018 netif_rx_schedule(&q_vector->napi); 1008 napi_schedule(&q_vector->napi);
1019 1009
1020 return IRQ_HANDLED; 1010 return IRQ_HANDLED;
1021} 1011}
@@ -1052,11 +1042,11 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1052 ixgbe_update_rx_dca(adapter, rx_ring); 1042 ixgbe_update_rx_dca(adapter, rx_ring);
1053#endif 1043#endif
1054 1044
1055 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); 1045 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1056 1046
1057 /* If all Rx work done, exit the polling mode */ 1047 /* If all Rx work done, exit the polling mode */
1058 if (work_done < budget) { 1048 if (work_done < budget) {
1059 netif_rx_complete(napi); 1049 napi_complete(napi);
1060 if (adapter->itr_setting & 3) 1050 if (adapter->itr_setting & 3)
1061 ixgbe_set_itr_msix(q_vector); 1051 ixgbe_set_itr_msix(q_vector);
1062 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1052 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1095,7 +1085,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1095 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1085 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1096 ixgbe_update_rx_dca(adapter, rx_ring); 1086 ixgbe_update_rx_dca(adapter, rx_ring);
1097#endif 1087#endif
1098 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); 1088 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1099 enable_mask |= rx_ring->v_idx; 1089 enable_mask |= rx_ring->v_idx;
1100 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1090 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1101 r_idx + 1); 1091 r_idx + 1);
@@ -1105,7 +1095,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1105 rx_ring = &(adapter->rx_ring[r_idx]); 1095 rx_ring = &(adapter->rx_ring[r_idx]);
1106 /* If all Rx work done, exit the polling mode */ 1096 /* If all Rx work done, exit the polling mode */
1107 if (work_done < budget) { 1097 if (work_done < budget) {
1108 netif_rx_complete(napi); 1098 napi_complete(napi);
1109 if (adapter->itr_setting & 3) 1099 if (adapter->itr_setting & 3)
1110 ixgbe_set_itr_msix(q_vector); 1100 ixgbe_set_itr_msix(q_vector);
1111 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1101 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1381,13 +1371,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
1381 1371
1382 ixgbe_check_fan_failure(adapter, eicr); 1372 ixgbe_check_fan_failure(adapter, eicr);
1383 1373
1384 if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) { 1374 if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
1385 adapter->tx_ring[0].total_packets = 0; 1375 adapter->tx_ring[0].total_packets = 0;
1386 adapter->tx_ring[0].total_bytes = 0; 1376 adapter->tx_ring[0].total_bytes = 0;
1387 adapter->rx_ring[0].total_packets = 0; 1377 adapter->rx_ring[0].total_packets = 0;
1388 adapter->rx_ring[0].total_bytes = 0; 1378 adapter->rx_ring[0].total_bytes = 0;
1389 /* would disable interrupts here but EIAM disabled it */ 1379 /* would disable interrupts here but EIAM disabled it */
1390 __netif_rx_schedule(&adapter->q_vector[0].napi); 1380 __napi_schedule(&adapter->q_vector[0].napi);
1391 } 1381 }
1392 1382
1393 return IRQ_HANDLED; 1383 return IRQ_HANDLED;
@@ -1486,7 +1476,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1486 **/ 1476 **/
1487static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 1477static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1488{ 1478{
1489 u64 tdba, tdwba; 1479 u64 tdba;
1490 struct ixgbe_hw *hw = &adapter->hw; 1480 struct ixgbe_hw *hw = &adapter->hw;
1491 u32 i, j, tdlen, txctrl; 1481 u32 i, j, tdlen, txctrl;
1492 1482
@@ -1499,11 +1489,6 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1499 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 1489 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
1500 (tdba & DMA_32BIT_MASK)); 1490 (tdba & DMA_32BIT_MASK));
1501 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 1491 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1502 tdwba = ring->dma +
1503 (ring->count * sizeof(union ixgbe_adv_tx_desc));
1504 tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1505 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
1506 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
1507 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 1492 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1508 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 1493 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1509 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1494 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
@@ -1569,36 +1554,6 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1569} 1554}
1570 1555
1571/** 1556/**
1572 * ixgbe_get_skb_hdr - helper function for LRO header processing
1573 * @skb: pointer to sk_buff to be added to LRO packet
1574 * @iphdr: pointer to ip header structure
1575 * @tcph: pointer to tcp header structure
1576 * @hdr_flags: pointer to header flags
1577 * @priv: private data
1578 **/
1579static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1580 u64 *hdr_flags, void *priv)
1581{
1582 union ixgbe_adv_rx_desc *rx_desc = priv;
1583
1584 /* Verify that this is a valid IPv4 TCP packet */
1585 if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
1586 (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
1587 return -1;
1588
1589 /* Set network headers */
1590 skb_reset_network_header(skb);
1591 skb_set_transport_header(skb, ip_hdrlen(skb));
1592 *iphdr = ip_hdr(skb);
1593 *tcph = tcp_hdr(skb);
1594 *hdr_flags = LRO_IPV4 | LRO_TCP;
1595 return 0;
1596}
1597
1598#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1599 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1600
1601/**
1602 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 1557 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
1603 * @adapter: board private structure 1558 * @adapter: board private structure
1604 * 1559 *
@@ -1616,7 +1571,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1616 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, 1571 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1617 0x6A3E67EA, 0x14364D17, 0x3BED200D}; 1572 0x6A3E67EA, 0x14364D17, 0x3BED200D};
1618 u32 fctrl, hlreg0; 1573 u32 fctrl, hlreg0;
1619 u32 pages;
1620 u32 reta = 0, mrqc; 1574 u32 reta = 0, mrqc;
1621 u32 rdrxctl; 1575 u32 rdrxctl;
1622 int rx_buf_len; 1576 int rx_buf_len;
@@ -1646,8 +1600,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1646 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 1600 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
1647 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 1601 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
1648 1602
1649 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1650
1651 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1603 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1652 /* disable receives while setting up the descriptors */ 1604 /* disable receives while setting up the descriptors */
1653 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1605 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1666,16 +1618,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1666 adapter->rx_ring[i].head = IXGBE_RDH(j); 1618 adapter->rx_ring[i].head = IXGBE_RDH(j);
1667 adapter->rx_ring[i].tail = IXGBE_RDT(j); 1619 adapter->rx_ring[i].tail = IXGBE_RDT(j);
1668 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1620 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1669 /* Intitial LRO Settings */
1670 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1671 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1672 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1673 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1674 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1675 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1676 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1677 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1678 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1679 1621
1680 ixgbe_configure_srrctl(adapter, j); 1622 ixgbe_configure_srrctl(adapter, j);
1681 } 1623 }
@@ -1993,11 +1935,43 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1993 (adapter->rx_ring[i].count - 1)); 1935 (adapter->rx_ring[i].count - 1));
1994} 1936}
1995 1937
1938/**
1939 * ixgbe_link_config - set up initial link with default speed and duplex
1940 * @hw: pointer to private hardware struct
1941 *
1942 * Returns 0 on success, negative on failure
1943 **/
1944static int ixgbe_link_config(struct ixgbe_hw *hw)
1945{
1946 u32 autoneg;
1947 bool link_up = false;
1948 u32 ret = IXGBE_ERR_LINK_SETUP;
1949
1950 if (hw->mac.ops.check_link)
1951 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1952
1953 if (ret)
1954 goto link_cfg_out;
1955
1956 if (hw->mac.ops.get_link_capabilities)
1957 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1958 &hw->mac.autoneg);
1959 if (ret)
1960 goto link_cfg_out;
1961
1962 if (hw->mac.ops.setup_link_speed)
1963 ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);
1964
1965link_cfg_out:
1966 return ret;
1967}
1968
1996static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 1969static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1997{ 1970{
1998 struct net_device *netdev = adapter->netdev; 1971 struct net_device *netdev = adapter->netdev;
1999 struct ixgbe_hw *hw = &adapter->hw; 1972 struct ixgbe_hw *hw = &adapter->hw;
2000 int i, j = 0; 1973 int i, j = 0;
1974 int err;
2001 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1975 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2002 u32 txdctl, rxdctl, mhadd; 1976 u32 txdctl, rxdctl, mhadd;
2003 u32 gpie; 1977 u32 gpie;
@@ -2078,6 +2052,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2078 2052
2079 ixgbe_irq_enable(adapter); 2053 ixgbe_irq_enable(adapter);
2080 2054
2055 err = ixgbe_link_config(hw);
2056 if (err)
2057 dev_err(&adapter->pdev->dev, "link_config FAILED %d\n", err);
2058
2081 /* enable transmits */ 2059 /* enable transmits */
2082 netif_tx_start_all_queues(netdev); 2060 netif_tx_start_all_queues(netdev);
2083 2061
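
ixgbe_link_config introduced above is a guarded walk through the mac.ops callbacks: check_link, then get_link_capabilities, then setup_link_speed, bailing out on the first error. A stripped-down sketch of the same shape with stub callbacks; names and signatures are illustrative, not the real ixgbe_mac_operations.

#include <stdio.h>
#include <stdbool.h>

struct hw;

struct mac_ops {
        int (*check_link)(struct hw *hw, unsigned int *speed, bool *link_up);
        int (*get_link_capabilities)(struct hw *hw, unsigned int *speed,
                                     bool *autoneg);
        int (*setup_link_speed)(struct hw *hw, unsigned int speed,
                                bool autoneg, bool wait);
};

struct hw { struct mac_ops ops; };

static int link_config(struct hw *hw)
{
        unsigned int speed = 0;
        bool link_up = false, autoneg = false;
        int ret = -1;

        if (hw->ops.check_link)
                ret = hw->ops.check_link(hw, &speed, &link_up);
        if (ret)
                return ret;
        if (hw->ops.get_link_capabilities)
                ret = hw->ops.get_link_capabilities(hw, &speed, &autoneg);
        if (ret)
                return ret;
        if (hw->ops.setup_link_speed)
                ret = hw->ops.setup_link_speed(hw, speed, true, link_up);
        return ret;
}

static int stub_check(struct hw *hw, unsigned int *speed, bool *up)
{ (void)hw; *speed = 0; *up = false; return 0; }
static int stub_caps(struct hw *hw, unsigned int *speed, bool *an)
{ (void)hw; *speed = 10000; *an = true; return 0; }
static int stub_setup(struct hw *hw, unsigned int speed, bool an, bool wait)
{ (void)hw; (void)an; (void)wait; printf("setup %u\n", speed); return 0; }

int main(void)
{
        struct hw hw = { .ops = { stub_check, stub_caps, stub_setup } };

        return link_config(&hw);
}
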
@@ -2310,14 +2288,14 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2310#endif 2288#endif
2311 2289
2312 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); 2290 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
2313 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget); 2291 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
2314 2292
2315 if (tx_cleaned) 2293 if (tx_cleaned)
2316 work_done = budget; 2294 work_done = budget;
2317 2295
2318 /* If budget not fully consumed, exit the polling mode */ 2296 /* If budget not fully consumed, exit the polling mode */
2319 if (work_done < budget) { 2297 if (work_done < budget) {
2320 netif_rx_complete(napi); 2298 napi_complete(napi);
2321 if (adapter->itr_setting & 3) 2299 if (adapter->itr_setting & 3)
2322 ixgbe_set_itr(adapter); 2300 ixgbe_set_itr(adapter);
2323 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2301 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2353,68 +2331,57 @@ static void ixgbe_reset_task(struct work_struct *work)
2353 ixgbe_reinit_locked(adapter); 2331 ixgbe_reinit_locked(adapter);
2354} 2332}
2355 2333
2356static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 2334#ifdef CONFIG_IXGBE_DCB
2335static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
2357{ 2336{
2358 int nrq = 1, ntq = 1; 2337 bool ret = false;
2359 int feature_mask = 0, rss_i, rss_m;
2360 int dcb_i, dcb_m;
2361 2338
2362 /* Number of supported queues */ 2339 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2363 switch (adapter->hw.mac.type) { 2340 adapter->ring_feature[RING_F_DCB].mask = 0x7 << 3;
2364 case ixgbe_mac_82598EB: 2341 adapter->num_rx_queues =
2365 dcb_i = adapter->ring_feature[RING_F_DCB].indices; 2342 adapter->ring_feature[RING_F_DCB].indices;
2366 dcb_m = 0; 2343 adapter->num_tx_queues =
2367 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2344 adapter->ring_feature[RING_F_DCB].indices;
2368 rss_m = 0; 2345 ret = true;
2369 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2346 } else {
2370 feature_mask |= IXGBE_FLAG_DCB_ENABLED; 2347 ret = false;
2371 2348 }
2372 switch (adapter->flags & feature_mask) { 2349
2373 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED): 2350 return ret;
2374 dcb_m = 0x7 << 3; 2351}
2375 rss_i = min(8, rss_i); 2352#endif
2376 rss_m = 0x7;
2377 nrq = dcb_i * rss_i;
2378 ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
2379 break;
2380 case (IXGBE_FLAG_DCB_ENABLED):
2381 dcb_m = 0x7 << 3;
2382 nrq = dcb_i;
2383 ntq = dcb_i;
2384 break;
2385 case (IXGBE_FLAG_RSS_ENABLED):
2386 rss_m = 0xF;
2387 nrq = rss_i;
2388 ntq = rss_i;
2389 break;
2390 case 0:
2391 default:
2392 dcb_i = 0;
2393 dcb_m = 0;
2394 rss_i = 0;
2395 rss_m = 0;
2396 nrq = 1;
2397 ntq = 1;
2398 break;
2399 }
2400 2353
2401 /* Sanity check, we should never have zero queues */ 2354static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
2402 nrq = (nrq ?:1); 2355{
2403 ntq = (ntq ?:1); 2356 bool ret = false;
2404 2357
2405 adapter->ring_feature[RING_F_DCB].indices = dcb_i; 2358 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2406 adapter->ring_feature[RING_F_DCB].mask = dcb_m; 2359 adapter->ring_feature[RING_F_RSS].mask = 0xF;
2407 adapter->ring_feature[RING_F_RSS].indices = rss_i; 2360 adapter->num_rx_queues =
2408 adapter->ring_feature[RING_F_RSS].mask = rss_m; 2361 adapter->ring_feature[RING_F_RSS].indices;
2409 break; 2362 adapter->num_tx_queues =
2410 default: 2363 adapter->ring_feature[RING_F_RSS].indices;
2411 nrq = 1; 2364 ret = true;
2412 ntq = 1; 2365 } else {
2413 break; 2366 ret = false;
2414 } 2367 }
2415 2368
2416 adapter->num_rx_queues = nrq; 2369 return ret;
2417 adapter->num_tx_queues = ntq; 2370}
2371
2372static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2373{
2374 /* Start with base case */
2375 adapter->num_rx_queues = 1;
2376 adapter->num_tx_queues = 1;
2377
2378#ifdef CONFIG_IXGBE_DCB
2379 if (ixgbe_set_dcb_queues(adapter))
2380 return;
2381
2382#endif
2383 if (ixgbe_set_rss_queues(adapter))
2384 return;
2418} 2385}
2419 2386
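
The queue-count rework replaces the flag-mask switch with helpers tried in priority order: DCB first, then RSS, otherwise a single queue. A minimal sketch of that fallback shape; the flags and queue counts are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct adapter {
        bool dcb_enabled;
        bool rss_enabled;
        int num_rx_queues;
        int num_tx_queues;
};

static bool set_dcb_queues(struct adapter *a)
{
        if (!a->dcb_enabled)
                return false;
        a->num_rx_queues = a->num_tx_queues = 8;  /* e.g. 8 traffic classes */
        return true;
}

static bool set_rss_queues(struct adapter *a)
{
        if (!a->rss_enabled)
                return false;
        a->num_rx_queues = a->num_tx_queues = 4;  /* e.g. 4 RSS queues */
        return true;
}

static void set_num_queues(struct adapter *a)
{
        a->num_rx_queues = a->num_tx_queues = 1;  /* base case */
        if (set_dcb_queues(a))
                return;                           /* most features wins */
        if (set_rss_queues(a))
                return;
}

int main(void)
{
        struct adapter a = { .rss_enabled = true };

        set_num_queues(&a);
        printf("rx=%d tx=%d\n", a.num_rx_queues, a.num_tx_queues);  /* 4/4 */
        return 0;
}
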
2420static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 2387static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -2460,71 +2427,98 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2460 ixgbe_set_num_queues(adapter); 2427 ixgbe_set_num_queues(adapter);
2461 } else { 2428 } else {
2462 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ 2429 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2463 adapter->num_msix_vectors = vectors; 2430 /*
2431 * Adjust for only the vectors we'll use, which is minimum
2432 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2433 * vectors we were allocated.
2434 */
2435 adapter->num_msix_vectors = min(vectors,
2436 adapter->max_msix_q_vectors + NON_Q_VECTORS);
2464 } 2437 }
2465} 2438}
2466 2439
2467/** 2440/**
2468 * ixgbe_cache_ring_register - Descriptor ring to register mapping 2441 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
2469 * @adapter: board private structure to initialize 2442 * @adapter: board private structure to initialize
2470 * 2443 *
2471 * Once we know the feature-set enabled for the device, we'll cache 2444 * Cache the descriptor ring offsets for RSS to the assigned rings.
2472 * the register offset the descriptor ring is assigned to. 2445 *
2473 **/ 2446 **/
2474static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2447static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
2475{ 2448{
2476 int feature_mask = 0, rss_i; 2449 int i;
2477 int i, txr_idx, rxr_idx; 2450 bool ret = false;
2478 int dcb_i;
2479 2451
2480 /* Number of supported queues */ 2452 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2481 switch (adapter->hw.mac.type) { 2453 for (i = 0; i < adapter->num_rx_queues; i++)
2482 case ixgbe_mac_82598EB: 2454 adapter->rx_ring[i].reg_idx = i;
2483 dcb_i = adapter->ring_feature[RING_F_DCB].indices; 2455 for (i = 0; i < adapter->num_tx_queues; i++)
2484 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2456 adapter->tx_ring[i].reg_idx = i;
2485 txr_idx = 0; 2457 ret = true;
2486 rxr_idx = 0; 2458 } else {
2487 feature_mask |= IXGBE_FLAG_DCB_ENABLED; 2459 ret = false;
2488 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2460 }
2489 switch (adapter->flags & feature_mask) { 2461
2490 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED): 2462 return ret;
2491 for (i = 0; i < dcb_i; i++) { 2463}
2492 int j; 2464
2493 /* Rx first */ 2465#ifdef CONFIG_IXGBE_DCB
2494 for (j = 0; j < adapter->num_rx_queues; j++) { 2466/**
2495 adapter->rx_ring[rxr_idx].reg_idx = 2467 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
2496 i << 3 | j; 2468 * @adapter: board private structure to initialize
2497 rxr_idx++; 2469 *
2498 } 2470 * Cache the descriptor ring offsets for DCB to the assigned rings.
2499 /* Tx now */ 2471 *
2500 for (j = 0; j < adapter->num_tx_queues; j++) { 2472 **/
2501 adapter->tx_ring[txr_idx].reg_idx = 2473static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
2502 i << 2 | (j >> 1); 2474{
2503 if (j & 1) 2475 int i;
2504 txr_idx++; 2476 bool ret = false;
2505 } 2477 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2506 } 2478
2507 case (IXGBE_FLAG_DCB_ENABLED): 2479 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2480 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2508 /* the number of queues is assumed to be symmetric */ 2481 /* the number of queues is assumed to be symmetric */
2509 for (i = 0; i < dcb_i; i++) { 2482 for (i = 0; i < dcb_i; i++) {
2510 adapter->rx_ring[i].reg_idx = i << 3; 2483 adapter->rx_ring[i].reg_idx = i << 3;
2511 adapter->tx_ring[i].reg_idx = i << 2; 2484 adapter->tx_ring[i].reg_idx = i << 2;
2512 } 2485 }
2513 break; 2486 ret = true;
2514 case (IXGBE_FLAG_RSS_ENABLED): 2487 } else {
2515 for (i = 0; i < adapter->num_rx_queues; i++) 2488 ret = false;
2516 adapter->rx_ring[i].reg_idx = i;
2517 for (i = 0; i < adapter->num_tx_queues; i++)
2518 adapter->tx_ring[i].reg_idx = i;
2519 break;
2520 case 0:
2521 default:
2522 break;
2523 } 2489 }
2524 break; 2490 } else {
2525 default: 2491 ret = false;
2526 break;
2527 } 2492 }
2493
2494 return ret;
2495}
2496#endif
2497
2498/**
2499 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2500 * @adapter: board private structure to initialize
2501 *
2502 * Once we know the feature-set enabled for the device, we'll cache
2503 * the register offset the descriptor ring is assigned to.
2504 *
2505 * Note, the order the various feature calls is important. It must start with
2506 * the "most" features enabled at the same time, then trickle down to the
2507 * least amount of features turned on at once.
2508 **/
2509static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2510{
2511 /* start with default case */
2512 adapter->rx_ring[0].reg_idx = 0;
2513 adapter->tx_ring[0].reg_idx = 0;
2514
2515#ifdef CONFIG_IXGBE_DCB
2516 if (ixgbe_cache_ring_dcb(adapter))
2517 return;
2518
2519#endif
2520 if (ixgbe_cache_ring_rss(adapter))
2521 return;
2528} 2522}
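
In the DCB mapping above, each 82598 traffic class i is assigned Rx register index i << 3 and Tx register index i << 2. A few lines that print the mapping as a sanity check:

#include <stdio.h>

int main(void)
{
        /* 82598 DCB layout: Rx reg index = class << 3, Tx reg index = class << 2 */
        for (int i = 0; i < 8; i++)
                printf("class %d: rx reg_idx %d, tx reg_idx %d\n",
                       i, i << 3, i << 2);
        return 0;
}
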
2529 2523
2530/** 2524/**
@@ -2785,6 +2779,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2785 adapter->ring_feature[RING_F_RSS].indices = rss; 2779 adapter->ring_feature[RING_F_RSS].indices = rss;
2786 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2780 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2787 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 2781 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
2782 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
2788 2783
2789#ifdef CONFIG_IXGBE_DCB 2784#ifdef CONFIG_IXGBE_DCB
2790 /* Configure DCB traffic classes */ 2785 /* Configure DCB traffic classes */
@@ -2810,16 +2805,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2810 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 2805 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
2811 2806
2812 /* default flow control settings */ 2807 /* default flow control settings */
2813 hw->fc.original_type = ixgbe_fc_none; 2808 hw->fc.requested_mode = ixgbe_fc_none;
2814 hw->fc.type = ixgbe_fc_none;
2815 hw->fc.high_water = IXGBE_DEFAULT_FCRTH; 2809 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2816 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 2810 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2817 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 2811 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2818 hw->fc.send_xon = true; 2812 hw->fc.send_xon = true;
2819 2813
2820 /* select 10G link by default */
2821 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2822
2823 /* enable itr by default in dynamic mode */ 2814 /* enable itr by default in dynamic mode */
2824 adapter->itr_setting = 1; 2815 adapter->itr_setting = 1;
2825 adapter->eitr_param = 20000; 2816 adapter->eitr_param = 20000;
@@ -2866,8 +2857,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
2866 memset(tx_ring->tx_buffer_info, 0, size); 2857 memset(tx_ring->tx_buffer_info, 0, size);
2867 2858
2868 /* round up to nearest 4K */ 2859 /* round up to nearest 4K */
2869 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) + 2860 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2870 sizeof(u32);
2871 tx_ring->size = ALIGN(tx_ring->size, 4096); 2861 tx_ring->size = ALIGN(tx_ring->size, 4096);
2872 2862
2873 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 2863 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
@@ -2926,12 +2916,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2926 struct pci_dev *pdev = adapter->pdev; 2916 struct pci_dev *pdev = adapter->pdev;
2927 int size; 2917 int size;
2928 2918
2929 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2930 rx_ring->lro_mgr.lro_arr = vmalloc(size);
2931 if (!rx_ring->lro_mgr.lro_arr)
2932 return -ENOMEM;
2933 memset(rx_ring->lro_mgr.lro_arr, 0, size);
2934
2935 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 2919 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2936 rx_ring->rx_buffer_info = vmalloc(size); 2920 rx_ring->rx_buffer_info = vmalloc(size);
2937 if (!rx_ring->rx_buffer_info) { 2921 if (!rx_ring->rx_buffer_info) {
@@ -2960,8 +2944,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2960 return 0; 2944 return 0;
2961 2945
2962alloc_failed: 2946alloc_failed:
2963 vfree(rx_ring->lro_mgr.lro_arr);
2964 rx_ring->lro_mgr.lro_arr = NULL;
2965 return -ENOMEM; 2947 return -ENOMEM;
2966} 2948}
2967 2949
@@ -3039,9 +3021,6 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
3039{ 3021{
3040 struct pci_dev *pdev = adapter->pdev; 3022 struct pci_dev *pdev = adapter->pdev;
3041 3023
3042 vfree(rx_ring->lro_mgr.lro_arr);
3043 rx_ring->lro_mgr.lro_arr = NULL;
3044
3045 ixgbe_clean_rx_ring(adapter, rx_ring); 3024 ixgbe_clean_rx_ring(adapter, rx_ring);
3046 3025
3047 vfree(rx_ring->rx_buffer_info); 3026 vfree(rx_ring->rx_buffer_info);
@@ -3619,13 +3598,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3619 3598
3620 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3599 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3621 switch (skb->protocol) { 3600 switch (skb->protocol) {
3622 case __constant_htons(ETH_P_IP): 3601 case cpu_to_be16(ETH_P_IP):
3623 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3602 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3624 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3603 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3625 type_tucmd_mlhl |= 3604 type_tucmd_mlhl |=
3626 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3605 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3627 break; 3606 break;
3628 case __constant_htons(ETH_P_IPV6): 3607 case cpu_to_be16(ETH_P_IPV6):
3629 /* XXX what about other V6 headers?? */ 3608 /* XXX what about other V6 headers?? */
3630 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3609 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3631 type_tucmd_mlhl |= 3610 type_tucmd_mlhl |=
@@ -3948,26 +3927,6 @@ static void ixgbe_netpoll(struct net_device *netdev)
3948} 3927}
3949#endif 3928#endif
3950 3929
3951/**
3952 * ixgbe_link_config - set up initial link with default speed and duplex
3953 * @hw: pointer to private hardware struct
3954 *
3955 * Returns 0 on success, negative on failure
3956 **/
3957static int ixgbe_link_config(struct ixgbe_hw *hw)
3958{
3959 u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
3960
3961 /* must always autoneg for both 1G and 10G link */
3962 hw->mac.autoneg = true;
3963
3964 if ((hw->mac.type == ixgbe_mac_82598EB) &&
3965 (hw->phy.media_type == ixgbe_media_type_copper))
3966 autoneg = IXGBE_LINK_SPEED_82598_AUTONEG;
3967
3968 return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
3969}
3970
3971static const struct net_device_ops ixgbe_netdev_ops = { 3930static const struct net_device_ops ixgbe_netdev_ops = {
3972 .ndo_open = ixgbe_open, 3931 .ndo_open = ixgbe_open,
3973 .ndo_stop = ixgbe_close, 3932 .ndo_stop = ixgbe_close,
@@ -4141,7 +4100,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4141 netdev->features |= NETIF_F_IPV6_CSUM; 4100 netdev->features |= NETIF_F_IPV6_CSUM;
4142 netdev->features |= NETIF_F_TSO; 4101 netdev->features |= NETIF_F_TSO;
4143 netdev->features |= NETIF_F_TSO6; 4102 netdev->features |= NETIF_F_TSO6;
4144 netdev->features |= NETIF_F_LRO; 4103 netdev->features |= NETIF_F_GRO;
4145 4104
4146 netdev->vlan_features |= NETIF_F_TSO; 4105 netdev->vlan_features |= NETIF_F_TSO;
4147 netdev->vlan_features |= NETIF_F_TSO6; 4106 netdev->vlan_features |= NETIF_F_TSO6;
@@ -4212,16 +4171,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4212 "PCI-Express slot is required.\n"); 4171 "PCI-Express slot is required.\n");
4213 } 4172 }
4214 4173
4174 /* save off EEPROM version number */
4175 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
4176
4215 /* reset the hardware with the new settings */ 4177 /* reset the hardware with the new settings */
4216 hw->mac.ops.start_hw(hw); 4178 hw->mac.ops.start_hw(hw);
4217 4179
4218 /* link_config depends on start_hw being called at least once */
4219 err = ixgbe_link_config(hw);
4220 if (err) {
4221 dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
4222 goto err_register;
4223 }
4224
4225 netif_carrier_off(netdev); 4180 netif_carrier_off(netdev);
4226 4181
4227 strcpy(netdev->name, "eth%d"); 4182 strcpy(netdev->name, "eth%d");
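The ixgbe_cache_ring_register() rewrite above replaces the old switch on the feature flags with per-feature helpers that return true once they have claimed the ring-to-register mapping, which is what the "most features first" ordering note in its comment refers to. A condensed sketch of that contract, using the helper names from the hunk with their bodies elided:

	static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
	{
		/* default 1:1 mapping used when no feature helper claims the rings */
		adapter->rx_ring[0].reg_idx = 0;
		adapter->tx_ring[0].reg_idx = 0;

	#ifdef CONFIG_IXGBE_DCB
		if (ixgbe_cache_ring_dcb(adapter))	/* richest feature set first */
			return;
	#endif
		if (ixgbe_cache_ring_rss(adapter))	/* then the plainer RSS mapping */
			return;
	}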
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 5a8669aedf6..77ec26f5650 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 43a97bc420f..539a3061eb2 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index f011c57c920..237c688f8b6 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,8 @@
34#define IXGBE_INTEL_VENDOR_ID 0x8086 34#define IXGBE_INTEL_VENDOR_ID 0x8086
35 35
36/* Device IDs */ 36/* Device IDs */
37#define IXGBE_DEV_ID_82598 0x10B6
38#define IXGBE_DEV_ID_82598_BX 0x1508
37#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 39#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
38#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 40#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
39#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB 41#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
@@ -719,6 +721,7 @@
719#define IXGBE_LED_OFF 0xF 721#define IXGBE_LED_OFF 0xF
720 722
721/* AUTOC Bit Masks */ 723/* AUTOC Bit Masks */
724#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
722#define IXGBE_AUTOC_KX4_SUPP 0x80000000 725#define IXGBE_AUTOC_KX4_SUPP 0x80000000
723#define IXGBE_AUTOC_KX_SUPP 0x40000000 726#define IXGBE_AUTOC_KX_SUPP 0x40000000
724#define IXGBE_AUTOC_PAUSE 0x30000000 727#define IXGBE_AUTOC_PAUSE 0x30000000
@@ -768,6 +771,28 @@
768#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ 771#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
769#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 772#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
770 773
774#define FIBER_LINK_UP_LIMIT 50
775
776/* PCS1GLSTA Bit Masks */
777#define IXGBE_PCS1GLSTA_LINK_OK 1
778#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
779#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
780#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
781#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
782#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
783#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
784
785#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
786#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
787
788/* PCS1GLCTL Bit Masks */
789#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
790#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
791#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
792#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
793#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
794#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
795
771/* SW Semaphore Register bitmasks */ 796/* SW Semaphore Register bitmasks */
772#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 797#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
773#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 798#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -819,6 +844,10 @@
819#define IXGBE_FW_PTR 0x0F 844#define IXGBE_FW_PTR 0x0F
820#define IXGBE_PBANUM0_PTR 0x15 845#define IXGBE_PBANUM0_PTR 0x15
821#define IXGBE_PBANUM1_PTR 0x16 846#define IXGBE_PBANUM1_PTR 0x16
847#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
848
849/* MSI-X capability fields masks */
850#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
822 851
823/* Legacy EEPROM word offsets */ 852/* Legacy EEPROM word offsets */
824#define IXGBE_ISCSI_BOOT_CAPS 0x0033 853#define IXGBE_ISCSI_BOOT_CAPS 0x0033
@@ -1263,7 +1292,7 @@ enum ixgbe_media_type {
1263}; 1292};
1264 1293
1265/* Flow Control Settings */ 1294/* Flow Control Settings */
1266enum ixgbe_fc_type { 1295enum ixgbe_fc_mode {
1267 ixgbe_fc_none = 0, 1296 ixgbe_fc_none = 0,
1268 ixgbe_fc_rx_pause, 1297 ixgbe_fc_rx_pause,
1269 ixgbe_fc_tx_pause, 1298 ixgbe_fc_tx_pause,
@@ -1287,8 +1316,8 @@ struct ixgbe_fc_info {
1287 u16 pause_time; /* Flow Control Pause timer */ 1316 u16 pause_time; /* Flow Control Pause timer */
1288 bool send_xon; /* Flow control send XON */ 1317 bool send_xon; /* Flow control send XON */
1289 bool strict_ieee; /* Strict IEEE mode */ 1318 bool strict_ieee; /* Strict IEEE mode */
1290 enum ixgbe_fc_type type; /* Type of flow control */ 1319 enum ixgbe_fc_mode current_mode; /* FC mode in effect */
1291 enum ixgbe_fc_type original_type; 1320 enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
1292}; 1321};
1293 1322
1294/* Statistics counters collected by the MAC */ 1323/* Statistics counters collected by the MAC */
@@ -1449,11 +1478,12 @@ struct ixgbe_mac_info {
1449 u32 num_rar_entries; 1478 u32 num_rar_entries;
1450 u32 max_tx_queues; 1479 u32 max_tx_queues;
1451 u32 max_rx_queues; 1480 u32 max_rx_queues;
1452 u32 link_attach_type; 1481 u32 max_msix_vectors;
1453 u32 link_mode_select; 1482 u32 orig_autoc;
1454 bool link_settings_loaded; 1483 u32 orig_autoc2;
1484 bool orig_link_settings_stored;
1455 bool autoneg; 1485 bool autoneg;
1456 bool autoneg_failed; 1486 bool autoneg_succeeded;
1457}; 1487};
1458 1488
1459struct ixgbe_phy_info { 1489struct ixgbe_phy_info {
@@ -1467,6 +1497,7 @@ struct ixgbe_phy_info {
1467 bool reset_disable; 1497 bool reset_disable;
1468 ixgbe_autoneg_advertised autoneg_advertised; 1498 ixgbe_autoneg_advertised autoneg_advertised;
1469 bool autoneg_wait_to_complete; 1499 bool autoneg_wait_to_complete;
1500 bool multispeed_fiber;
1470}; 1501};
1471 1502
1472struct ixgbe_hw { 1503struct ixgbe_hw {
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 01474572056..d3bf2f017cc 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -141,7 +141,7 @@ static int ixpdev_poll(struct napi_struct *napi, int budget)
141 break; 141 break;
142 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); 142 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
143 143
144 netif_rx_complete(napi); 144 napi_complete(napi);
145 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); 145 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
146 146
147 return rx; 147 return rx;
@@ -204,7 +204,7 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
204 204
205 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); 205 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
206 if (likely(napi_schedule_prep(&ip->napi))) { 206 if (likely(napi_schedule_prep(&ip->napi))) {
207 __netif_rx_schedule(&ip->napi); 207 __napi_schedule(&ip->napi);
208 } else { 208 } else {
209 printk(KERN_CRIT "ixp2000: irq while polling!!\n"); 209 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
210 } 210 }
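The ixp2000 hunk above, like most of the driver hunks that follow (jme, korina, macb, mlx4, myri10ge, natsemi, netxen, niu, pasemi_mac, pcnet32), is a mechanical rename of the old netif_rx_* NAPI helpers to their napi_* equivalents: netif_rx_schedule_prep() becomes napi_schedule_prep(), __netif_rx_schedule() becomes __napi_schedule(), and netif_rx_complete() becomes napi_complete(). A minimal sketch of the renamed idiom, with a purely hypothetical foo driver standing in for the real ones:

	/* hypothetical driver; only the napi_* calls mirror the hunks above */
	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		struct foo_priv *fp = dev_id;

		foo_disable_irqs(fp);			/* mask the device first */
		if (likely(napi_schedule_prep(&fp->napi)))
			__napi_schedule(&fp->napi);	/* was __netif_rx_schedule() */
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
		int work_done = foo_clean_rx(fp, budget);

		if (work_done < budget) {
			napi_complete(napi);		/* was netif_rx_complete() */
			foo_enable_irqs(fp);		/* re-arm device interrupts */
		}
		return work_done;
	}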
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 334ff9e12cd..14248cfc3df 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -131,7 +131,8 @@ static int __init sonic_probe1(struct net_device *dev)
131 if (sonic_debug && version_printed++ == 0) 131 if (sonic_debug && version_printed++ == 0)
132 printk(version); 132 printk(version);
133 133
134 printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", lp->device->bus_id, dev->base_addr); 134 printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ",
135 dev_name(lp->device), dev->base_addr);
135 136
136 /* 137 /*
137 * Put the sonic into software reset, then 138 * Put the sonic into software reset, then
@@ -156,7 +157,8 @@ static int __init sonic_probe1(struct net_device *dev)
156 if ((lp->descriptors = dma_alloc_coherent(lp->device, 157 if ((lp->descriptors = dma_alloc_coherent(lp->device,
157 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 158 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
158 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { 159 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
159 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id); 160 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
161 dev_name(lp->device));
160 goto out; 162 goto out;
161 } 163 }
162 164
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 5154411b5e6..e321c678b11 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -398,15 +398,15 @@ struct jme_ring {
398#define JME_NAPI_WEIGHT(w) int w 398#define JME_NAPI_WEIGHT(w) int w
399#define JME_NAPI_WEIGHT_VAL(w) w 399#define JME_NAPI_WEIGHT_VAL(w) w
400#define JME_NAPI_WEIGHT_SET(w, r) 400#define JME_NAPI_WEIGHT_SET(w, r)
401#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(napis) 401#define JME_RX_COMPLETE(dev, napis) napi_complete(napis)
402#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); 402#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
403#define JME_NAPI_DISABLE(priv) \ 403#define JME_NAPI_DISABLE(priv) \
404 if (!napi_disable_pending(&priv->napi)) \ 404 if (!napi_disable_pending(&priv->napi)) \
405 napi_disable(&priv->napi); 405 napi_disable(&priv->napi);
406#define JME_RX_SCHEDULE_PREP(priv) \ 406#define JME_RX_SCHEDULE_PREP(priv) \
407 netif_rx_schedule_prep(&priv->napi) 407 napi_schedule_prep(&priv->napi)
408#define JME_RX_SCHEDULE(priv) \ 408#define JME_RX_SCHEDULE(priv) \
409 __netif_rx_schedule(&priv->napi); 409 __napi_schedule(&priv->napi);
410 410
411/* 411/*
412 * Jmac Adapter Private data 412 * Jmac Adapter Private data
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 75010cac76a..38d6649a29c 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -334,7 +334,7 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
334 DMA_STAT_HALT | DMA_STAT_ERR), 334 DMA_STAT_HALT | DMA_STAT_ERR),
335 &lp->rx_dma_regs->dmasm); 335 &lp->rx_dma_regs->dmasm);
336 336
337 netif_rx_schedule(&lp->napi); 337 napi_schedule(&lp->napi);
338 338
339 if (dmas & DMA_STAT_ERR) 339 if (dmas & DMA_STAT_ERR)
340 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name); 340 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
@@ -468,7 +468,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
468 468
469 work_done = korina_rx(dev, budget); 469 work_done = korina_rx(dev, budget);
470 if (work_done < budget) { 470 if (work_done < budget) {
471 netif_rx_complete(napi); 471 napi_complete(napi);
472 472
473 writel(readl(&lp->rx_dma_regs->dmasm) & 473 writel(readl(&lp->rx_dma_regs->dmasm) &
474 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), 474 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f6c4936e2fa..872c1bdf42b 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -211,10 +211,10 @@ static int macb_mii_probe(struct net_device *dev)
211 211
212 /* attach the mac to the phy */ 212 /* attach the mac to the phy */
213 if (pdata && pdata->is_rmii) { 213 if (pdata && pdata->is_rmii) {
214 phydev = phy_connect(dev, phydev->dev.bus_id, 214 phydev = phy_connect(dev, dev_name(&phydev->dev),
215 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); 215 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
216 } else { 216 } else {
217 phydev = phy_connect(dev, phydev->dev.bus_id, 217 phydev = phy_connect(dev, dev_name(&phydev->dev),
218 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); 218 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
219 } 219 }
220 220
@@ -527,7 +527,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
527 * this function was called last time, and no packets 527 * this function was called last time, and no packets
528 * have been received since. 528 * have been received since.
529 */ 529 */
530 netif_rx_complete(napi); 530 napi_complete(napi);
531 goto out; 531 goto out;
532 } 532 }
533 533
@@ -538,13 +538,13 @@ static int macb_poll(struct napi_struct *napi, int budget)
538 dev_warn(&bp->pdev->dev, 538 dev_warn(&bp->pdev->dev,
539 "No RX buffers complete, status = %02lx\n", 539 "No RX buffers complete, status = %02lx\n",
540 (unsigned long)status); 540 (unsigned long)status);
541 netif_rx_complete(napi); 541 napi_complete(napi);
542 goto out; 542 goto out;
543 } 543 }
544 544
545 work_done = macb_rx(bp, budget); 545 work_done = macb_rx(bp, budget);
546 if (work_done < budget) 546 if (work_done < budget)
547 netif_rx_complete(napi); 547 napi_complete(napi);
548 548
549 /* 549 /*
550 * We've done what we can to clean the buffers. Make sure we 550 * We've done what we can to clean the buffers. Make sure we
@@ -579,7 +579,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
579 } 579 }
580 580
581 if (status & MACB_RX_INT_FLAGS) { 581 if (status & MACB_RX_INT_FLAGS) {
582 if (netif_rx_schedule_prep(&bp->napi)) { 582 if (napi_schedule_prep(&bp->napi)) {
583 /* 583 /*
584 * There's no point taking any more interrupts 584 * There's no point taking any more interrupts
585 * until we have processed the buffers 585 * until we have processed the buffers
@@ -587,7 +587,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
587 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 587 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
588 dev_dbg(&bp->pdev->dev, 588 dev_dbg(&bp->pdev->dev,
589 "scheduling RX softirq\n"); 589 "scheduling RX softirq\n");
590 __netif_rx_schedule(&bp->napi); 590 __napi_schedule(&bp->napi);
591 } 591 }
592 } 592 }
593 593
@@ -1077,7 +1077,7 @@ static void macb_get_drvinfo(struct net_device *dev,
1077 1077
1078 strcpy(info->driver, bp->pdev->dev.driver->name); 1078 strcpy(info->driver, bp->pdev->dev.driver->name);
1079 strcpy(info->version, "$Revision: 1.14 $"); 1079 strcpy(info->version, "$Revision: 1.14 $");
1080 strcpy(info->bus_info, bp->pdev->dev.bus_id); 1080 strcpy(info->bus_info, dev_name(&bp->pdev->dev));
1081} 1081}
1082 1082
1083static struct ethtool_ops macb_ethtool_ops = { 1083static struct ethtool_ops macb_ethtool_ops = {
@@ -1234,8 +1234,8 @@ static int __init macb_probe(struct platform_device *pdev)
1234 1234
1235 phydev = bp->phy_dev; 1235 phydev = bp->phy_dev;
1236 printk(KERN_INFO "%s: attached PHY driver [%s] " 1236 printk(KERN_INFO "%s: attached PHY driver [%s] "
1237 "(mii_bus:phy_addr=%s, irq=%d)\n", 1237 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
1238 dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); 1238 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
1239 1239
1240 return 0; 1240 return 0;
1241 1241
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 205bb05c25d..527166e35d5 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -176,7 +176,8 @@ static int __init macsonic_init(struct net_device *dev)
176 if ((lp->descriptors = dma_alloc_coherent(lp->device, 176 if ((lp->descriptors = dma_alloc_coherent(lp->device,
177 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 177 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
178 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { 178 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
179 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id); 179 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
180 dev_name(lp->device));
180 return -ENOMEM; 181 return -ENOMEM;
181 } 182 }
182 183
@@ -337,7 +338,7 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev)
337 sonic_version_printed = 1; 338 sonic_version_printed = 1;
338 } 339 }
339 printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", 340 printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
340 lp->device->bus_id, dev->base_addr); 341 dev_name(lp->device), dev->base_addr);
341 342
342 /* The PowerBook's SONIC is 16 bit always. */ 343 /* The PowerBook's SONIC is 16 bit always. */
343 if (macintosh_config->ident == MAC_MODEL_PB520) { 344 if (macintosh_config->ident == MAC_MODEL_PB520) {
@@ -370,10 +371,10 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev)
370 } 371 }
371 printk(KERN_INFO 372 printk(KERN_INFO
372 "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", 373 "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
373 lp->device->bus_id, sr, lp->dma_bitmode?32:16, lp->reg_offset); 374 dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset);
374 375
375#if 0 /* This is sometimes useful to find out how MacOS configured the card. */ 376#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
376 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, 377 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
377 SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); 378 SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
378#endif 379#endif
379 380
@@ -525,12 +526,12 @@ static int __init mac_nubus_sonic_probe(struct net_device *dev)
525 sonic_version_printed = 1; 526 sonic_version_printed = 1;
526 } 527 }
527 printk(KERN_INFO "%s: %s in slot %X\n", 528 printk(KERN_INFO "%s: %s in slot %X\n",
528 lp->device->bus_id, ndev->board->name, ndev->board->slot); 529 dev_name(lp->device), ndev->board->name, ndev->board->slot);
529 printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", 530 printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
530 lp->device->bus_id, SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); 531 dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset);
531 532
532#if 0 /* This is sometimes useful to find out how MacOS configured the card. */ 533#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
533 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, 534 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
534 SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); 535 SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
535#endif 536#endif
536 537
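The jazzsonic, macb and macsonic hunks above replace direct reads of the deprecated bus_id field with dev_name(), which returns the same device name string from the embedded kobject. A one-line sketch of the substitution (the dev pointer here is hypothetical, e.g. lp->device in the sonic drivers):

	/* old: printk(KERN_INFO "%s: ...\n", dev->bus_id); */
	printk(KERN_INFO "%s: probing\n", dev_name(dev));	/* dev_name() from <linux/device.h> */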
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index c61b0bdca1a..a4130e76499 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -768,6 +768,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
768 768
769 skb->ip_summed = ip_summed; 769 skb->ip_summed = ip_summed;
770 skb->protocol = eth_type_trans(skb, dev); 770 skb->protocol = eth_type_trans(skb, dev);
771 skb_record_rx_queue(skb, cq->ring);
771 772
772 /* Push it up the stack */ 773 /* Push it up the stack */
773 if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) & 774 if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
@@ -814,7 +815,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
814 struct mlx4_en_priv *priv = netdev_priv(cq->dev); 815 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
815 816
816 if (priv->port_up) 817 if (priv->port_up)
817 netif_rx_schedule(&cq->napi); 818 napi_schedule(&cq->napi);
818 else 819 else
819 mlx4_en_arm_cq(priv, cq); 820 mlx4_en_arm_cq(priv, cq);
820} 821}
@@ -834,7 +835,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
834 INC_PERF_COUNTER(priv->pstats.napi_quota); 835 INC_PERF_COUNTER(priv->pstats.napi_quota);
835 else { 836 else {
836 /* Done for now */ 837 /* Done for now */
837 netif_rx_complete(napi); 838 napi_complete(napi);
838 mlx4_en_arm_cq(priv, cq); 839 mlx4_en_arm_cq(priv, cq);
839 } 840 }
840 return done; 841 return done;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 5f31bbb614a..8fab31f631a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2589,7 +2589,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2589 2589
2590 phy_reset(mp); 2590 phy_reset(mp);
2591 2591
2592 phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII); 2592 phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
2593 2593
2594 if (speed == 0) { 2594 if (speed == 0) {
2595 phy->autoneg = AUTONEG_ENABLE; 2595 phy->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e9c1296b267..aea9fdaa3cd 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1324,6 +1324,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
1324 skb_shinfo(skb)->nr_frags = 0; 1324 skb_shinfo(skb)->nr_frags = 0;
1325 } 1325 }
1326 skb->protocol = eth_type_trans(skb, dev); 1326 skb->protocol = eth_type_trans(skb, dev);
1327 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1327 1328
1328 if (mgp->csum_flag) { 1329 if (mgp->csum_flag) {
1329 if ((skb->protocol == htons(ETH_P_IP)) || 1330 if ((skb->protocol == htons(ETH_P_IP)) ||
@@ -1514,7 +1515,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1514 work_done = myri10ge_clean_rx_done(ss, budget); 1515 work_done = myri10ge_clean_rx_done(ss, budget);
1515 1516
1516 if (work_done < budget) { 1517 if (work_done < budget) {
1517 netif_rx_complete(napi); 1518 napi_complete(napi);
1518 put_be32(htonl(3), ss->irq_claim); 1519 put_be32(htonl(3), ss->irq_claim);
1519 } 1520 }
1520 return work_done; 1521 return work_done;
@@ -1532,7 +1533,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1532 /* an interrupt on a non-zero receive-only slice is implicitly 1533 /* an interrupt on a non-zero receive-only slice is implicitly
1533 * valid since MSI-X irqs are not shared */ 1534 * valid since MSI-X irqs are not shared */
1534 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { 1535 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1535 netif_rx_schedule(&ss->napi); 1536 napi_schedule(&ss->napi);
1536 return (IRQ_HANDLED); 1537 return (IRQ_HANDLED);
1537 } 1538 }
1538 1539
@@ -1543,7 +1544,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1543 /* low bit indicates receives are present, so schedule 1544 /* low bit indicates receives are present, so schedule
1544 * napi poll handler */ 1545 * napi poll handler */
1545 if (stats->valid & 1) 1546 if (stats->valid & 1)
1546 netif_rx_schedule(&ss->napi); 1547 napi_schedule(&ss->napi);
1547 1548
1548 if (!mgp->msi_enabled && !mgp->msix_enabled) { 1549 if (!mgp->msi_enabled && !mgp->msix_enabled) {
1549 put_be32(0, mgp->irq_deassert); 1550 put_be32(0, mgp->irq_deassert);
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 899ed065a14..88b52883ace 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -748,7 +748,7 @@ static int myri_rebuild_header(struct sk_buff *skb)
748 switch (eth->h_proto) 748 switch (eth->h_proto)
749 { 749 {
750#ifdef CONFIG_INET 750#ifdef CONFIG_INET
751 case __constant_htons(ETH_P_IP): 751 case cpu_to_be16(ETH_P_IP):
752 return arp_find(eth->h_dest, skb); 752 return arp_find(eth->h_dest, skb);
753#endif 753#endif
754 754
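The myri_sbus hunk above, together with the earlier ixgbe and the later netxen/niu hunks, swaps __constant_htons() for cpu_to_be16() in case labels and comparisons. With a constant argument cpu_to_be16() folds to an integer constant expression at compile time, so it is valid in a switch and the separate __constant_ variant is no longer needed. A minimal sketch (the handle_ipv4/handle_ipv6 helpers are hypothetical):

	/* skb->protocol is __be16 (network byte order); ETH_P_IP is a host-order constant */
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):		/* folds to a constant at compile time */
		handle_ipv4(skb);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		handle_ipv6(skb);
		break;
	default:
		break;
	}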
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index c5dec54251b..c23a58624a3 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2198,10 +2198,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
2198 2198
2199 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); 2199 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
2200 2200
2201 if (netif_rx_schedule_prep(&np->napi)) { 2201 if (napi_schedule_prep(&np->napi)) {
2202 /* Disable interrupts and register for poll */ 2202 /* Disable interrupts and register for poll */
2203 natsemi_irq_disable(dev); 2203 natsemi_irq_disable(dev);
2204 __netif_rx_schedule(&np->napi); 2204 __napi_schedule(&np->napi);
2205 } else 2205 } else
2206 printk(KERN_WARNING 2206 printk(KERN_WARNING
2207 "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", 2207 "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
@@ -2253,7 +2253,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
2253 np->intr_status = readl(ioaddr + IntrStatus); 2253 np->intr_status = readl(ioaddr + IntrStatus);
2254 } while (np->intr_status); 2254 } while (np->intr_status);
2255 2255
2256 netif_rx_complete(napi); 2256 napi_complete(napi);
2257 2257
2258 /* Reenable interrupts providing nothing is trying to shut 2258 /* Reenable interrupts providing nothing is trying to shut
2259 * the chip down. */ 2259 * the chip down. */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f33e442f40..4fe20ecdbc6 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1109,7 +1109,7 @@ static bool netxen_tso_check(struct net_device *netdev,
1109 __be16 protocol = skb->protocol; 1109 __be16 protocol = skb->protocol;
1110 u16 flags = 0; 1110 u16 flags = 0;
1111 1111
1112 if (protocol == __constant_htons(ETH_P_8021Q)) { 1112 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1113 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data; 1113 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1114 protocol = vh->h_vlan_encapsulated_proto; 1114 protocol = vh->h_vlan_encapsulated_proto;
1115 flags = FLAGS_VLAN_TAGGED; 1115 flags = FLAGS_VLAN_TAGGED;
@@ -1122,21 +1122,21 @@ static bool netxen_tso_check(struct net_device *netdev,
1122 desc->total_hdr_length = 1122 desc->total_hdr_length =
1123 skb_transport_offset(skb) + tcp_hdrlen(skb); 1123 skb_transport_offset(skb) + tcp_hdrlen(skb);
1124 1124
1125 opcode = (protocol == __constant_htons(ETH_P_IPV6)) ? 1125 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1126 TX_TCP_LSO6 : TX_TCP_LSO; 1126 TX_TCP_LSO6 : TX_TCP_LSO;
1127 tso = true; 1127 tso = true;
1128 1128
1129 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1129 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1130 u8 l4proto; 1130 u8 l4proto;
1131 1131
1132 if (protocol == __constant_htons(ETH_P_IP)) { 1132 if (protocol == cpu_to_be16(ETH_P_IP)) {
1133 l4proto = ip_hdr(skb)->protocol; 1133 l4proto = ip_hdr(skb)->protocol;
1134 1134
1135 if (l4proto == IPPROTO_TCP) 1135 if (l4proto == IPPROTO_TCP)
1136 opcode = TX_TCP_PKT; 1136 opcode = TX_TCP_PKT;
1137 else if(l4proto == IPPROTO_UDP) 1137 else if(l4proto == IPPROTO_UDP)
1138 opcode = TX_UDP_PKT; 1138 opcode = TX_UDP_PKT;
1139 } else if (protocol == __constant_htons(ETH_P_IPV6)) { 1139 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1140 l4proto = ipv6_hdr(skb)->nexthdr; 1140 l4proto = ipv6_hdr(skb)->nexthdr;
1141 1141
1142 if (l4proto == IPPROTO_TCP) 1142 if (l4proto == IPPROTO_TCP)
@@ -1587,7 +1587,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
1587 } 1587 }
1588 1588
1589 if ((work_done < budget) && tx_complete) { 1589 if ((work_done < budget) && tx_complete) {
1590 netif_rx_complete(&adapter->napi); 1590 napi_complete(&adapter->napi);
1591 netxen_nic_enable_int(adapter); 1591 netxen_nic_enable_int(adapter);
1592 } 1592 }
1593 1593
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 0c0b752315c..c26325ded20 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3390,6 +3390,7 @@ static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
3390 rp->rx_bytes += skb->len; 3390 rp->rx_bytes += skb->len;
3391 3391
3392 skb->protocol = eth_type_trans(skb, np->dev); 3392 skb->protocol = eth_type_trans(skb, np->dev);
3393 skb_record_rx_queue(skb, rp->rx_channel);
3393 netif_receive_skb(skb); 3394 netif_receive_skb(skb);
3394 3395
3395 return num_rcr; 3396 return num_rcr;
@@ -3669,7 +3670,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
3669 work_done = niu_poll_core(np, lp, budget); 3670 work_done = niu_poll_core(np, lp, budget);
3670 3671
3671 if (work_done < budget) { 3672 if (work_done < budget) {
3672 netif_rx_complete(napi); 3673 napi_complete(napi);
3673 niu_ldg_rearm(np, lp, 1); 3674 niu_ldg_rearm(np, lp, 1);
3674 } 3675 }
3675 return work_done; 3676 return work_done;
@@ -4088,12 +4089,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4088static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 4089static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4089 u64 v0, u64 v1, u64 v2) 4090 u64 v0, u64 v1, u64 v2)
4090{ 4091{
4091 if (likely(netif_rx_schedule_prep(&lp->napi))) { 4092 if (likely(napi_schedule_prep(&lp->napi))) {
4092 lp->v0 = v0; 4093 lp->v0 = v0;
4093 lp->v1 = v1; 4094 lp->v1 = v1;
4094 lp->v2 = v2; 4095 lp->v2 = v2;
4095 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 4096 __niu_fastpath_interrupt(np, lp->ldg_num, v0);
4096 __netif_rx_schedule(&lp->napi); 4097 __napi_schedule(&lp->napi);
4097 } 4098 }
4098} 4099}
4099 4100
@@ -6446,11 +6447,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6446 6447
6447 ipv6 = ihl = 0; 6448 ipv6 = ihl = 0;
6448 switch (skb->protocol) { 6449 switch (skb->protocol) {
6449 case __constant_htons(ETH_P_IP): 6450 case cpu_to_be16(ETH_P_IP):
6450 ip_proto = ip_hdr(skb)->protocol; 6451 ip_proto = ip_hdr(skb)->protocol;
6451 ihl = ip_hdr(skb)->ihl; 6452 ihl = ip_hdr(skb)->ihl;
6452 break; 6453 break;
6453 case __constant_htons(ETH_P_IPV6): 6454 case cpu_to_be16(ETH_P_IPV6):
6454 ip_proto = ipv6_hdr(skb)->nexthdr; 6455 ip_proto = ipv6_hdr(skb)->nexthdr;
6455 ihl = (40 >> 2); 6456 ihl = (40 >> 2);
6456 ipv6 = 1; 6457 ipv6 = 1;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index d0349e7d73e..5eeb5a87b73 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -970,7 +970,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
970 if (*chan->status & PAS_STATUS_ERROR) 970 if (*chan->status & PAS_STATUS_ERROR)
971 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; 971 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
972 972
973 netif_rx_schedule(&mac->napi); 973 napi_schedule(&mac->napi);
974 974
975 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); 975 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);
976 976
@@ -1010,7 +1010,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
1010 1010
1011 mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); 1011 mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);
1012 1012
1013 netif_rx_schedule(&mac->napi); 1013 napi_schedule(&mac->napi);
1014 1014
1015 if (reg) 1015 if (reg)
1016 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); 1016 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
@@ -1639,7 +1639,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
1639 pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); 1639 pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
1640 if (pkts < budget) { 1640 if (pkts < budget) {
1641 /* all done, no more packets present */ 1641 /* all done, no more packets present */
1642 netif_rx_complete(napi); 1642 napi_complete(napi);
1643 1643
1644 pasemi_mac_restart_rx_intr(mac); 1644 pasemi_mac_restart_rx_intr(mac);
1645 pasemi_mac_restart_tx_intr(mac); 1645 pasemi_mac_restart_tx_intr(mac);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 665a4286da3..80124fac65f 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1397,7 +1397,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
1397 if (work_done < budget) { 1397 if (work_done < budget) {
1398 spin_lock_irqsave(&lp->lock, flags); 1398 spin_lock_irqsave(&lp->lock, flags);
1399 1399
1400 __netif_rx_complete(napi); 1400 __napi_complete(napi);
1401 1401
1402 /* clear interrupt masks */ 1402 /* clear interrupt masks */
1403 val = lp->a.read_csr(ioaddr, CSR3); 1403 val = lp->a.read_csr(ioaddr, CSR3);
@@ -2592,14 +2592,14 @@ pcnet32_interrupt(int irq, void *dev_id)
2592 dev->name, csr0); 2592 dev->name, csr0);
2593 /* unlike for the lance, there is no restart needed */ 2593 /* unlike for the lance, there is no restart needed */
2594 } 2594 }
2595 if (netif_rx_schedule_prep(&lp->napi)) { 2595 if (napi_schedule_prep(&lp->napi)) {
2596 u16 val; 2596 u16 val;
2597 /* set interrupt masks */ 2597 /* set interrupt masks */
2598 val = lp->a.read_csr(ioaddr, CSR3); 2598 val = lp->a.read_csr(ioaddr, CSR3);
2599 val |= 0x5f00; 2599 val |= 0x5f00;
2600 lp->a.write_csr(ioaddr, CSR3, val); 2600 lp->a.write_csr(ioaddr, CSR3, val);
2601 mmiowb(); 2601 mmiowb();
2602 __netif_rx_schedule(&lp->napi); 2602 __napi_schedule(&lp->napi);
2603 break; 2603 break;
2604 } 2604 }
2605 csr0 = lp->a.read_csr(ioaddr, CSR0); 2605 csr0 = lp->a.read_csr(ioaddr, CSR0);
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index af28ff7ae17..33984b73723 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -202,16 +202,21 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
202{ 202{
203 struct device_node *np = NULL; 203 struct device_node *np = NULL;
204 struct mdio_gpio_platform_data *pdata; 204 struct mdio_gpio_platform_data *pdata;
205 int ret;
205 206
206 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 207 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
207 if (!pdata) 208 if (!pdata)
208 return -ENOMEM; 209 return -ENOMEM;
209 210
210 pdata->mdc = of_get_gpio(ofdev->node, 0); 211 ret = of_get_gpio(ofdev->node, 0);
211 pdata->mdio = of_get_gpio(ofdev->node, 1); 212 if (ret < 0)
212
213 if (pdata->mdc < 0 || pdata->mdio < 0)
214 goto out_free; 213 goto out_free;
214 pdata->mdc = ret;
215
216 ret = of_get_gpio(ofdev->node, 1);
217 if (ret < 0)
218 goto out_free;
219 pdata->mdio = ret;
215 220
216 while ((np = of_get_next_child(ofdev->node, np))) 221 while ((np = of_get_next_child(ofdev->node, np)))
217 if (!strcmp(np->type, "ethernet-phy")) 222 if (!strcmp(np->type, "ethernet-phy"))
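The mdio-gpio hunk above stops assigning of_get_gpio()'s result straight into pdata->mdc and pdata->mdio before checking it; storing a negative error code in those fields and testing them afterwards can lose the sign if the fields are unsigned, which appears to be the motivation here. The fix keeps the value in a signed int until it is known to be a valid GPIO number. A sketch of the corrected pattern (error label as in the hunk):

	int ret;

	ret = of_get_gpio(ofdev->node, 0);	/* may return a negative errno */
	if (ret < 0)
		goto out_free;			/* check while the value is still signed */
	pdata->mdc = ret;			/* store only once known non-negative */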
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 811a637695c..bb29ae3ff17 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/device.h>
24#include <linux/netdevice.h> 25#include <linux/netdevice.h>
25#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
26#include <linux/skbuff.h> 27#include <linux/skbuff.h>
@@ -286,33 +287,58 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
286 (phydev->phy_id & phydrv->phy_id_mask)); 287 (phydev->phy_id & phydrv->phy_id_mask));
287} 288}
288 289
290static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
291{
292 struct device_driver *drv = phydev->dev.driver;
293 struct phy_driver *phydrv = to_phy_driver(drv);
294 struct net_device *netdev = phydev->attached_dev;
295
296 if (!drv || !phydrv->suspend)
297 return false;
298
299 /* PHY not attached? May suspend. */
300 if (!netdev)
301 return true;
302
303 /*
304 * Don't suspend PHY if the attached netdev parent may wake up.
305 * The parent may point to a PCI device, as in tg3 driver.
306 */
307 if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
308 return false;
309
310 /*
311 * Also don't suspend PHY if the netdev itself may wake up. This
312 * is the case for devices without an underlying power-management-aware bus,
313 * e.g. SoC devices.
314 */
315 if (device_may_wakeup(&netdev->dev))
316 return false;
317
318 return true;
319}
320
289/* Suspend and resume. Copied from platform_suspend and 321/* Suspend and resume. Copied from platform_suspend and
290 * platform_resume 322 * platform_resume
291 */ 323 */
292static int mdio_bus_suspend(struct device * dev, pm_message_t state) 324static int mdio_bus_suspend(struct device * dev, pm_message_t state)
293{ 325{
294 int ret = 0; 326 struct phy_driver *phydrv = to_phy_driver(dev->driver);
295 struct device_driver *drv = dev->driver;
296 struct phy_driver *phydrv = to_phy_driver(drv);
297 struct phy_device *phydev = to_phy_device(dev); 327 struct phy_device *phydev = to_phy_device(dev);
298 328
299 if (drv && phydrv->suspend && !device_may_wakeup(phydev->dev.parent)) 329 if (!mdio_bus_phy_may_suspend(phydev))
300 ret = phydrv->suspend(phydev); 330 return 0;
301 331 return phydrv->suspend(phydev);
302 return ret;
303} 332}
304 333
305static int mdio_bus_resume(struct device * dev) 334static int mdio_bus_resume(struct device * dev)
306{ 335{
307 int ret = 0; 336 struct phy_driver *phydrv = to_phy_driver(dev->driver);
308 struct device_driver *drv = dev->driver;
309 struct phy_driver *phydrv = to_phy_driver(drv);
310 struct phy_device *phydev = to_phy_device(dev); 337 struct phy_device *phydev = to_phy_device(dev);
311 338
312 if (drv && phydrv->resume && !device_may_wakeup(phydev->dev.parent)) 339 if (!mdio_bus_phy_may_suspend(phydev))
313 ret = phydrv->resume(phydev); 340 return 0;
314 341 return phydrv->resume(phydev);
315 return ret;
316} 342}
317 343
318struct bus_type mdio_bus_type = { 344struct bus_type mdio_bus_type = {
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 7b2728b8f1b..fddc8493f35 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -49,6 +49,10 @@
49#include <net/slhc_vj.h> 49#include <net/slhc_vj.h>
50#include <asm/atomic.h> 50#include <asm/atomic.h>
51 51
52#include <linux/nsproxy.h>
53#include <net/net_namespace.h>
54#include <net/netns/generic.h>
55
52#define PPP_VERSION "2.4.2" 56#define PPP_VERSION "2.4.2"
53 57
54/* 58/*
@@ -131,6 +135,7 @@ struct ppp {
131 struct sock_filter *active_filter;/* filter for pkts to reset idle */ 135 struct sock_filter *active_filter;/* filter for pkts to reset idle */
132 unsigned pass_len, active_len; 136 unsigned pass_len, active_len;
133#endif /* CONFIG_PPP_FILTER */ 137#endif /* CONFIG_PPP_FILTER */
138 struct net *ppp_net; /* the net we belong to */
134}; 139};
135 140
136/* 141/*
@@ -155,6 +160,7 @@ struct channel {
155 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ 160 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
156 spinlock_t downl; /* protects `chan', file.xq dequeue */ 161 spinlock_t downl; /* protects `chan', file.xq dequeue */
157 struct ppp *ppp; /* ppp unit we're connected to */ 162 struct ppp *ppp; /* ppp unit we're connected to */
163 struct net *chan_net; /* the net channel belongs to */
158 struct list_head clist; /* link in list of channels per unit */ 164 struct list_head clist; /* link in list of channels per unit */
159 rwlock_t upl; /* protects `ppp' */ 165 rwlock_t upl; /* protects `ppp' */
160#ifdef CONFIG_PPP_MULTILINK 166#ifdef CONFIG_PPP_MULTILINK
@@ -173,26 +179,35 @@ struct channel {
173 * channel.downl. 179 * channel.downl.
174 */ 180 */
175 181
176/*
177 * all_ppp_mutex protects the all_ppp_units mapping.
178 * It also ensures that finding a ppp unit in the all_ppp_units map
179 * and updating its file.refcnt field is atomic.
180 */
181static DEFINE_MUTEX(all_ppp_mutex);
182static atomic_t ppp_unit_count = ATOMIC_INIT(0); 182static atomic_t ppp_unit_count = ATOMIC_INIT(0);
183static DEFINE_IDR(ppp_units_idr);
184
185/*
186 * all_channels_lock protects all_channels and last_channel_index,
187 * and the atomicity of find a channel and updating its file.refcnt
188 * field.
189 */
190static DEFINE_SPINLOCK(all_channels_lock);
191static LIST_HEAD(all_channels);
192static LIST_HEAD(new_channels);
193static int last_channel_index;
194static atomic_t channel_count = ATOMIC_INIT(0); 183static atomic_t channel_count = ATOMIC_INIT(0);
195 184
185/* per-net private data for this module */
186static unsigned int ppp_net_id;
187struct ppp_net {
188 /* units to ppp mapping */
189 struct idr units_idr;
190
191 /*
192 * all_ppp_mutex protects the units_idr mapping.
193 * It also ensures that finding a ppp unit in the units_idr
194 * map and updating its file.refcnt field is atomic.
195 */
196 struct mutex all_ppp_mutex;
197
198 /* channels */
199 struct list_head all_channels;
200 struct list_head new_channels;
201 int last_channel_index;
202
203 /*
204 * all_channels_lock protects all_channels and
205 * last_channel_index, and the atomicity of find
206 * a channel and updating its file.refcnt field.
207 */
208 spinlock_t all_channels_lock;
209};
210
196/* Get the PPP protocol number from a skb */ 211/* Get the PPP protocol number from a skb */
197#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) 212#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
198 213
@@ -216,8 +231,8 @@ static atomic_t channel_count = ATOMIC_INIT(0);
216#define seq_after(a, b) ((s32)((a) - (b)) > 0) 231#define seq_after(a, b) ((s32)((a) - (b)) > 0)
217 232
218/* Prototypes. */ 233/* Prototypes. */
219static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, 234static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
220 unsigned int cmd, unsigned long arg); 235 struct file *file, unsigned int cmd, unsigned long arg);
221static void ppp_xmit_process(struct ppp *ppp); 236static void ppp_xmit_process(struct ppp *ppp);
222static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 237static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
223static void ppp_push(struct ppp *ppp); 238static void ppp_push(struct ppp *ppp);
@@ -240,12 +255,12 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
240static void ppp_ccp_closed(struct ppp *ppp); 255static void ppp_ccp_closed(struct ppp *ppp);
241static struct compressor *find_compressor(int type); 256static struct compressor *find_compressor(int type);
242static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 257static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
243static struct ppp *ppp_create_interface(int unit, int *retp); 258static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
244static void init_ppp_file(struct ppp_file *pf, int kind); 259static void init_ppp_file(struct ppp_file *pf, int kind);
245static void ppp_shutdown_interface(struct ppp *ppp); 260static void ppp_shutdown_interface(struct ppp *ppp);
246static void ppp_destroy_interface(struct ppp *ppp); 261static void ppp_destroy_interface(struct ppp *ppp);
247static struct ppp *ppp_find_unit(int unit); 262static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
248static struct channel *ppp_find_channel(int unit); 263static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
249static int ppp_connect_channel(struct channel *pch, int unit); 264static int ppp_connect_channel(struct channel *pch, int unit);
250static int ppp_disconnect_channel(struct channel *pch); 265static int ppp_disconnect_channel(struct channel *pch);
251static void ppp_destroy_channel(struct channel *pch); 266static void ppp_destroy_channel(struct channel *pch);
@@ -256,6 +271,14 @@ static void *unit_find(struct idr *p, int n);
256 271
257static struct class *ppp_class; 272static struct class *ppp_class;
258 273
274/* per net-namespace data */
275static inline struct ppp_net *ppp_pernet(struct net *net)
276{
277 BUG_ON(!net);
278
279 return net_generic(net, ppp_net_id);
280}
281
259/* Translates a PPP protocol number to a NP index (NP == network protocol) */ 282/* Translates a PPP protocol number to a NP index (NP == network protocol) */
260static inline int proto_to_npindex(int proto) 283static inline int proto_to_npindex(int proto)
261{ 284{
@@ -544,7 +567,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
544 int __user *p = argp; 567 int __user *p = argp;
545 568
546 if (!pf) 569 if (!pf)
547 return ppp_unattached_ioctl(pf, file, cmd, arg); 570 return ppp_unattached_ioctl(current->nsproxy->net_ns,
571 pf, file, cmd, arg);
548 572
549 if (cmd == PPPIOCDETACH) { 573 if (cmd == PPPIOCDETACH) {
550 /* 574 /*
@@ -763,12 +787,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
763 return err; 787 return err;
764} 788}
765 789
766static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, 790static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
767 unsigned int cmd, unsigned long arg) 791 struct file *file, unsigned int cmd, unsigned long arg)
768{ 792{
769 int unit, err = -EFAULT; 793 int unit, err = -EFAULT;
770 struct ppp *ppp; 794 struct ppp *ppp;
771 struct channel *chan; 795 struct channel *chan;
796 struct ppp_net *pn;
772 int __user *p = (int __user *)arg; 797 int __user *p = (int __user *)arg;
773 798
774 lock_kernel(); 799 lock_kernel();
@@ -777,7 +802,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
777 /* Create a new ppp unit */ 802 /* Create a new ppp unit */
778 if (get_user(unit, p)) 803 if (get_user(unit, p))
779 break; 804 break;
780 ppp = ppp_create_interface(unit, &err); 805 ppp = ppp_create_interface(net, unit, &err);
781 if (!ppp) 806 if (!ppp)
782 break; 807 break;
783 file->private_data = &ppp->file; 808 file->private_data = &ppp->file;
@@ -792,29 +817,31 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
792 /* Attach to an existing ppp unit */ 817 /* Attach to an existing ppp unit */
793 if (get_user(unit, p)) 818 if (get_user(unit, p))
794 break; 819 break;
795 mutex_lock(&all_ppp_mutex);
796 err = -ENXIO; 820 err = -ENXIO;
797 ppp = ppp_find_unit(unit); 821 pn = ppp_pernet(net);
822 mutex_lock(&pn->all_ppp_mutex);
823 ppp = ppp_find_unit(pn, unit);
798 if (ppp) { 824 if (ppp) {
799 atomic_inc(&ppp->file.refcnt); 825 atomic_inc(&ppp->file.refcnt);
800 file->private_data = &ppp->file; 826 file->private_data = &ppp->file;
801 err = 0; 827 err = 0;
802 } 828 }
803 mutex_unlock(&all_ppp_mutex); 829 mutex_unlock(&pn->all_ppp_mutex);
804 break; 830 break;
805 831
806 case PPPIOCATTCHAN: 832 case PPPIOCATTCHAN:
807 if (get_user(unit, p)) 833 if (get_user(unit, p))
808 break; 834 break;
809 spin_lock_bh(&all_channels_lock);
810 err = -ENXIO; 835 err = -ENXIO;
811 chan = ppp_find_channel(unit); 836 pn = ppp_pernet(net);
837 spin_lock_bh(&pn->all_channels_lock);
838 chan = ppp_find_channel(pn, unit);
812 if (chan) { 839 if (chan) {
813 atomic_inc(&chan->file.refcnt); 840 atomic_inc(&chan->file.refcnt);
814 file->private_data = &chan->file; 841 file->private_data = &chan->file;
815 err = 0; 842 err = 0;
816 } 843 }
817 spin_unlock_bh(&all_channels_lock); 844 spin_unlock_bh(&pn->all_channels_lock);
818 break; 845 break;
819 846
820 default: 847 default:
@@ -834,6 +861,51 @@ static const struct file_operations ppp_device_fops = {
834 .release = ppp_release 861 .release = ppp_release
835}; 862};
836 863
864static __net_init int ppp_init_net(struct net *net)
865{
866 struct ppp_net *pn;
867 int err;
868
869 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
870 if (!pn)
871 return -ENOMEM;
872
873 idr_init(&pn->units_idr);
874 mutex_init(&pn->all_ppp_mutex);
875
876 INIT_LIST_HEAD(&pn->all_channels);
877 INIT_LIST_HEAD(&pn->new_channels);
878
879 spin_lock_init(&pn->all_channels_lock);
880
881 err = net_assign_generic(net, ppp_net_id, pn);
882 if (err) {
883 kfree(pn);
884 return err;
885 }
886
887 return 0;
888}
889
890static __net_exit void ppp_exit_net(struct net *net)
891{
892 struct ppp_net *pn;
893
894 pn = net_generic(net, ppp_net_id);
895 idr_destroy(&pn->units_idr);
896 /*
897 * if someone has cached our net then
898 * further net_generic call will return NULL
899 */
900 net_assign_generic(net, ppp_net_id, NULL);
901 kfree(pn);
902}
903
904static struct pernet_operations ppp_net_ops = {
905 .init = ppp_init_net,
906 .exit = ppp_exit_net,
907};
908
837#define PPP_MAJOR 108 909#define PPP_MAJOR 108
838 910
839/* Called at boot time if ppp is compiled into the kernel, 911/* Called at boot time if ppp is compiled into the kernel,
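The ppp_generic changes in this file move the driver's global unit and channel tables into per-namespace state: ppp_init_net() allocates a struct ppp_net and publishes it with net_assign_generic(), ppp_exit_net() tears it down, and lookups go through the ppp_pernet() accessor (a thin wrapper around net_generic()). A minimal usage sketch of that accessor; the ppp_count_channels() helper is hypothetical and exists only to show the per-net locking and list fields from the hunks above:

	static int ppp_count_channels(struct net *net)
	{
		struct ppp_net *pn = ppp_pernet(net);	/* net_generic(net, ppp_net_id) */
		struct channel *pch;
		int n = 0;

		spin_lock_bh(&pn->all_channels_lock);
		list_for_each_entry(pch, &pn->all_channels, list)
			n++;
		spin_unlock_bh(&pn->all_channels_lock);
		return n;
	}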
@@ -843,25 +915,36 @@ static int __init ppp_init(void)
843 int err; 915 int err;
844 916
845 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 917 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
846 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 918
847 if (!err) { 919 err = register_pernet_gen_device(&ppp_net_id, &ppp_net_ops);
848 ppp_class = class_create(THIS_MODULE, "ppp"); 920 if (err) {
849 if (IS_ERR(ppp_class)) { 921 printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
850 err = PTR_ERR(ppp_class); 922 goto out;
851 goto out_chrdev;
852 }
853 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL,
854 "ppp");
855 } 923 }
856 924
857out: 925 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
858 if (err) 926 if (err) {
859 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 927 printk(KERN_ERR "failed to register PPP device (%d)\n", err);
860 return err; 928 goto out_net;
929 }
930
931 ppp_class = class_create(THIS_MODULE, "ppp");
932 if (IS_ERR(ppp_class)) {
933 err = PTR_ERR(ppp_class);
934 goto out_chrdev;
935 }
936
937 /* not a big deal if we fail here :-) */
938 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
939
940 return 0;
861 941
862out_chrdev: 942out_chrdev:
863 unregister_chrdev(PPP_MAJOR, "ppp"); 943 unregister_chrdev(PPP_MAJOR, "ppp");
864 goto out; 944out_net:
945 unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
946out:
947 return err;
865} 948}
866 949
867/* 950/*
@@ -969,6 +1052,7 @@ static void ppp_setup(struct net_device *dev)
969 dev->tx_queue_len = 3; 1052 dev->tx_queue_len = 3;
970 dev->type = ARPHRD_PPP; 1053 dev->type = ARPHRD_PPP;
971 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1054 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1055 dev->features |= NETIF_F_NETNS_LOCAL;
972} 1056}
973 1057
974/* 1058/*
@@ -1986,19 +2070,27 @@ ppp_mp_reconstruct(struct ppp *ppp)
1986 * Channel interface. 2070 * Channel interface.
1987 */ 2071 */
1988 2072
1989/* 2073/* Create a new, unattached ppp channel. */
1990 * Create a new, unattached ppp channel. 2074int ppp_register_channel(struct ppp_channel *chan)
1991 */ 2075{
1992int 2076 return ppp_register_net_channel(current->nsproxy->net_ns, chan);
1993ppp_register_channel(struct ppp_channel *chan) 2077}
2078
2079/* Create a new, unattached ppp channel for specified net. */
2080int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
1994{ 2081{
1995 struct channel *pch; 2082 struct channel *pch;
2083 struct ppp_net *pn;
1996 2084
1997 pch = kzalloc(sizeof(struct channel), GFP_KERNEL); 2085 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1998 if (!pch) 2086 if (!pch)
1999 return -ENOMEM; 2087 return -ENOMEM;
2088
2089 pn = ppp_pernet(net);
2090
2000 pch->ppp = NULL; 2091 pch->ppp = NULL;
2001 pch->chan = chan; 2092 pch->chan = chan;
2093 pch->chan_net = net;
2002 chan->ppp = pch; 2094 chan->ppp = pch;
2003 init_ppp_file(&pch->file, CHANNEL); 2095 init_ppp_file(&pch->file, CHANNEL);
2004 pch->file.hdrlen = chan->hdrlen; 2096 pch->file.hdrlen = chan->hdrlen;
@@ -2008,11 +2100,13 @@ ppp_register_channel(struct ppp_channel *chan)
2008 init_rwsem(&pch->chan_sem); 2100 init_rwsem(&pch->chan_sem);
2009 spin_lock_init(&pch->downl); 2101 spin_lock_init(&pch->downl);
2010 rwlock_init(&pch->upl); 2102 rwlock_init(&pch->upl);
2011 spin_lock_bh(&all_channels_lock); 2103
2012 pch->file.index = ++last_channel_index; 2104 spin_lock_bh(&pn->all_channels_lock);
2013 list_add(&pch->list, &new_channels); 2105 pch->file.index = ++pn->last_channel_index;
2106 list_add(&pch->list, &pn->new_channels);
2014 atomic_inc(&channel_count); 2107 atomic_inc(&channel_count);
2015 spin_unlock_bh(&all_channels_lock); 2108 spin_unlock_bh(&pn->all_channels_lock);
2109
2016 return 0; 2110 return 0;
2017} 2111}
2018 2112
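
From a channel driver's point of view the split above means: keep calling ppp_register_channel() if the old implicit behaviour (the caller's own namespace via current->nsproxy->net_ns) is what you want, or pass an explicit namespace through the new ppp_register_net_channel(). Below is a hedged sketch of the latter; every name except the two ppp_* entry points is hypothetical.

/* sketch of a netns-aware caller; my_* names are hypothetical */
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>

struct my_priv {
	struct ppp_channel chan;
};

static struct ppp_channel_ops my_chan_ops;	/* .start_xmit set up elsewhere */

static int my_bind_channel(struct my_priv *priv, struct net_device *dev)
{
	priv->chan.private = priv;
	priv->chan.ops = &my_chan_ops;
	priv->chan.mtu = dev->mtu;

	/* register in the namespace the underlying device lives in,
	 * which is what pppoe and pppol2tp switch to later in this patch */
	return ppp_register_net_channel(dev_net(dev), &priv->chan);
}
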
@@ -2053,9 +2147,11 @@ void
2053ppp_unregister_channel(struct ppp_channel *chan) 2147ppp_unregister_channel(struct ppp_channel *chan)
2054{ 2148{
2055 struct channel *pch = chan->ppp; 2149 struct channel *pch = chan->ppp;
2150 struct ppp_net *pn;
2056 2151
2057 if (!pch) 2152 if (!pch)
2058 return; /* should never happen */ 2153 return; /* should never happen */
2154
2059 chan->ppp = NULL; 2155 chan->ppp = NULL;
2060 2156
2061 /* 2157 /*
@@ -2068,9 +2164,12 @@ ppp_unregister_channel(struct ppp_channel *chan)
2068 spin_unlock_bh(&pch->downl); 2164 spin_unlock_bh(&pch->downl);
2069 up_write(&pch->chan_sem); 2165 up_write(&pch->chan_sem);
2070 ppp_disconnect_channel(pch); 2166 ppp_disconnect_channel(pch);
2071 spin_lock_bh(&all_channels_lock); 2167
2168 pn = ppp_pernet(pch->chan_net);
2169 spin_lock_bh(&pn->all_channels_lock);
2072 list_del(&pch->list); 2170 list_del(&pch->list);
2073 spin_unlock_bh(&all_channels_lock); 2171 spin_unlock_bh(&pn->all_channels_lock);
2172
2074 pch->file.dead = 1; 2173 pch->file.dead = 1;
2075 wake_up_interruptible(&pch->file.rwait); 2174 wake_up_interruptible(&pch->file.rwait);
2076 if (atomic_dec_and_test(&pch->file.refcnt)) 2175 if (atomic_dec_and_test(&pch->file.refcnt))
@@ -2395,9 +2494,10 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2395 * unit == -1 means allocate a new number. 2494 * unit == -1 means allocate a new number.
2396 */ 2495 */
2397static struct ppp * 2496static struct ppp *
2398ppp_create_interface(int unit, int *retp) 2497ppp_create_interface(struct net *net, int unit, int *retp)
2399{ 2498{
2400 struct ppp *ppp; 2499 struct ppp *ppp;
2500 struct ppp_net *pn;
2401 struct net_device *dev = NULL; 2501 struct net_device *dev = NULL;
2402 int ret = -ENOMEM; 2502 int ret = -ENOMEM;
2403 int i; 2503 int i;
@@ -2406,6 +2506,8 @@ ppp_create_interface(int unit, int *retp)
2406 if (!dev) 2506 if (!dev)
2407 goto out1; 2507 goto out1;
2408 2508
2509 pn = ppp_pernet(net);
2510
2409 ppp = netdev_priv(dev); 2511 ppp = netdev_priv(dev);
2410 ppp->dev = dev; 2512 ppp->dev = dev;
2411 ppp->mru = PPP_MRU; 2513 ppp->mru = PPP_MRU;
@@ -2421,17 +2523,23 @@ ppp_create_interface(int unit, int *retp)
2421 skb_queue_head_init(&ppp->mrq); 2523 skb_queue_head_init(&ppp->mrq);
2422#endif /* CONFIG_PPP_MULTILINK */ 2524#endif /* CONFIG_PPP_MULTILINK */
2423 2525
2526 /*
 2527 * don't forget to set the net
 2528 * this device belongs to
2529 */
2530 dev_net_set(dev, net);
2531
2424 ret = -EEXIST; 2532 ret = -EEXIST;
2425 mutex_lock(&all_ppp_mutex); 2533 mutex_lock(&pn->all_ppp_mutex);
2426 2534
2427 if (unit < 0) { 2535 if (unit < 0) {
2428 unit = unit_get(&ppp_units_idr, ppp); 2536 unit = unit_get(&pn->units_idr, ppp);
2429 if (unit < 0) { 2537 if (unit < 0) {
2430 *retp = unit; 2538 *retp = unit;
2431 goto out2; 2539 goto out2;
2432 } 2540 }
2433 } else { 2541 } else {
2434 if (unit_find(&ppp_units_idr, unit)) 2542 if (unit_find(&pn->units_idr, unit))
2435 goto out2; /* unit already exists */ 2543 goto out2; /* unit already exists */
2436 /* 2544 /*
2437 * if caller need a specified unit number 2545 * if caller need a specified unit number
@@ -2442,7 +2550,7 @@ ppp_create_interface(int unit, int *retp)
2442 * fair but at least pppd will ask us to allocate 2550 * fair but at least pppd will ask us to allocate
2443 * new unit in this case so user is happy :) 2551 * new unit in this case so user is happy :)
2444 */ 2552 */
2445 unit = unit_set(&ppp_units_idr, ppp, unit); 2553 unit = unit_set(&pn->units_idr, ppp, unit);
2446 if (unit < 0) 2554 if (unit < 0)
2447 goto out2; 2555 goto out2;
2448 } 2556 }
@@ -2453,20 +2561,22 @@ ppp_create_interface(int unit, int *retp)
2453 2561
2454 ret = register_netdev(dev); 2562 ret = register_netdev(dev);
2455 if (ret != 0) { 2563 if (ret != 0) {
2456 unit_put(&ppp_units_idr, unit); 2564 unit_put(&pn->units_idr, unit);
2457 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2565 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
2458 dev->name, ret); 2566 dev->name, ret);
2459 goto out2; 2567 goto out2;
2460 } 2568 }
2461 2569
2570 ppp->ppp_net = net;
2571
2462 atomic_inc(&ppp_unit_count); 2572 atomic_inc(&ppp_unit_count);
2463 mutex_unlock(&all_ppp_mutex); 2573 mutex_unlock(&pn->all_ppp_mutex);
2464 2574
2465 *retp = 0; 2575 *retp = 0;
2466 return ppp; 2576 return ppp;
2467 2577
2468out2: 2578out2:
2469 mutex_unlock(&all_ppp_mutex); 2579 mutex_unlock(&pn->all_ppp_mutex);
2470 free_netdev(dev); 2580 free_netdev(dev);
2471out1: 2581out1:
2472 *retp = ret; 2582 *retp = ret;
@@ -2492,7 +2602,11 @@ init_ppp_file(struct ppp_file *pf, int kind)
2492 */ 2602 */
2493static void ppp_shutdown_interface(struct ppp *ppp) 2603static void ppp_shutdown_interface(struct ppp *ppp)
2494{ 2604{
2495 mutex_lock(&all_ppp_mutex); 2605 struct ppp_net *pn;
2606
2607 pn = ppp_pernet(ppp->ppp_net);
2608 mutex_lock(&pn->all_ppp_mutex);
2609
2496 /* This will call dev_close() for us. */ 2610 /* This will call dev_close() for us. */
2497 ppp_lock(ppp); 2611 ppp_lock(ppp);
2498 if (!ppp->closing) { 2612 if (!ppp->closing) {
@@ -2502,11 +2616,12 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2502 } else 2616 } else
2503 ppp_unlock(ppp); 2617 ppp_unlock(ppp);
2504 2618
2505 unit_put(&ppp_units_idr, ppp->file.index); 2619 unit_put(&pn->units_idr, ppp->file.index);
2506 ppp->file.dead = 1; 2620 ppp->file.dead = 1;
2507 ppp->owner = NULL; 2621 ppp->owner = NULL;
2508 wake_up_interruptible(&ppp->file.rwait); 2622 wake_up_interruptible(&ppp->file.rwait);
2509 mutex_unlock(&all_ppp_mutex); 2623
2624 mutex_unlock(&pn->all_ppp_mutex);
2510} 2625}
2511 2626
2512/* 2627/*
@@ -2554,9 +2669,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
2554 * The caller should have locked the all_ppp_mutex. 2669 * The caller should have locked the all_ppp_mutex.
2555 */ 2670 */
2556static struct ppp * 2671static struct ppp *
2557ppp_find_unit(int unit) 2672ppp_find_unit(struct ppp_net *pn, int unit)
2558{ 2673{
2559 return unit_find(&ppp_units_idr, unit); 2674 return unit_find(&pn->units_idr, unit);
2560} 2675}
2561 2676
2562/* 2677/*
@@ -2568,20 +2683,22 @@ ppp_find_unit(int unit)
2568 * when we have a lot of channels in use. 2683 * when we have a lot of channels in use.
2569 */ 2684 */
2570static struct channel * 2685static struct channel *
2571ppp_find_channel(int unit) 2686ppp_find_channel(struct ppp_net *pn, int unit)
2572{ 2687{
2573 struct channel *pch; 2688 struct channel *pch;
2574 2689
2575 list_for_each_entry(pch, &new_channels, list) { 2690 list_for_each_entry(pch, &pn->new_channels, list) {
2576 if (pch->file.index == unit) { 2691 if (pch->file.index == unit) {
2577 list_move(&pch->list, &all_channels); 2692 list_move(&pch->list, &pn->all_channels);
2578 return pch; 2693 return pch;
2579 } 2694 }
2580 } 2695 }
2581 list_for_each_entry(pch, &all_channels, list) { 2696
2697 list_for_each_entry(pch, &pn->all_channels, list) {
2582 if (pch->file.index == unit) 2698 if (pch->file.index == unit)
2583 return pch; 2699 return pch;
2584 } 2700 }
2701
2585 return NULL; 2702 return NULL;
2586} 2703}
2587 2704
@@ -2592,11 +2709,14 @@ static int
2592ppp_connect_channel(struct channel *pch, int unit) 2709ppp_connect_channel(struct channel *pch, int unit)
2593{ 2710{
2594 struct ppp *ppp; 2711 struct ppp *ppp;
2712 struct ppp_net *pn;
2595 int ret = -ENXIO; 2713 int ret = -ENXIO;
2596 int hdrlen; 2714 int hdrlen;
2597 2715
2598 mutex_lock(&all_ppp_mutex); 2716 pn = ppp_pernet(pch->chan_net);
2599 ppp = ppp_find_unit(unit); 2717
2718 mutex_lock(&pn->all_ppp_mutex);
2719 ppp = ppp_find_unit(pn, unit);
2600 if (!ppp) 2720 if (!ppp)
2601 goto out; 2721 goto out;
2602 write_lock_bh(&pch->upl); 2722 write_lock_bh(&pch->upl);
@@ -2620,7 +2740,7 @@ ppp_connect_channel(struct channel *pch, int unit)
2620 outl: 2740 outl:
2621 write_unlock_bh(&pch->upl); 2741 write_unlock_bh(&pch->upl);
2622 out: 2742 out:
2623 mutex_unlock(&all_ppp_mutex); 2743 mutex_unlock(&pn->all_ppp_mutex);
2624 return ret; 2744 return ret;
2625} 2745}
2626 2746
@@ -2677,7 +2797,7 @@ static void __exit ppp_cleanup(void)
2677 unregister_chrdev(PPP_MAJOR, "ppp"); 2797 unregister_chrdev(PPP_MAJOR, "ppp");
2678 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2798 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2679 class_destroy(ppp_class); 2799 class_destroy(ppp_class);
2680 idr_destroy(&ppp_units_idr); 2800 unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
2681} 2801}
2682 2802
2683/* 2803/*
@@ -2743,6 +2863,7 @@ static void *unit_find(struct idr *p, int n)
2743module_init(ppp_init); 2863module_init(ppp_init);
2744module_exit(ppp_cleanup); 2864module_exit(ppp_cleanup);
2745 2865
2866EXPORT_SYMBOL(ppp_register_net_channel);
2746EXPORT_SYMBOL(ppp_register_channel); 2867EXPORT_SYMBOL(ppp_register_channel);
2747EXPORT_SYMBOL(ppp_unregister_channel); 2868EXPORT_SYMBOL(ppp_unregister_channel);
2748EXPORT_SYMBOL(ppp_channel_index); 2869EXPORT_SYMBOL(ppp_channel_index);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index c22b30533a1..af6321d9757 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -78,38 +78,73 @@
78#include <linux/proc_fs.h> 78#include <linux/proc_fs.h>
79#include <linux/seq_file.h> 79#include <linux/seq_file.h>
80 80
81#include <linux/nsproxy.h>
81#include <net/net_namespace.h> 82#include <net/net_namespace.h>
83#include <net/netns/generic.h>
82#include <net/sock.h> 84#include <net/sock.h>
83 85
84#include <asm/uaccess.h> 86#include <asm/uaccess.h>
85 87
86#define PPPOE_HASH_BITS 4 88#define PPPOE_HASH_BITS 4
87#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS) 89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
88 90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
89static struct ppp_channel_ops pppoe_chan_ops;
90 91
91static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 92static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
92static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); 93static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
93static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 94static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
94 95
95static const struct proto_ops pppoe_ops; 96static const struct proto_ops pppoe_ops;
96static DEFINE_RWLOCK(pppoe_hash_lock);
97
98static struct ppp_channel_ops pppoe_chan_ops; 97static struct ppp_channel_ops pppoe_chan_ops;
99 98
99/* per-net private data for this module */
100static unsigned int pppoe_net_id;
101struct pppoe_net {
102 /*
 103 * We could use a _single_ hash table for all
 104 * nets by injecting the net id into the hash,
 105 * but that would lengthen the hash chains and
 106 * add a few extra comparisons; keeping per-net
 107 * tables is cleaner and also means less lock
 108 * contention on SMP.
109 */
110 struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
111 rwlock_t hash_lock;
112};
113
 114/* to eliminate a race between pppoe_flush_dev and pppoe_release */
115static DEFINE_SPINLOCK(flush_lock);
116
117/*
 118 * A PPPoE connection can be in one of the following stages:
119 * 1) Discovery stage (to obtain remote MAC and Session ID)
120 * 2) Session stage (MAC and SID are known)
121 *
122 * Ethernet frames have a special tag for this but
 123 * we use a simpler approach based on the session id
124 */
125static inline bool stage_session(__be16 sid)
126{
127 return sid != 0;
128}
129
130static inline struct pppoe_net *pppoe_pernet(struct net *net)
131{
132 BUG_ON(!net);
133
134 return net_generic(net, pppoe_net_id);
135}
136
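
pppoe_pernet() above is the single funnel through which the rest of the file reaches its namespace's hash table; what varies between call sites is only where the struct net comes from. Two representative shapes are sketched below with hypothetical wrapper names (the real call sites are the pppoe_rcv() and pppoe_connect() hunks further down), assuming the includes already present in this file.

/* sketch: the two usual routes to the per-net data (wrapper names hypothetical) */
static struct pppoe_net *pn_from_device(struct net_device *dev)
{
	return pppoe_pernet(dev_net(dev));	/* receive / notifier paths */
}

static struct pppoe_net *pn_from_socket(struct sock *sk)
{
	return pppoe_pernet(sock_net(sk));	/* connect / ioctl paths */
}
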
100static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) 137static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
101{ 138{
102 return (a->sid == b->sid && 139 return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
103 (memcmp(a->remote, b->remote, ETH_ALEN) == 0));
104} 140}
105 141
106static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) 142static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
107{ 143{
108 return (a->sid == sid && 144 return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
109 (memcmp(a->remote,addr,ETH_ALEN) == 0));
110} 145}
111 146
112#if 8%PPPOE_HASH_BITS 147#if 8 % PPPOE_HASH_BITS
113#error 8 must be a multiple of PPPOE_HASH_BITS 148#error 8 must be a multiple of PPPOE_HASH_BITS
114#endif 149#endif
115 150
@@ -118,69 +153,71 @@ static int hash_item(__be16 sid, unsigned char *addr)
118 unsigned char hash = 0; 153 unsigned char hash = 0;
119 unsigned int i; 154 unsigned int i;
120 155
121 for (i = 0 ; i < ETH_ALEN ; i++) { 156 for (i = 0; i < ETH_ALEN; i++)
122 hash ^= addr[i]; 157 hash ^= addr[i];
123 } 158 for (i = 0; i < sizeof(sid_t) * 8; i += 8)
124 for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){ 159 hash ^= (__force __u32)sid >> i;
125 hash ^= (__force __u32)sid>>i; 160 for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;)
126 } 161 hash ^= hash >> i;
127 for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) {
128 hash ^= hash>>i;
129 }
130 162
131 return hash & ( PPPOE_HASH_SIZE - 1 ); 163 return hash & PPPOE_HASH_MASK;
132} 164}
133 165
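
The rewritten hash_item() above is just an XOR fold: mix the six MAC bytes into one byte, mix in both bytes of the session id, then fold the result down until only PPPOE_HASH_BITS bits are left. The standalone userspace re-implementation below walks one concrete input; it is for illustration only, with the __be16 byte-order handling of the real code simplified away.

/* userspace illustration of the fold; not kernel code */
#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 4			/* PPPOE_HASH_BITS */
#define HASH_MASK ((1 << HASH_BITS) - 1)

static int hash_item(uint16_t sid, const unsigned char *addr)
{
	unsigned char hash = 0;
	unsigned int i;

	for (i = 0; i < 6; i++)			/* ETH_ALEN */
		hash ^= addr[i];
	for (i = 0; i < 16; i += 8)		/* both bytes of the session id */
		hash ^= (unsigned char)(sid >> i);
	for (i = 8; (i >>= 1) >= HASH_BITS;)	/* fold 8 bits down to 4 */
		hash ^= hash >> i;

	return hash & HASH_MASK;
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

	/* MAC bytes XOR to 0x58; 0x58 ^ 0x1d = 0x45;
	 * fold: 0x45 ^ (0x45 >> 4) = 0x41; 0x41 & 0xf = bucket 1 */
	printf("bucket = %d\n", hash_item(0x001d, mac));
	return 0;
}
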
134/* zeroed because its in .bss */
135static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE];
136
137/********************************************************************** 166/**********************************************************************
138 * 167 *
139 * Set/get/delete/rehash items (internal versions) 168 * Set/get/delete/rehash items (internal versions)
140 * 169 *
141 **********************************************************************/ 170 **********************************************************************/
142static struct pppox_sock *__get_item(__be16 sid, unsigned char *addr, int ifindex) 171static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
172 unsigned char *addr, int ifindex)
143{ 173{
144 int hash = hash_item(sid, addr); 174 int hash = hash_item(sid, addr);
145 struct pppox_sock *ret; 175 struct pppox_sock *ret;
146 176
147 ret = item_hash_table[hash]; 177 ret = pn->hash_table[hash];
178 while (ret) {
179 if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
180 ret->pppoe_ifindex == ifindex)
181 return ret;
148 182
149 while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex))
150 ret = ret->next; 183 ret = ret->next;
184 }
151 185
152 return ret; 186 return NULL;
153} 187}
154 188
155static int __set_item(struct pppox_sock *po) 189static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
156{ 190{
157 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 191 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
158 struct pppox_sock *ret; 192 struct pppox_sock *ret;
159 193
160 ret = item_hash_table[hash]; 194 ret = pn->hash_table[hash];
161 while (ret) { 195 while (ret) {
162 if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) 196 if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
197 ret->pppoe_ifindex == po->pppoe_ifindex)
163 return -EALREADY; 198 return -EALREADY;
164 199
165 ret = ret->next; 200 ret = ret->next;
166 } 201 }
167 202
168 po->next = item_hash_table[hash]; 203 po->next = pn->hash_table[hash];
169 item_hash_table[hash] = po; 204 pn->hash_table[hash] = po;
170 205
171 return 0; 206 return 0;
172} 207}
173 208
174static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) 209static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid,
210 char *addr, int ifindex)
175{ 211{
176 int hash = hash_item(sid, addr); 212 int hash = hash_item(sid, addr);
177 struct pppox_sock *ret, **src; 213 struct pppox_sock *ret, **src;
178 214
179 ret = item_hash_table[hash]; 215 ret = pn->hash_table[hash];
180 src = &item_hash_table[hash]; 216 src = &pn->hash_table[hash];
181 217
182 while (ret) { 218 while (ret) {
183 if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { 219 if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
220 ret->pppoe_ifindex == ifindex) {
184 *src = ret->next; 221 *src = ret->next;
185 break; 222 break;
186 } 223 }
@@ -197,46 +234,54 @@ static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex)
197 * Set/get/delete/rehash items 234 * Set/get/delete/rehash items
198 * 235 *
199 **********************************************************************/ 236 **********************************************************************/
200static inline struct pppox_sock *get_item(__be16 sid, 237static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
201 unsigned char *addr, int ifindex) 238 unsigned char *addr, int ifindex)
202{ 239{
203 struct pppox_sock *po; 240 struct pppox_sock *po;
204 241
205 read_lock_bh(&pppoe_hash_lock); 242 read_lock_bh(&pn->hash_lock);
206 po = __get_item(sid, addr, ifindex); 243 po = __get_item(pn, sid, addr, ifindex);
207 if (po) 244 if (po)
208 sock_hold(sk_pppox(po)); 245 sock_hold(sk_pppox(po));
209 read_unlock_bh(&pppoe_hash_lock); 246 read_unlock_bh(&pn->hash_lock);
210 247
211 return po; 248 return po;
212} 249}
213 250
214static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) 251static inline struct pppox_sock *get_item_by_addr(struct net *net,
252 struct sockaddr_pppox *sp)
215{ 253{
216 struct net_device *dev; 254 struct net_device *dev;
255 struct pppoe_net *pn;
256 struct pppox_sock *pppox_sock;
257
217 int ifindex; 258 int ifindex;
218 259
219 dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); 260 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
220 if(!dev) 261 if (!dev)
221 return NULL; 262 return NULL;
263
222 ifindex = dev->ifindex; 264 ifindex = dev->ifindex;
265 pn = net_generic(net, pppoe_net_id);
266 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
267 sp->sa_addr.pppoe.remote, ifindex);
223 dev_put(dev); 268 dev_put(dev);
224 return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); 269
270 return pppox_sock;
225} 271}
226 272
227static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex) 273static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid,
274 char *addr, int ifindex)
228{ 275{
229 struct pppox_sock *ret; 276 struct pppox_sock *ret;
230 277
231 write_lock_bh(&pppoe_hash_lock); 278 write_lock_bh(&pn->hash_lock);
232 ret = __delete_item(sid, addr, ifindex); 279 ret = __delete_item(pn, sid, addr, ifindex);
233 write_unlock_bh(&pppoe_hash_lock); 280 write_unlock_bh(&pn->hash_lock);
234 281
235 return ret; 282 return ret;
236} 283}
237 284
238
239
240/*************************************************************************** 285/***************************************************************************
241 * 286 *
242 * Handler for device events. 287 * Handler for device events.
@@ -246,25 +291,33 @@ static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex
246 291
247static void pppoe_flush_dev(struct net_device *dev) 292static void pppoe_flush_dev(struct net_device *dev)
248{ 293{
249 int hash; 294 struct pppoe_net *pn;
295 int i;
296
250 BUG_ON(dev == NULL); 297 BUG_ON(dev == NULL);
251 298
252 write_lock_bh(&pppoe_hash_lock); 299 pn = pppoe_pernet(dev_net(dev));
253 for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { 300 if (!pn) /* already freed */
254 struct pppox_sock *po = item_hash_table[hash]; 301 return;
302
303 write_lock_bh(&pn->hash_lock);
304 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
305 struct pppox_sock *po = pn->hash_table[i];
255 306
256 while (po != NULL) { 307 while (po != NULL) {
257 struct sock *sk = sk_pppox(po); 308 struct sock *sk;
258 if (po->pppoe_dev != dev) { 309 if (po->pppoe_dev != dev) {
259 po = po->next; 310 po = po->next;
260 continue; 311 continue;
261 } 312 }
313 sk = sk_pppox(po);
314 spin_lock(&flush_lock);
262 po->pppoe_dev = NULL; 315 po->pppoe_dev = NULL;
316 spin_unlock(&flush_lock);
263 dev_put(dev); 317 dev_put(dev);
264 318
265
266 /* We always grab the socket lock, followed by the 319 /* We always grab the socket lock, followed by the
267 * pppoe_hash_lock, in that order. Since we should 320 * hash_lock, in that order. Since we should
268 * hold the sock lock while doing any unbinding, 321 * hold the sock lock while doing any unbinding,
269 * we need to release the lock we're holding. 322 * we need to release the lock we're holding.
270 * Hold a reference to the sock so it doesn't disappear 323 * Hold a reference to the sock so it doesn't disappear
@@ -273,7 +326,7 @@ static void pppoe_flush_dev(struct net_device *dev)
273 326
274 sock_hold(sk); 327 sock_hold(sk);
275 328
276 write_unlock_bh(&pppoe_hash_lock); 329 write_unlock_bh(&pn->hash_lock);
277 lock_sock(sk); 330 lock_sock(sk);
278 331
279 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 332 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
@@ -289,20 +342,17 @@ static void pppoe_flush_dev(struct net_device *dev)
289 * While the lock was dropped the chain contents may 342 * While the lock was dropped the chain contents may
290 * have changed. 343 * have changed.
291 */ 344 */
292 write_lock_bh(&pppoe_hash_lock); 345 write_lock_bh(&pn->hash_lock);
293 po = item_hash_table[hash]; 346 po = pn->hash_table[i];
294 } 347 }
295 } 348 }
296 write_unlock_bh(&pppoe_hash_lock); 349 write_unlock_bh(&pn->hash_lock);
297} 350}
298 351
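
pppoe_flush_dev() above follows a classic lock-ordering dance: the socket lock ranks above the hash lock, so before unbinding a socket the write-held hash lock has to be dropped, and once it is re-taken the chain has to be rescanned from its head because it may have changed in between. Stripped of the PPPoE specifics, the shape is roughly the sketch below, with hypothetical item/table names; it is a sketch of the pattern, not driver code.

/* sketch of the drop-lock / unbind / relock / rescan pattern (hypothetical names) */
#include <linux/spinlock.h>

#define TABLE_SIZE 16

struct item {
	struct item *next;
	int bound_to_dying_dev;
};

static struct item *table[TABLE_SIZE];
static DEFINE_RWLOCK(table_lock);

static void flush_table(void)
{
	int i;

	write_lock_bh(&table_lock);
	for (i = 0; i < TABLE_SIZE; i++) {
		struct item *it = table[i];

		while (it) {
			if (!it->bound_to_dying_dev) {
				it = it->next;
				continue;
			}
			/*
			 * The per-item lock ranks above table_lock, so take a
			 * reference on "it", drop table_lock, lock the item and
			 * unbind it (clearing bound_to_dying_dev), then drop
			 * the reference again.
			 */
			write_unlock_bh(&table_lock);

			/* ... lock item, unbind, unlock, put reference ... */

			write_lock_bh(&table_lock);
			it = table[i];	/* chain may have changed: rescan */
		}
	}
	write_unlock_bh(&table_lock);
}
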
299static int pppoe_device_event(struct notifier_block *this, 352static int pppoe_device_event(struct notifier_block *this,
300 unsigned long event, void *ptr) 353 unsigned long event, void *ptr)
301{ 354{
302 struct net_device *dev = (struct net_device *) ptr; 355 struct net_device *dev = (struct net_device *)ptr;
303
304 if (dev_net(dev) != &init_net)
305 return NOTIFY_DONE;
306 356
307 /* Only look at sockets that are using this specific device. */ 357 /* Only look at sockets that are using this specific device. */
308 switch (event) { 358 switch (event) {
@@ -324,12 +374,10 @@ static int pppoe_device_event(struct notifier_block *this,
324 return NOTIFY_DONE; 374 return NOTIFY_DONE;
325} 375}
326 376
327
328static struct notifier_block pppoe_notifier = { 377static struct notifier_block pppoe_notifier = {
329 .notifier_call = pppoe_device_event, 378 .notifier_call = pppoe_device_event,
330}; 379};
331 380
332
333/************************************************************************ 381/************************************************************************
334 * 382 *
335 * Do the real work of receiving a PPPoE Session frame. 383 * Do the real work of receiving a PPPoE Session frame.
@@ -343,8 +391,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
343 if (sk->sk_state & PPPOX_BOUND) { 391 if (sk->sk_state & PPPOX_BOUND) {
344 ppp_input(&po->chan, skb); 392 ppp_input(&po->chan, skb);
345 } else if (sk->sk_state & PPPOX_RELAY) { 393 } else if (sk->sk_state & PPPOX_RELAY) {
346 relay_po = get_item_by_addr(&po->pppoe_relay); 394 relay_po = get_item_by_addr(dev_net(po->pppoe_dev),
347 395 &po->pppoe_relay);
348 if (relay_po == NULL) 396 if (relay_po == NULL)
349 goto abort_kfree; 397 goto abort_kfree;
350 398
@@ -373,22 +421,18 @@ abort_kfree:
373 * Receive wrapper called in BH context. 421 * Receive wrapper called in BH context.
374 * 422 *
375 ***********************************************************************/ 423 ***********************************************************************/
376static int pppoe_rcv(struct sk_buff *skb, 424static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
377 struct net_device *dev, 425 struct packet_type *pt, struct net_device *orig_dev)
378 struct packet_type *pt,
379 struct net_device *orig_dev)
380
381{ 426{
382 struct pppoe_hdr *ph; 427 struct pppoe_hdr *ph;
383 struct pppox_sock *po; 428 struct pppox_sock *po;
429 struct pppoe_net *pn;
384 int len; 430 int len;
385 431
386 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 432 skb = skb_share_check(skb, GFP_ATOMIC);
433 if (!skb)
387 goto out; 434 goto out;
388 435
389 if (dev_net(dev) != &init_net)
390 goto drop;
391
392 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 436 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
393 goto drop; 437 goto drop;
394 438
@@ -402,7 +446,8 @@ static int pppoe_rcv(struct sk_buff *skb,
402 if (pskb_trim_rcsum(skb, len)) 446 if (pskb_trim_rcsum(skb, len))
403 goto drop; 447 goto drop;
404 448
405 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 449 pn = pppoe_pernet(dev_net(dev));
450 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
406 if (!po) 451 if (!po)
407 goto drop; 452 goto drop;
408 453
@@ -420,19 +465,16 @@ out:
420 * This is solely for detection of PADT frames 465 * This is solely for detection of PADT frames
421 * 466 *
422 ***********************************************************************/ 467 ***********************************************************************/
423static int pppoe_disc_rcv(struct sk_buff *skb, 468static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
424 struct net_device *dev, 469 struct packet_type *pt, struct net_device *orig_dev)
425 struct packet_type *pt,
426 struct net_device *orig_dev)
427 470
428{ 471{
429 struct pppoe_hdr *ph; 472 struct pppoe_hdr *ph;
430 struct pppox_sock *po; 473 struct pppox_sock *po;
474 struct pppoe_net *pn;
431 475
432 if (dev_net(dev) != &init_net) 476 skb = skb_share_check(skb, GFP_ATOMIC);
433 goto abort; 477 if (!skb)
434
435 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
436 goto out; 478 goto out;
437 479
438 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 480 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
@@ -442,7 +484,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
442 if (ph->code != PADT_CODE) 484 if (ph->code != PADT_CODE)
443 goto abort; 485 goto abort;
444 486
445 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 487 pn = pppoe_pernet(dev_net(dev));
488 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
446 if (po) { 489 if (po) {
447 struct sock *sk = sk_pppox(po); 490 struct sock *sk = sk_pppox(po);
448 491
@@ -471,12 +514,12 @@ out:
471} 514}
472 515
473static struct packet_type pppoes_ptype = { 516static struct packet_type pppoes_ptype = {
474 .type = __constant_htons(ETH_P_PPP_SES), 517 .type = cpu_to_be16(ETH_P_PPP_SES),
475 .func = pppoe_rcv, 518 .func = pppoe_rcv,
476}; 519};
477 520
478static struct packet_type pppoed_ptype = { 521static struct packet_type pppoed_ptype = {
479 .type = __constant_htons(ETH_P_PPP_DISC), 522 .type = cpu_to_be16(ETH_P_PPP_DISC),
480 .func = pppoe_disc_rcv, 523 .func = pppoe_disc_rcv,
481}; 524};
482 525
@@ -493,38 +536,37 @@ static struct proto pppoe_sk_proto = {
493 **********************************************************************/ 536 **********************************************************************/
494static int pppoe_create(struct net *net, struct socket *sock) 537static int pppoe_create(struct net *net, struct socket *sock)
495{ 538{
496 int error = -ENOMEM;
497 struct sock *sk; 539 struct sock *sk;
498 540
499 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); 541 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
500 if (!sk) 542 if (!sk)
501 goto out; 543 return -ENOMEM;
502 544
503 sock_init_data(sock, sk); 545 sock_init_data(sock, sk);
504 546
505 sock->state = SS_UNCONNECTED; 547 sock->state = SS_UNCONNECTED;
506 sock->ops = &pppoe_ops; 548 sock->ops = &pppoe_ops;
507 549
508 sk->sk_backlog_rcv = pppoe_rcv_core; 550 sk->sk_backlog_rcv = pppoe_rcv_core;
509 sk->sk_state = PPPOX_NONE; 551 sk->sk_state = PPPOX_NONE;
510 sk->sk_type = SOCK_STREAM; 552 sk->sk_type = SOCK_STREAM;
511 sk->sk_family = PF_PPPOX; 553 sk->sk_family = PF_PPPOX;
512 sk->sk_protocol = PX_PROTO_OE; 554 sk->sk_protocol = PX_PROTO_OE;
513 555
514 error = 0; 556 return 0;
515out: return error;
516} 557}
517 558
518static int pppoe_release(struct socket *sock) 559static int pppoe_release(struct socket *sock)
519{ 560{
520 struct sock *sk = sock->sk; 561 struct sock *sk = sock->sk;
521 struct pppox_sock *po; 562 struct pppox_sock *po;
563 struct pppoe_net *pn;
522 564
523 if (!sk) 565 if (!sk)
524 return 0; 566 return 0;
525 567
526 lock_sock(sk); 568 lock_sock(sk);
527 if (sock_flag(sk, SOCK_DEAD)){ 569 if (sock_flag(sk, SOCK_DEAD)) {
528 release_sock(sk); 570 release_sock(sk);
529 return -EBADF; 571 return -EBADF;
530 } 572 }
@@ -534,26 +576,39 @@ static int pppoe_release(struct socket *sock)
534 /* Signal the death of the socket. */ 576 /* Signal the death of the socket. */
535 sk->sk_state = PPPOX_DEAD; 577 sk->sk_state = PPPOX_DEAD;
536 578
579 /*
 580 * pppoe_flush_dev() can race with this routine,
 581 * so take flush_lock to close that window (it is
 582 * only needed to safely fetch the per-net data)
583 */
584 spin_lock(&flush_lock);
585 po = pppox_sk(sk);
586 if (!po->pppoe_dev) {
587 spin_unlock(&flush_lock);
588 goto out;
589 }
590 pn = pppoe_pernet(dev_net(po->pppoe_dev));
591 spin_unlock(&flush_lock);
537 592
538 /* Write lock on hash lock protects the entire "po" struct from 593 /*
539 * concurrent updates via pppoe_flush_dev. The "po" struct should 594 * protect "po" from concurrent updates
540 * be considered part of the hash table contents, thus protected 595 * on pppoe_flush_dev
541 * by the hash table lock */ 596 */
542 write_lock_bh(&pppoe_hash_lock); 597 write_lock_bh(&pn->hash_lock);
543 598
544 po = pppox_sk(sk); 599 po = pppox_sk(sk);
545 if (po->pppoe_pa.sid) { 600 if (stage_session(po->pppoe_pa.sid))
546 __delete_item(po->pppoe_pa.sid, 601 __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
547 po->pppoe_pa.remote, po->pppoe_ifindex); 602 po->pppoe_ifindex);
548 }
549 603
550 if (po->pppoe_dev) { 604 if (po->pppoe_dev) {
551 dev_put(po->pppoe_dev); 605 dev_put(po->pppoe_dev);
552 po->pppoe_dev = NULL; 606 po->pppoe_dev = NULL;
553 } 607 }
554 608
555 write_unlock_bh(&pppoe_hash_lock); 609 write_unlock_bh(&pn->hash_lock);
556 610
611out:
557 sock_orphan(sk); 612 sock_orphan(sk);
558 sock->sk = NULL; 613 sock->sk = NULL;
559 614
@@ -564,14 +619,14 @@ static int pppoe_release(struct socket *sock)
564 return 0; 619 return 0;
565} 620}
566 621
567
568static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, 622static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
569 int sockaddr_len, int flags) 623 int sockaddr_len, int flags)
570{ 624{
571 struct sock *sk = sock->sk; 625 struct sock *sk = sock->sk;
572 struct net_device *dev; 626 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
573 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
574 struct pppox_sock *po = pppox_sk(sk); 627 struct pppox_sock *po = pppox_sk(sk);
628 struct net_device *dev;
629 struct pppoe_net *pn;
575 int error; 630 int error;
576 631
577 lock_sock(sk); 632 lock_sock(sk);
@@ -582,44 +637,45 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
582 637
583 /* Check for already bound sockets */ 638 /* Check for already bound sockets */
584 error = -EBUSY; 639 error = -EBUSY;
585 if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid) 640 if ((sk->sk_state & PPPOX_CONNECTED) &&
641 stage_session(sp->sa_addr.pppoe.sid))
586 goto end; 642 goto end;
587 643
588 /* Check for already disconnected sockets, on attempts to disconnect */ 644 /* Check for already disconnected sockets, on attempts to disconnect */
589 error = -EALREADY; 645 error = -EALREADY;
590 if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid ) 646 if ((sk->sk_state & PPPOX_DEAD) &&
647 !stage_session(sp->sa_addr.pppoe.sid))
591 goto end; 648 goto end;
592 649
593 error = 0; 650 error = 0;
594 if (po->pppoe_pa.sid) {
595 pppox_unbind_sock(sk);
596
597 /* Delete the old binding */
598 delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex);
599 651
600 if(po->pppoe_dev) 652 /* Delete the old binding */
653 if (stage_session(po->pppoe_pa.sid)) {
654 pppox_unbind_sock(sk);
655 if (po->pppoe_dev) {
656 pn = pppoe_pernet(dev_net(po->pppoe_dev));
657 delete_item(pn, po->pppoe_pa.sid,
658 po->pppoe_pa.remote, po->pppoe_ifindex);
601 dev_put(po->pppoe_dev); 659 dev_put(po->pppoe_dev);
602 660 }
603 memset(sk_pppox(po) + 1, 0, 661 memset(sk_pppox(po) + 1, 0,
604 sizeof(struct pppox_sock) - sizeof(struct sock)); 662 sizeof(struct pppox_sock) - sizeof(struct sock));
605
606 sk->sk_state = PPPOX_NONE; 663 sk->sk_state = PPPOX_NONE;
607 } 664 }
608 665
609 /* Don't re-bind if sid==0 */ 666 /* Re-bind in session stage only */
610 if (sp->sa_addr.pppoe.sid != 0) { 667 if (stage_session(sp->sa_addr.pppoe.sid)) {
611 dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev);
612
613 error = -ENODEV; 668 error = -ENODEV;
669 dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev);
614 if (!dev) 670 if (!dev)
615 goto end; 671 goto end;
616 672
617 po->pppoe_dev = dev; 673 po->pppoe_dev = dev;
618 po->pppoe_ifindex = dev->ifindex; 674 po->pppoe_ifindex = dev->ifindex;
619 675 pn = pppoe_pernet(dev_net(dev));
620 write_lock_bh(&pppoe_hash_lock); 676 write_lock_bh(&pn->hash_lock);
621 if (!(dev->flags & IFF_UP)){ 677 if (!(dev->flags & IFF_UP)) {
622 write_unlock_bh(&pppoe_hash_lock); 678 write_unlock_bh(&pn->hash_lock);
623 goto err_put; 679 goto err_put;
624 } 680 }
625 681
@@ -627,8 +683,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
627 &sp->sa_addr.pppoe, 683 &sp->sa_addr.pppoe,
628 sizeof(struct pppoe_addr)); 684 sizeof(struct pppoe_addr));
629 685
630 error = __set_item(po); 686 error = __set_item(pn, po);
631 write_unlock_bh(&pppoe_hash_lock); 687 write_unlock_bh(&pn->hash_lock);
632 if (error < 0) 688 if (error < 0)
633 goto err_put; 689 goto err_put;
634 690
@@ -639,7 +695,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
639 po->chan.private = sk; 695 po->chan.private = sk;
640 po->chan.ops = &pppoe_chan_ops; 696 po->chan.ops = &pppoe_chan_ops;
641 697
642 error = ppp_register_channel(&po->chan); 698 error = ppp_register_net_channel(dev_net(dev), &po->chan);
643 if (error) 699 if (error)
644 goto err_put; 700 goto err_put;
645 701
@@ -648,7 +704,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
648 704
649 po->num = sp->sa_addr.pppoe.sid; 705 po->num = sp->sa_addr.pppoe.sid;
650 706
651 end: 707end:
652 release_sock(sk); 708 release_sock(sk);
653 return error; 709 return error;
654err_put: 710err_put:
@@ -659,7 +715,6 @@ err_put:
659 goto end; 715 goto end;
660} 716}
661 717
662
663static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, 718static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
664 int *usockaddr_len, int peer) 719 int *usockaddr_len, int peer)
665{ 720{
@@ -678,7 +733,6 @@ static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
678 return 0; 733 return 0;
679} 734}
680 735
681
682static int pppoe_ioctl(struct socket *sock, unsigned int cmd, 736static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
683 unsigned long arg) 737 unsigned long arg)
684{ 738{
@@ -690,7 +744,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
690 switch (cmd) { 744 switch (cmd) {
691 case PPPIOCGMRU: 745 case PPPIOCGMRU:
692 err = -ENXIO; 746 err = -ENXIO;
693
694 if (!(sk->sk_state & PPPOX_CONNECTED)) 747 if (!(sk->sk_state & PPPOX_CONNECTED))
695 break; 748 break;
696 749
@@ -698,7 +751,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
698 if (put_user(po->pppoe_dev->mtu - 751 if (put_user(po->pppoe_dev->mtu -
699 sizeof(struct pppoe_hdr) - 752 sizeof(struct pppoe_hdr) -
700 PPP_HDRLEN, 753 PPP_HDRLEN,
701 (int __user *) arg)) 754 (int __user *)arg))
702 break; 755 break;
703 err = 0; 756 err = 0;
704 break; 757 break;
@@ -709,7 +762,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
709 break; 762 break;
710 763
711 err = -EFAULT; 764 err = -EFAULT;
712 if (get_user(val,(int __user *) arg)) 765 if (get_user(val, (int __user *)arg))
713 break; 766 break;
714 767
715 if (val < (po->pppoe_dev->mtu 768 if (val < (po->pppoe_dev->mtu
@@ -722,7 +775,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
722 775
723 case PPPIOCSFLAGS: 776 case PPPIOCSFLAGS:
724 err = -EFAULT; 777 err = -EFAULT;
725 if (get_user(val, (int __user *) arg)) 778 if (get_user(val, (int __user *)arg))
726 break; 779 break;
727 err = 0; 780 err = 0;
728 break; 781 break;
@@ -749,13 +802,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
749 802
750 err = -EINVAL; 803 err = -EINVAL;
751 if (po->pppoe_relay.sa_family != AF_PPPOX || 804 if (po->pppoe_relay.sa_family != AF_PPPOX ||
752 po->pppoe_relay.sa_protocol!= PX_PROTO_OE) 805 po->pppoe_relay.sa_protocol != PX_PROTO_OE)
753 break; 806 break;
754 807
755 /* Check that the socket referenced by the address 808 /* Check that the socket referenced by the address
756 actually exists. */ 809 actually exists. */
757 relay_po = get_item_by_addr(&po->pppoe_relay); 810 relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
758
759 if (!relay_po) 811 if (!relay_po)
760 break; 812 break;
761 813
@@ -781,7 +833,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
781 return err; 833 return err;
782} 834}
783 835
784
785static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, 836static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
786 struct msghdr *m, size_t total_len) 837 struct msghdr *m, size_t total_len)
787{ 838{
@@ -808,7 +859,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
808 dev = po->pppoe_dev; 859 dev = po->pppoe_dev;
809 860
810 error = -EMSGSIZE; 861 error = -EMSGSIZE;
811 if (total_len > (dev->mtu + dev->hard_header_len)) 862 if (total_len > (dev->mtu + dev->hard_header_len))
812 goto end; 863 goto end;
813 864
814 865
@@ -826,13 +877,12 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
826 skb->dev = dev; 877 skb->dev = dev;
827 878
828 skb->priority = sk->sk_priority; 879 skb->priority = sk->sk_priority;
829 skb->protocol = __constant_htons(ETH_P_PPP_SES); 880 skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
830 881
831 ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr)); 882 ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
832 start = (char *) &ph->tag[0]; 883 start = (char *)&ph->tag[0];
833 884
834 error = memcpy_fromiovec(start, m->msg_iov, total_len); 885 error = memcpy_fromiovec(start, m->msg_iov, total_len);
835
836 if (error < 0) { 886 if (error < 0) {
837 kfree_skb(skb); 887 kfree_skb(skb);
838 goto end; 888 goto end;
@@ -853,7 +903,6 @@ end:
853 return error; 903 return error;
854} 904}
855 905
856
857/************************************************************************ 906/************************************************************************
858 * 907 *
859 * xmit function for internal use. 908 * xmit function for internal use.
@@ -888,7 +937,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
888 ph->sid = po->num; 937 ph->sid = po->num;
889 ph->length = htons(data_len); 938 ph->length = htons(data_len);
890 939
891 skb->protocol = __constant_htons(ETH_P_PPP_SES); 940 skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
892 skb->dev = dev; 941 skb->dev = dev;
893 942
894 dev_hard_header(skb, dev, ETH_P_PPP_SES, 943 dev_hard_header(skb, dev, ETH_P_PPP_SES,
@@ -903,7 +952,6 @@ abort:
903 return 1; 952 return 1;
904} 953}
905 954
906
907/************************************************************************ 955/************************************************************************
908 * 956 *
909 * xmit function called by generic PPP driver 957 * xmit function called by generic PPP driver
@@ -912,11 +960,10 @@ abort:
912 ***********************************************************************/ 960 ***********************************************************************/
913static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) 961static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
914{ 962{
915 struct sock *sk = (struct sock *) chan->private; 963 struct sock *sk = (struct sock *)chan->private;
916 return __pppoe_xmit(sk, skb); 964 return __pppoe_xmit(sk, skb);
917} 965}
918 966
919
920static struct ppp_channel_ops pppoe_chan_ops = { 967static struct ppp_channel_ops pppoe_chan_ops = {
921 .start_xmit = pppoe_xmit, 968 .start_xmit = pppoe_xmit,
922}; 969};
@@ -935,7 +982,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
935 982
936 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 983 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
937 flags & MSG_DONTWAIT, &error); 984 flags & MSG_DONTWAIT, &error);
938
939 if (error < 0) 985 if (error < 0)
940 goto end; 986 goto end;
941 987
@@ -968,44 +1014,47 @@ static int pppoe_seq_show(struct seq_file *seq, void *v)
968 dev_name = po->pppoe_pa.dev; 1014 dev_name = po->pppoe_pa.dev;
969 1015
970 seq_printf(seq, "%08X %pM %8s\n", 1016 seq_printf(seq, "%08X %pM %8s\n",
971 po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); 1017 po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
972out: 1018out:
973 return 0; 1019 return 0;
974} 1020}
975 1021
976static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) 1022static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
977{ 1023{
978 struct pppox_sock *po; 1024 struct pppox_sock *po;
979 int i = 0; 1025 int i;
980 1026
981 for (; i < PPPOE_HASH_SIZE; i++) { 1027 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
982 po = item_hash_table[i]; 1028 po = pn->hash_table[i];
983 while (po) { 1029 while (po) {
984 if (!pos--) 1030 if (!pos--)
985 goto out; 1031 goto out;
986 po = po->next; 1032 po = po->next;
987 } 1033 }
988 } 1034 }
1035
989out: 1036out:
990 return po; 1037 return po;
991} 1038}
992 1039
993static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) 1040static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
994 __acquires(pppoe_hash_lock) 1041 __acquires(pn->hash_lock)
995{ 1042{
1043 struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
996 loff_t l = *pos; 1044 loff_t l = *pos;
997 1045
998 read_lock_bh(&pppoe_hash_lock); 1046 read_lock_bh(&pn->hash_lock);
999 return l ? pppoe_get_idx(--l) : SEQ_START_TOKEN; 1047 return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
1000} 1048}
1001 1049
1002static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1050static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1003{ 1051{
1052 struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
1004 struct pppox_sock *po; 1053 struct pppox_sock *po;
1005 1054
1006 ++*pos; 1055 ++*pos;
1007 if (v == SEQ_START_TOKEN) { 1056 if (v == SEQ_START_TOKEN) {
1008 po = pppoe_get_idx(0); 1057 po = pppoe_get_idx(pn, 0);
1009 goto out; 1058 goto out;
1010 } 1059 }
1011 po = v; 1060 po = v;
@@ -1015,22 +1064,24 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1015 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
1016 1065
1017 while (++hash < PPPOE_HASH_SIZE) { 1066 while (++hash < PPPOE_HASH_SIZE) {
1018 po = item_hash_table[hash]; 1067 po = pn->hash_table[hash];
1019 if (po) 1068 if (po)
1020 break; 1069 break;
1021 } 1070 }
1022 } 1071 }
1072
1023out: 1073out:
1024 return po; 1074 return po;
1025} 1075}
1026 1076
1027static void pppoe_seq_stop(struct seq_file *seq, void *v) 1077static void pppoe_seq_stop(struct seq_file *seq, void *v)
1028 __releases(pppoe_hash_lock) 1078 __releases(pn->hash_lock)
1029{ 1079{
1030 read_unlock_bh(&pppoe_hash_lock); 1080 struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
1081 read_unlock_bh(&pn->hash_lock);
1031} 1082}
1032 1083
1033static struct seq_operations pppoe_seq_ops = { 1084static const struct seq_operations pppoe_seq_ops = {
1034 .start = pppoe_seq_start, 1085 .start = pppoe_seq_start,
1035 .next = pppoe_seq_next, 1086 .next = pppoe_seq_next,
1036 .stop = pppoe_seq_stop, 1087 .stop = pppoe_seq_stop,
@@ -1039,7 +1090,8 @@ static struct seq_operations pppoe_seq_ops = {
1039 1090
1040static int pppoe_seq_open(struct inode *inode, struct file *file) 1091static int pppoe_seq_open(struct inode *inode, struct file *file)
1041{ 1092{
1042 return seq_open(file, &pppoe_seq_ops); 1093 return seq_open_net(inode, file, &pppoe_seq_ops,
1094 sizeof(struct seq_net_private));
1043} 1095}
1044 1096
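
The switch to seq_open_net()/seq_release_net() above is what lets the iterator callbacks ask "which namespace is this file for?" via seq_file_net(): the open helper reserves a struct seq_net_private at the start of the seq_file's private data and records the namespace there. Below is a minimal, hedged sketch of that shape for some hypothetical per-net list; all foo_* names are placeholders, while the seq_*_net helpers are the real API.

/* hedged sketch of a netns-aware /proc/net seq_file; foo_* names are placeholders */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/seq_file_net.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int foo_net_id;

struct foo_net {
	struct list_head foo_list;
};

static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct foo_net *fn = net_generic(seq_file_net(seq), foo_net_id);

	return seq_list_start(&fn->foo_list, *pos);
}

static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct foo_net *fn = net_generic(seq_file_net(seq), foo_net_id);

	return seq_list_next(v, &fn->foo_list, pos);
}

static void foo_seq_stop(struct seq_file *seq, void *v)
{
}

static int foo_seq_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "entry\n");
	return 0;
}

static const struct seq_operations foo_seq_ops = {
	.start = foo_seq_start,
	.next  = foo_seq_next,
	.stop  = foo_seq_stop,
	.show  = foo_seq_show,
};

static int foo_seq_open(struct inode *inode, struct file *file)
{
	/* seq_net_private must sit at the start of the private data */
	return seq_open_net(inode, file, &foo_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations foo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
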
1045static const struct file_operations pppoe_seq_fops = { 1097static const struct file_operations pppoe_seq_fops = {
@@ -1047,74 +1099,115 @@ static const struct file_operations pppoe_seq_fops = {
1047 .open = pppoe_seq_open, 1099 .open = pppoe_seq_open,
1048 .read = seq_read, 1100 .read = seq_read,
1049 .llseek = seq_lseek, 1101 .llseek = seq_lseek,
1050 .release = seq_release, 1102 .release = seq_release_net,
1051}; 1103};
1052 1104
1053static int __init pppoe_proc_init(void)
1054{
1055 struct proc_dir_entry *p;
1056
1057 p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops);
1058 if (!p)
1059 return -ENOMEM;
1060 return 0;
1061}
1062#else /* CONFIG_PROC_FS */
1063static inline int pppoe_proc_init(void) { return 0; }
1064#endif /* CONFIG_PROC_FS */ 1105#endif /* CONFIG_PROC_FS */
1065 1106
1066static const struct proto_ops pppoe_ops = { 1107static const struct proto_ops pppoe_ops = {
1067 .family = AF_PPPOX, 1108 .family = AF_PPPOX,
1068 .owner = THIS_MODULE, 1109 .owner = THIS_MODULE,
1069 .release = pppoe_release, 1110 .release = pppoe_release,
1070 .bind = sock_no_bind, 1111 .bind = sock_no_bind,
1071 .connect = pppoe_connect, 1112 .connect = pppoe_connect,
1072 .socketpair = sock_no_socketpair, 1113 .socketpair = sock_no_socketpair,
1073 .accept = sock_no_accept, 1114 .accept = sock_no_accept,
1074 .getname = pppoe_getname, 1115 .getname = pppoe_getname,
1075 .poll = datagram_poll, 1116 .poll = datagram_poll,
1076 .listen = sock_no_listen, 1117 .listen = sock_no_listen,
1077 .shutdown = sock_no_shutdown, 1118 .shutdown = sock_no_shutdown,
1078 .setsockopt = sock_no_setsockopt, 1119 .setsockopt = sock_no_setsockopt,
1079 .getsockopt = sock_no_getsockopt, 1120 .getsockopt = sock_no_getsockopt,
1080 .sendmsg = pppoe_sendmsg, 1121 .sendmsg = pppoe_sendmsg,
1081 .recvmsg = pppoe_recvmsg, 1122 .recvmsg = pppoe_recvmsg,
1082 .mmap = sock_no_mmap, 1123 .mmap = sock_no_mmap,
1083 .ioctl = pppox_ioctl, 1124 .ioctl = pppox_ioctl,
1084}; 1125};
1085 1126
1086static struct pppox_proto pppoe_proto = { 1127static struct pppox_proto pppoe_proto = {
1087 .create = pppoe_create, 1128 .create = pppoe_create,
1088 .ioctl = pppoe_ioctl, 1129 .ioctl = pppoe_ioctl,
1089 .owner = THIS_MODULE, 1130 .owner = THIS_MODULE,
1090}; 1131};
1091 1132
1133static __net_init int pppoe_init_net(struct net *net)
1134{
1135 struct pppoe_net *pn;
1136 struct proc_dir_entry *pde;
1137 int err;
1138
1139 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
1140 if (!pn)
1141 return -ENOMEM;
1142
1143 rwlock_init(&pn->hash_lock);
1144
1145 err = net_assign_generic(net, pppoe_net_id, pn);
1146 if (err)
1147 goto out;
1148
1149 pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
1150#ifdef CONFIG_PROC_FS
1151 if (!pde) {
1152 err = -ENOMEM;
1153 goto out;
1154 }
1155#endif
1156
1157 return 0;
1158
1159out:
1160 kfree(pn);
1161 return err;
1162}
1163
1164static __net_exit void pppoe_exit_net(struct net *net)
1165{
1166 struct pppoe_net *pn;
1167
1168 proc_net_remove(net, "pppoe");
1169 pn = net_generic(net, pppoe_net_id);
1170 /*
 1171 * if someone has cached our net, any further
 1172 * net_generic() call will return NULL
1173 */
1174 net_assign_generic(net, pppoe_net_id, NULL);
1175 kfree(pn);
1176}
1177
1178static struct pernet_operations pppoe_net_ops = {
1179 .init = pppoe_init_net,
1180 .exit = pppoe_exit_net,
1181};
1092 1182
1093static int __init pppoe_init(void) 1183static int __init pppoe_init(void)
1094{ 1184{
1095 int err = proto_register(&pppoe_sk_proto, 0); 1185 int err;
1096 1186
1187 err = proto_register(&pppoe_sk_proto, 0);
1097 if (err) 1188 if (err)
1098 goto out; 1189 goto out;
1099 1190
1100 err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); 1191 err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
1101 if (err) 1192 if (err)
1102 goto out_unregister_pppoe_proto; 1193 goto out_unregister_pppoe_proto;
1103 1194
1104 err = pppoe_proc_init(); 1195 err = register_pernet_gen_device(&pppoe_net_id, &pppoe_net_ops);
1105 if (err) 1196 if (err)
1106 goto out_unregister_pppox_proto; 1197 goto out_unregister_pppox_proto;
1107 1198
1108 dev_add_pack(&pppoes_ptype); 1199 dev_add_pack(&pppoes_ptype);
1109 dev_add_pack(&pppoed_ptype); 1200 dev_add_pack(&pppoed_ptype);
1110 register_netdevice_notifier(&pppoe_notifier); 1201 register_netdevice_notifier(&pppoe_notifier);
1111out: 1202
1112 return err; 1203 return 0;
1204
1113out_unregister_pppox_proto: 1205out_unregister_pppox_proto:
1114 unregister_pppox_proto(PX_PROTO_OE); 1206 unregister_pppox_proto(PX_PROTO_OE);
1115out_unregister_pppoe_proto: 1207out_unregister_pppoe_proto:
1116 proto_unregister(&pppoe_sk_proto); 1208 proto_unregister(&pppoe_sk_proto);
1117 goto out; 1209out:
1210 return err;
1118} 1211}
1119 1212
1120static void __exit pppoe_exit(void) 1213static void __exit pppoe_exit(void)
@@ -1123,7 +1216,7 @@ static void __exit pppoe_exit(void)
1123 dev_remove_pack(&pppoes_ptype); 1216 dev_remove_pack(&pppoes_ptype);
1124 dev_remove_pack(&pppoed_ptype); 1217 dev_remove_pack(&pppoed_ptype);
1125 unregister_netdevice_notifier(&pppoe_notifier); 1218 unregister_netdevice_notifier(&pppoe_notifier);
1126 remove_proc_entry("pppoe", init_net.proc_net); 1219 unregister_pernet_gen_device(pppoe_net_id, &pppoe_net_ops);
1127 proto_unregister(&pppoe_sk_proto); 1220 proto_unregister(&pppoe_sk_proto);
1128} 1221}
1129 1222
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index f1a946785c6..1ba0f6864ac 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -90,7 +90,9 @@
90#include <linux/hash.h> 90#include <linux/hash.h>
91#include <linux/sort.h> 91#include <linux/sort.h>
92#include <linux/proc_fs.h> 92#include <linux/proc_fs.h>
93#include <linux/nsproxy.h>
93#include <net/net_namespace.h> 94#include <net/net_namespace.h>
95#include <net/netns/generic.h>
94#include <net/dst.h> 96#include <net/dst.h>
95#include <net/ip.h> 97#include <net/ip.h>
96#include <net/udp.h> 98#include <net/udp.h>
@@ -204,6 +206,7 @@ struct pppol2tp_tunnel
204 struct sock *sock; /* Parent socket */ 206 struct sock *sock; /* Parent socket */
205 struct list_head list; /* Keep a list of all open 207 struct list_head list; /* Keep a list of all open
206 * prepared sockets */ 208 * prepared sockets */
209 struct net *pppol2tp_net; /* the net we belong to */
207 210
208 atomic_t ref_count; 211 atomic_t ref_count;
209}; 212};
@@ -227,8 +230,20 @@ static atomic_t pppol2tp_tunnel_count;
227static atomic_t pppol2tp_session_count; 230static atomic_t pppol2tp_session_count;
228static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; 231static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
229static struct proto_ops pppol2tp_ops; 232static struct proto_ops pppol2tp_ops;
230static LIST_HEAD(pppol2tp_tunnel_list); 233
231static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock); 234/* per-net private data for this module */
235static unsigned int pppol2tp_net_id;
236struct pppol2tp_net {
237 struct list_head pppol2tp_tunnel_list;
238 rwlock_t pppol2tp_tunnel_list_lock;
239};
240
241static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
242{
243 BUG_ON(!net);
244
245 return net_generic(net, pppol2tp_net_id);
246}
232 247
233/* Helpers to obtain tunnel/session contexts from sockets. 248/* Helpers to obtain tunnel/session contexts from sockets.
234 */ 249 */
@@ -321,18 +336,19 @@ pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
321 336
322/* Lookup a tunnel by id 337/* Lookup a tunnel by id
323 */ 338 */
324static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id) 339static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
325{ 340{
326 struct pppol2tp_tunnel *tunnel = NULL; 341 struct pppol2tp_tunnel *tunnel;
342 struct pppol2tp_net *pn = pppol2tp_pernet(net);
327 343
328 read_lock_bh(&pppol2tp_tunnel_list_lock); 344 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
329 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { 345 list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
330 if (tunnel->stats.tunnel_id == tunnel_id) { 346 if (tunnel->stats.tunnel_id == tunnel_id) {
331 read_unlock_bh(&pppol2tp_tunnel_list_lock); 347 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
332 return tunnel; 348 return tunnel;
333 } 349 }
334 } 350 }
335 read_unlock_bh(&pppol2tp_tunnel_list_lock); 351 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
336 352
337 return NULL; 353 return NULL;
338} 354}
@@ -1287,10 +1303,12 @@ again:
1287 */ 1303 */
1288static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) 1304static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
1289{ 1305{
1306 struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
1307
1290 /* Remove from socket list */ 1308 /* Remove from socket list */
1291 write_lock_bh(&pppol2tp_tunnel_list_lock); 1309 write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
1292 list_del_init(&tunnel->list); 1310 list_del_init(&tunnel->list);
1293 write_unlock_bh(&pppol2tp_tunnel_list_lock); 1311 write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
1294 1312
1295 atomic_dec(&pppol2tp_tunnel_count); 1313 atomic_dec(&pppol2tp_tunnel_count);
1296 kfree(tunnel); 1314 kfree(tunnel);
@@ -1444,13 +1462,14 @@ error:
1444/* Internal function to prepare a tunnel (UDP) socket to have PPPoX 1462/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
1445 * sockets attached to it. 1463 * sockets attached to it.
1446 */ 1464 */
1447static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, 1465static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
1448 int *error) 1466 int fd, u16 tunnel_id, int *error)
1449{ 1467{
1450 int err; 1468 int err;
1451 struct socket *sock = NULL; 1469 struct socket *sock = NULL;
1452 struct sock *sk; 1470 struct sock *sk;
1453 struct pppol2tp_tunnel *tunnel; 1471 struct pppol2tp_tunnel *tunnel;
1472 struct pppol2tp_net *pn;
1454 struct sock *ret = NULL; 1473 struct sock *ret = NULL;
1455 1474
1456 /* Get the tunnel UDP socket from the fd, which was opened by 1475 /* Get the tunnel UDP socket from the fd, which was opened by
@@ -1524,11 +1543,15 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
1524 /* Misc init */ 1543 /* Misc init */
1525 rwlock_init(&tunnel->hlist_lock); 1544 rwlock_init(&tunnel->hlist_lock);
1526 1545
1546 /* The net we belong to */
1547 tunnel->pppol2tp_net = net;
1548 pn = pppol2tp_pernet(net);
1549
1527 /* Add tunnel to our list */ 1550 /* Add tunnel to our list */
1528 INIT_LIST_HEAD(&tunnel->list); 1551 INIT_LIST_HEAD(&tunnel->list);
1529 write_lock_bh(&pppol2tp_tunnel_list_lock); 1552 write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
1530 list_add(&tunnel->list, &pppol2tp_tunnel_list); 1553 list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
1531 write_unlock_bh(&pppol2tp_tunnel_list_lock); 1554 write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
1532 atomic_inc(&pppol2tp_tunnel_count); 1555 atomic_inc(&pppol2tp_tunnel_count);
1533 1556
1534 /* Bump the reference count. The tunnel context is deleted 1557 /* Bump the reference count. The tunnel context is deleted
@@ -1629,7 +1652,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1629 * tunnel id. 1652 * tunnel id.
1630 */ 1653 */
1631 if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) { 1654 if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
1632 tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd, 1655 tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
1656 sp->pppol2tp.fd,
1633 sp->pppol2tp.s_tunnel, 1657 sp->pppol2tp.s_tunnel,
1634 &error); 1658 &error);
1635 if (tunnel_sock == NULL) 1659 if (tunnel_sock == NULL)
@@ -1637,7 +1661,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1637 1661
1638 tunnel = tunnel_sock->sk_user_data; 1662 tunnel = tunnel_sock->sk_user_data;
1639 } else { 1663 } else {
1640 tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel); 1664 tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
1641 1665
1642 /* Error if we can't find the tunnel */ 1666 /* Error if we can't find the tunnel */
1643 error = -ENOENT; 1667 error = -ENOENT;
@@ -1725,7 +1749,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1725 po->chan.ops = &pppol2tp_chan_ops; 1749 po->chan.ops = &pppol2tp_chan_ops;
1726 po->chan.mtu = session->mtu; 1750 po->chan.mtu = session->mtu;
1727 1751
1728 error = ppp_register_channel(&po->chan); 1752 error = ppp_register_net_channel(sock_net(sk), &po->chan);
1729 if (error) 1753 if (error)
1730 goto end_put_tun; 1754 goto end_put_tun;
1731 1755
@@ -2347,8 +2371,9 @@ end:
2347#include <linux/seq_file.h> 2371#include <linux/seq_file.h>
2348 2372
2349struct pppol2tp_seq_data { 2373struct pppol2tp_seq_data {
2350 struct pppol2tp_tunnel *tunnel; /* current tunnel */ 2374 struct seq_net_private p;
2351 struct pppol2tp_session *session; /* NULL means get first session in tunnel */ 2375 struct pppol2tp_tunnel *tunnel; /* current tunnel */
2376 struct pppol2tp_session *session; /* NULL means get first session in tunnel */
2352}; 2377};
2353 2378
2354static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr) 2379static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
@@ -2384,17 +2409,18 @@ out:
2384 return session; 2409 return session;
2385} 2410}
2386 2411
2387static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr) 2412static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
2413 struct pppol2tp_tunnel *curr)
2388{ 2414{
2389 struct pppol2tp_tunnel *tunnel = NULL; 2415 struct pppol2tp_tunnel *tunnel = NULL;
2390 2416
2391 read_lock_bh(&pppol2tp_tunnel_list_lock); 2417 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
2392 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) { 2418 if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
2393 goto out; 2419 goto out;
2394 } 2420 }
2395 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); 2421 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
2396out: 2422out:
2397 read_unlock_bh(&pppol2tp_tunnel_list_lock); 2423 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
2398 2424
2399 return tunnel; 2425 return tunnel;
2400} 2426}
@@ -2402,6 +2428,7 @@ out:
2402static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) 2428static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
2403{ 2429{
2404 struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; 2430 struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
2431 struct pppol2tp_net *pn;
2405 loff_t pos = *offs; 2432 loff_t pos = *offs;
2406 2433
2407 if (!pos) 2434 if (!pos)
@@ -2409,14 +2436,15 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
2409 2436
2410 BUG_ON(m->private == NULL); 2437 BUG_ON(m->private == NULL);
2411 pd = m->private; 2438 pd = m->private;
2439 pn = pppol2tp_pernet(seq_file_net(m));
2412 2440
2413 if (pd->tunnel == NULL) { 2441 if (pd->tunnel == NULL) {
2414 if (!list_empty(&pppol2tp_tunnel_list)) 2442 if (!list_empty(&pn->pppol2tp_tunnel_list))
2415 pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list); 2443 pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
2416 } else { 2444 } else {
2417 pd->session = next_session(pd->tunnel, pd->session); 2445 pd->session = next_session(pd->tunnel, pd->session);
2418 if (pd->session == NULL) { 2446 if (pd->session == NULL) {
2419 pd->tunnel = next_tunnel(pd->tunnel); 2447 pd->tunnel = next_tunnel(pn, pd->tunnel);
2420 } 2448 }
2421 } 2449 }
2422 2450
@@ -2517,7 +2545,7 @@ out:
2517 return 0; 2545 return 0;
2518} 2546}
2519 2547
2520static struct seq_operations pppol2tp_seq_ops = { 2548static const struct seq_operations pppol2tp_seq_ops = {
2521 .start = pppol2tp_seq_start, 2549 .start = pppol2tp_seq_start,
2522 .next = pppol2tp_seq_next, 2550 .next = pppol2tp_seq_next,
2523 .stop = pppol2tp_seq_stop, 2551 .stop = pppol2tp_seq_stop,
@@ -2530,51 +2558,18 @@ static struct seq_operations pppol2tp_seq_ops = {
2530 */ 2558 */
2531static int pppol2tp_proc_open(struct inode *inode, struct file *file) 2559static int pppol2tp_proc_open(struct inode *inode, struct file *file)
2532{ 2560{
2533 struct seq_file *m; 2561 return seq_open_net(inode, file, &pppol2tp_seq_ops,
2534 struct pppol2tp_seq_data *pd; 2562 sizeof(struct pppol2tp_seq_data));
2535 int ret = 0;
2536
2537 ret = seq_open(file, &pppol2tp_seq_ops);
2538 if (ret < 0)
2539 goto out;
2540
2541 m = file->private_data;
2542
2543 /* Allocate and fill our proc_data for access later */
2544 ret = -ENOMEM;
2545 m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL);
2546 if (m->private == NULL)
2547 goto out;
2548
2549 pd = m->private;
2550 ret = 0;
2551
2552out:
2553 return ret;
2554}
2555
2556/* Called when /proc file access completes.
2557 */
2558static int pppol2tp_proc_release(struct inode *inode, struct file *file)
2559{
2560 struct seq_file *m = (struct seq_file *)file->private_data;
2561
2562 kfree(m->private);
2563 m->private = NULL;
2564
2565 return seq_release(inode, file);
2566} 2563}
2567 2564
2568static struct file_operations pppol2tp_proc_fops = { 2565static const struct file_operations pppol2tp_proc_fops = {
2569 .owner = THIS_MODULE, 2566 .owner = THIS_MODULE,
2570 .open = pppol2tp_proc_open, 2567 .open = pppol2tp_proc_open,
2571 .read = seq_read, 2568 .read = seq_read,
2572 .llseek = seq_lseek, 2569 .llseek = seq_lseek,
2573 .release = pppol2tp_proc_release, 2570 .release = seq_release_net,
2574}; 2571};
2575 2572
2576static struct proc_dir_entry *pppol2tp_proc;
2577
2578#endif /* CONFIG_PROC_FS */ 2573#endif /* CONFIG_PROC_FS */
2579 2574
2580/***************************************************************************** 2575/*****************************************************************************
@@ -2606,6 +2601,57 @@ static struct pppox_proto pppol2tp_proto = {
2606 .ioctl = pppol2tp_ioctl 2601 .ioctl = pppol2tp_ioctl
2607}; 2602};
2608 2603
2604static __net_init int pppol2tp_init_net(struct net *net)
2605{
2606 struct pppol2tp_net *pn;
2607 struct proc_dir_entry *pde;
2608 int err;
2609
2610 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
2611 if (!pn)
2612 return -ENOMEM;
2613
2614 INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
2615 rwlock_init(&pn->pppol2tp_tunnel_list_lock);
2616
2617 err = net_assign_generic(net, pppol2tp_net_id, pn);
2618 if (err)
2619 goto out;
2620
2621 pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
2622#ifdef CONFIG_PROC_FS
2623 if (!pde) {
2624 err = -ENOMEM;
2625 goto out;
2626 }
2627#endif
2628
2629 return 0;
2630
2631out:
2632 kfree(pn);
2633 return err;
2634}
2635
2636static __net_exit void pppol2tp_exit_net(struct net *net)
2637{
 2638 struct pppol2tp_net *pn;
2639
2640 proc_net_remove(net, "pppol2tp");
2641 pn = net_generic(net, pppol2tp_net_id);
2642 /*
2643 * if someone has cached our net then
2644 * further net_generic call will return NULL
2645 */
2646 net_assign_generic(net, pppol2tp_net_id, NULL);
2647 kfree(pn);
2648}
2649
2650static struct pernet_operations pppol2tp_net_ops = {
2651 .init = pppol2tp_init_net,
2652 .exit = pppol2tp_exit_net,
2653};
2654
2609static int __init pppol2tp_init(void) 2655static int __init pppol2tp_init(void)
2610{ 2656{
2611 int err; 2657 int err;
@@ -2617,23 +2663,17 @@ static int __init pppol2tp_init(void)
2617 if (err) 2663 if (err)
2618 goto out_unregister_pppol2tp_proto; 2664 goto out_unregister_pppol2tp_proto;
2619 2665
2620#ifdef CONFIG_PROC_FS 2666 err = register_pernet_gen_device(&pppol2tp_net_id, &pppol2tp_net_ops);
2621 pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0, 2667 if (err)
2622 &pppol2tp_proc_fops);
2623 if (!pppol2tp_proc) {
2624 err = -ENOMEM;
2625 goto out_unregister_pppox_proto; 2668 goto out_unregister_pppox_proto;
2626 } 2669
2627#endif /* CONFIG_PROC_FS */
2628 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", 2670 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
2629 PPPOL2TP_DRV_VERSION); 2671 PPPOL2TP_DRV_VERSION);
2630 2672
2631out: 2673out:
2632 return err; 2674 return err;
2633#ifdef CONFIG_PROC_FS
2634out_unregister_pppox_proto: 2675out_unregister_pppox_proto:
2635 unregister_pppox_proto(PX_PROTO_OL2TP); 2676 unregister_pppox_proto(PX_PROTO_OL2TP);
2636#endif
2637out_unregister_pppol2tp_proto: 2677out_unregister_pppol2tp_proto:
2638 proto_unregister(&pppol2tp_sk_proto); 2678 proto_unregister(&pppol2tp_sk_proto);
2639 goto out; 2679 goto out;
@@ -2642,10 +2682,6 @@ out_unregister_pppol2tp_proto:
2642static void __exit pppol2tp_exit(void) 2682static void __exit pppol2tp_exit(void)
2643{ 2683{
2644 unregister_pppox_proto(PX_PROTO_OL2TP); 2684 unregister_pppox_proto(PX_PROTO_OL2TP);
2645
2646#ifdef CONFIG_PROC_FS
2647 remove_proc_entry("pppol2tp", init_net.proc_net);
2648#endif
2649 proto_unregister(&pppol2tp_sk_proto); 2685 proto_unregister(&pppol2tp_sk_proto);
2650} 2686}
2651 2687
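
The pppol2tp hunks above are a standard pernet conversion: the global tunnel list and its lock move into a per-namespace structure reached through net_generic(), and that structure's lifetime is tied to namespace creation and teardown via pernet_operations. A minimal sketch of the same pattern, using hypothetical foo_* names rather than the driver's own, and the register_pernet_gen_device()/net_assign_generic() interface of the kernel version this series targets:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	struct foo_net {
		struct list_head foo_list;
		rwlock_t foo_list_lock;
	};

	static int foo_net_id;			/* slot assigned at registration time */

	static inline struct foo_net *foo_pernet(struct net *net)
	{
		return net_generic(net, foo_net_id);
	}

	static __net_init int foo_init_net(struct net *net)
	{
		struct foo_net *pn = kzalloc(sizeof(*pn), GFP_KERNEL);
		int err;

		if (!pn)
			return -ENOMEM;
		INIT_LIST_HEAD(&pn->foo_list);
		rwlock_init(&pn->foo_list_lock);
		err = net_assign_generic(net, foo_net_id, pn);
		if (err)
			kfree(pn);		/* don't leak on a failed assignment */
		return err;
	}

	static __net_exit void foo_exit_net(struct net *net)
	{
		kfree(net_generic(net, foo_net_id));
	}

	static struct pernet_operations foo_net_ops = {
		.init = foo_init_net,
		.exit = foo_exit_net,
	};

	/* module init: err = register_pernet_gen_device(&foo_net_id, &foo_net_ops); */

Per-namespace state is then reachable from any code path holding a struct net, e.g. foo_pernet(sock_net(sk)), which is exactly how pppol2tp_tunnel_find() and pppol2tp_tunnel_free() obtain their list and lock above.
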
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 03aecc97fb4..4f6d33fbc67 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -108,9 +108,6 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol)
108{ 108{
109 int rc = -EPROTOTYPE; 109 int rc = -EPROTOTYPE;
110 110
111 if (net != &init_net)
112 return -EAFNOSUPPORT;
113
114 if (protocol < 0 || protocol > PX_MAX_PROTO) 111 if (protocol < 0 || protocol > PX_MAX_PROTO)
115 goto out; 112 goto out;
116 113
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 4b564eda5bd..30900b30d53 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -745,7 +745,7 @@ static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
745 /* Move the mac addresses to the top of buffer */ 745 /* Move the mac addresses to the top of buffer */
746 memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); 746 memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
747 747
748 veth->h_vlan_proto = __constant_htons(ETH_P_8021Q); 748 veth->h_vlan_proto = cpu_to_be16(ETH_P_8021Q);
749 veth->h_vlan_TCI = htons(tag); 749 veth->h_vlan_TCI = htons(tag);
750 750
751 return skb; 751 return skb;
@@ -1403,6 +1403,19 @@ void gelic_net_tx_timeout(struct net_device *netdev)
1403 atomic_dec(&card->tx_timeout_task_counter); 1403 atomic_dec(&card->tx_timeout_task_counter);
1404} 1404}
1405 1405
1406static const struct net_device_ops gelic_netdevice_ops = {
1407 .ndo_open = gelic_net_open,
1408 .ndo_stop = gelic_net_stop,
1409 .ndo_start_xmit = gelic_net_xmit,
1410 .ndo_set_multicast_list = gelic_net_set_multi,
1411 .ndo_change_mtu = gelic_net_change_mtu,
1412 .ndo_tx_timeout = gelic_net_tx_timeout,
1413 .ndo_validate_addr = eth_validate_addr,
1414#ifdef CONFIG_NET_POLL_CONTROLLER
1415 .ndo_poll_controller = gelic_net_poll_controller,
1416#endif
1417};
1418
1406/** 1419/**
1407 * gelic_ether_setup_netdev_ops - initialization of net_device operations 1420 * gelic_ether_setup_netdev_ops - initialization of net_device operations
1408 * @netdev: net_device structure 1421 * @netdev: net_device structure
@@ -1412,21 +1425,12 @@ void gelic_net_tx_timeout(struct net_device *netdev)
1412static void gelic_ether_setup_netdev_ops(struct net_device *netdev, 1425static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
1413 struct napi_struct *napi) 1426 struct napi_struct *napi)
1414{ 1427{
1415 netdev->open = &gelic_net_open;
1416 netdev->stop = &gelic_net_stop;
1417 netdev->hard_start_xmit = &gelic_net_xmit;
1418 netdev->set_multicast_list = &gelic_net_set_multi;
1419 netdev->change_mtu = &gelic_net_change_mtu;
1420 /* tx watchdog */
1421 netdev->tx_timeout = &gelic_net_tx_timeout;
1422 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1428 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
1423 /* NAPI */ 1429 /* NAPI */
1424 netif_napi_add(netdev, napi, 1430 netif_napi_add(netdev, napi,
1425 gelic_net_poll, GELIC_NET_NAPI_WEIGHT); 1431 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1426 netdev->ethtool_ops = &gelic_ether_ethtool_ops; 1432 netdev->ethtool_ops = &gelic_ether_ethtool_ops;
1427#ifdef CONFIG_NET_POLL_CONTROLLER 1433 netdev->netdev_ops = &gelic_netdevice_ops;
1428 netdev->poll_controller = gelic_net_poll_controller;
1429#endif
1430} 1434}
1431 1435
1432/** 1436/**
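
The gelic_ether_setup_netdev_ops() rework above follows the generic net_device_ops conversion: the per-netdev function pointers (open, stop, hard_start_xmit, and so on) are collected into a single const ops table and each device simply points at it. A minimal sketch of the pattern, assuming hypothetical foo_* handlers with the usual ndo_* signatures:

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open		= foo_open,
		.ndo_stop		= foo_stop,
		.ndo_start_xmit		= foo_start_xmit,
		.ndo_set_multicast_list	= foo_set_multicast_list,
		.ndo_change_mtu		= foo_change_mtu,
		.ndo_tx_timeout		= foo_tx_timeout,
		.ndo_validate_addr	= eth_validate_addr,
	};

	static void foo_setup_netdev(struct net_device *netdev)
	{
		netdev->netdev_ops = &foo_netdev_ops;	/* replaces the old per-field assignments */
		netdev->watchdog_timeo = 5 * HZ;
	}

Making the table const also lets every instance of the device share one read-only ops structure instead of writing the same pointers into each struct net_device.
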
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 335da4831ab..a5ac2bd58b5 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2697,6 +2697,19 @@ static int gelic_wl_stop(struct net_device *netdev)
2697 2697
2698/* -- */ 2698/* -- */
2699 2699
2700static const struct net_device_ops gelic_wl_netdevice_ops = {
2701 .ndo_open = gelic_wl_open,
2702 .ndo_stop = gelic_wl_stop,
2703 .ndo_start_xmit = gelic_net_xmit,
2704 .ndo_set_multicast_list = gelic_net_set_multi,
2705 .ndo_change_mtu = gelic_net_change_mtu,
2706 .ndo_tx_timeout = gelic_net_tx_timeout,
2707 .ndo_validate_addr = eth_validate_addr,
2708#ifdef CONFIG_NET_POLL_CONTROLLER
2709 .ndo_poll_controller = gelic_net_poll_controller,
2710#endif
2711};
2712
2700static struct ethtool_ops gelic_wl_ethtool_ops = { 2713static struct ethtool_ops gelic_wl_ethtool_ops = {
2701 .get_drvinfo = gelic_net_get_drvinfo, 2714 .get_drvinfo = gelic_net_get_drvinfo,
2702 .get_link = gelic_wl_get_link, 2715 .get_link = gelic_wl_get_link,
@@ -2711,21 +2724,12 @@ static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
2711 struct gelic_wl_info *wl; 2724 struct gelic_wl_info *wl;
2712 wl = port_wl(netdev_priv(netdev)); 2725 wl = port_wl(netdev_priv(netdev));
2713 BUG_ON(!wl); 2726 BUG_ON(!wl);
2714 netdev->open = &gelic_wl_open;
2715 netdev->stop = &gelic_wl_stop;
2716 netdev->hard_start_xmit = &gelic_net_xmit;
2717 netdev->set_multicast_list = &gelic_net_set_multi;
2718 netdev->change_mtu = &gelic_net_change_mtu;
2719 netdev->wireless_data = &wl->wireless_data;
2720 netdev->wireless_handlers = &gelic_wl_wext_handler_def;
2721 /* tx watchdog */
2722 netdev->tx_timeout = &gelic_net_tx_timeout;
2723 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 2727 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
2724 2728
2725 netdev->ethtool_ops = &gelic_wl_ethtool_ops; 2729 netdev->ethtool_ops = &gelic_wl_ethtool_ops;
2726#ifdef CONFIG_NET_POLL_CONTROLLER 2730 netdev->netdev_ops = &gelic_wl_netdevice_ops;
2727 netdev->poll_controller = gelic_net_poll_controller; 2731 netdev->wireless_data = &wl->wireless_data;
2728#endif 2732 netdev->wireless_handlers = &gelic_wl_wext_handler_def;
2729} 2733}
2730 2734
2731/* 2735/*
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 189ec29ac7a..8b2823c8dcc 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2292,7 +2292,7 @@ static int ql_poll(struct napi_struct *napi, int budget)
2292 2292
2293 if (tx_cleaned + rx_cleaned != budget) { 2293 if (tx_cleaned + rx_cleaned != budget) {
2294 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2294 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2295 __netif_rx_complete(napi); 2295 __napi_complete(napi);
2296 ql_update_small_bufq_prod_index(qdev); 2296 ql_update_small_bufq_prod_index(qdev);
2297 ql_update_lrg_bufq_prod_index(qdev); 2297 ql_update_lrg_bufq_prod_index(qdev);
2298 writel(qdev->rsp_consumer_index, 2298 writel(qdev->rsp_consumer_index,
@@ -2351,8 +2351,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2351 spin_unlock(&qdev->adapter_lock); 2351 spin_unlock(&qdev->adapter_lock);
2352 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2352 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2353 ql_disable_interrupts(qdev); 2353 ql_disable_interrupts(qdev);
2354 if (likely(netif_rx_schedule_prep(&qdev->napi))) { 2354 if (likely(napi_schedule_prep(&qdev->napi))) {
2355 __netif_rx_schedule(&qdev->napi); 2355 __napi_schedule(&qdev->napi);
2356 } 2356 }
2357 } else { 2357 } else {
2358 return IRQ_NONE; 2358 return IRQ_NONE;
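
Most of the driver hunks in the rest of this series (qla3xxx, qlge, r6040, r8169, s2io, sb1250, skge, sfc) are the same mechanical rename: netif_rx_schedule()/netif_rx_complete() become napi_schedule()/napi_complete(), with the *_prep and __* variants renamed to match. The idiom itself is unchanged; a minimal interrupt/poll sketch, with foo_priv, foo_rx() and the IRQ mask helpers being hypothetical:

	struct foo_priv {
		struct napi_struct napi;
		/* ... device state ... */
	};

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		if (napi_schedule_prep(&priv->napi)) {
			foo_disable_rx_irq(priv);	/* hypothetical: mask the RX interrupt */
			__napi_schedule(&priv->napi);
		}
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int work_done = foo_rx(priv, budget);	/* hypothetical: process up to budget packets */

		if (work_done < budget) {
			napi_complete(napi);
			foo_enable_rx_irq(priv);	/* hypothetical: unmask the RX interrupt */
		}
		return work_done;
	}
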
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8ea72dc60f7..fd515afb1aa 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1452,6 +1452,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1452 qdev->stats.rx_packets++; 1452 qdev->stats.rx_packets++;
1453 qdev->stats.rx_bytes += skb->len; 1453 qdev->stats.rx_bytes += skb->len;
1454 skb->protocol = eth_type_trans(skb, ndev); 1454 skb->protocol = eth_type_trans(skb, ndev);
1455 skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]);
1455 if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) { 1456 if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
1456 QPRINTK(qdev, RX_STATUS, DEBUG, 1457 QPRINTK(qdev, RX_STATUS, DEBUG,
1457 "Passing a VLAN packet upstream.\n"); 1458 "Passing a VLAN packet upstream.\n");
@@ -1663,7 +1664,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1663 rx_ring->cq_id); 1664 rx_ring->cq_id);
1664 1665
1665 if (work_done < budget) { 1666 if (work_done < budget) {
1666 __netif_rx_complete(napi); 1667 __napi_complete(napi);
1667 ql_enable_completion_interrupt(qdev, rx_ring->irq); 1668 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1668 } 1669 }
1669 return work_done; 1670 return work_done;
@@ -1748,7 +1749,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1748static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) 1749static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1749{ 1750{
1750 struct rx_ring *rx_ring = dev_id; 1751 struct rx_ring *rx_ring = dev_id;
1751 netif_rx_schedule(&rx_ring->napi); 1752 napi_schedule(&rx_ring->napi);
1752 return IRQ_HANDLED; 1753 return IRQ_HANDLED;
1753} 1754}
1754 1755
@@ -1834,7 +1835,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1834 &rx_ring->rx_work, 1835 &rx_ring->rx_work,
1835 0); 1836 0);
1836 else 1837 else
1837 netif_rx_schedule(&rx_ring->napi); 1838 napi_schedule(&rx_ring->napi);
1838 work_done++; 1839 work_done++;
1839 } 1840 }
1840 } 1841 }
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index b2dcdb5ed8b..3c27a7bfea4 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -676,7 +676,7 @@ static int r6040_poll(struct napi_struct *napi, int budget)
676 work_done = r6040_rx(dev, budget); 676 work_done = r6040_rx(dev, budget);
677 677
678 if (work_done < budget) { 678 if (work_done < budget) {
679 netif_rx_complete(napi); 679 napi_complete(napi);
680 /* Enable RX interrupt */ 680 /* Enable RX interrupt */
681 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); 681 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
682 } 682 }
@@ -713,7 +713,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
713 713
714 /* Mask off RX interrupt */ 714 /* Mask off RX interrupt */
715 misr &= ~RX_INTS; 715 misr &= ~RX_INTS;
716 netif_rx_schedule(&lp->napi); 716 napi_schedule(&lp->napi);
717 } 717 }
718 718
719 /* TX interrupt request */ 719 /* TX interrupt request */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0771eb6fc6e..dd83f936b03 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3601,8 +3601,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
3601 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); 3601 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
3602 tp->intr_mask = ~tp->napi_event; 3602 tp->intr_mask = ~tp->napi_event;
3603 3603
3604 if (likely(netif_rx_schedule_prep(&tp->napi))) 3604 if (likely(napi_schedule_prep(&tp->napi)))
3605 __netif_rx_schedule(&tp->napi); 3605 __napi_schedule(&tp->napi);
3606 else if (netif_msg_intr(tp)) { 3606 else if (netif_msg_intr(tp)) {
3607 printk(KERN_INFO "%s: interrupt %04x in poll\n", 3607 printk(KERN_INFO "%s: interrupt %04x in poll\n",
3608 dev->name, status); 3608 dev->name, status);
@@ -3623,7 +3623,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
3623 rtl8169_tx_interrupt(dev, tp, ioaddr); 3623 rtl8169_tx_interrupt(dev, tp, ioaddr);
3624 3624
3625 if (work_done < budget) { 3625 if (work_done < budget) {
3626 netif_rx_complete(napi); 3626 napi_complete(napi);
3627 tp->intr_mask = 0xffff; 3627 tp->intr_mask = 0xffff;
3628 /* 3628 /*
3629 * 20040426: the barrier is not strictly required but the 3629 * 20040426: the barrier is not strictly required but the
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index f5c57c059bc..5cd2291bc0b 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2852,7 +2852,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
2852 s2io_chk_rx_buffers(nic, ring); 2852 s2io_chk_rx_buffers(nic, ring);
2853 2853
2854 if (pkts_processed < budget_org) { 2854 if (pkts_processed < budget_org) {
2855 netif_rx_complete(napi); 2855 napi_complete(napi);
2856 /*Re Enable MSI-Rx Vector*/ 2856 /*Re Enable MSI-Rx Vector*/
2857 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; 2857 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2858 addr += 7 - ring->ring_no; 2858 addr += 7 - ring->ring_no;
@@ -2889,7 +2889,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2889 break; 2889 break;
2890 } 2890 }
2891 if (pkts_processed < budget_org) { 2891 if (pkts_processed < budget_org) {
2892 netif_rx_complete(napi); 2892 napi_complete(napi);
2893 /* Re enable the Rx interrupts for the ring */ 2893 /* Re enable the Rx interrupts for the ring */
2894 writeq(0, &bar0->rx_traffic_mask); 2894 writeq(0, &bar0->rx_traffic_mask);
2895 readl(&bar0->rx_traffic_mask); 2895 readl(&bar0->rx_traffic_mask);
@@ -3862,7 +3862,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3862 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); 3862 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3863 /* We fail init if error or we get less vectors than min required */ 3863 /* We fail init if error or we get less vectors than min required */
3864 if (ret) { 3864 if (ret) {
3865 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); 3865 DBG_PRINT(ERR_DBG, "s2io: Enabling MSI-X failed\n");
3866 kfree(nic->entries); 3866 kfree(nic->entries);
3867 nic->mac_control.stats_info->sw_stat.mem_freed 3867 nic->mac_control.stats_info->sw_stat.mem_freed
3868 += (nic->num_entries * sizeof(struct msix_entry)); 3868 += (nic->num_entries * sizeof(struct msix_entry));
@@ -4342,7 +4342,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4342 val8 = (ring->ring_no == 0) ? 0x7f : 0xff; 4342 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4343 writeb(val8, addr); 4343 writeb(val8, addr);
4344 val8 = readb(addr); 4344 val8 = readb(addr);
4345 netif_rx_schedule(&ring->napi); 4345 napi_schedule(&ring->napi);
4346 } else { 4346 } else {
4347 rx_intr_handler(ring, 0); 4347 rx_intr_handler(ring, 0);
4348 s2io_chk_rx_buffers(sp, ring); 4348 s2io_chk_rx_buffers(sp, ring);
@@ -4789,7 +4789,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4789 4789
4790 if (config->napi) { 4790 if (config->napi) {
4791 if (reason & GEN_INTR_RXTRAFFIC) { 4791 if (reason & GEN_INTR_RXTRAFFIC) {
4792 netif_rx_schedule(&sp->napi); 4792 napi_schedule(&sp->napi);
4793 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); 4793 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4794 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4794 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4795 readl(&bar0->rx_traffic_int); 4795 readl(&bar0->rx_traffic_int);
@@ -7542,6 +7542,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7542 7542
7543 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 7543 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7544send_up: 7544send_up:
7545 skb_record_rx_queue(skb, ring_no);
7545 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); 7546 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7546aggregate: 7547aggregate:
7547 sp->mac_control.rings[ring_no].rx_bufs_left -= 1; 7548 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
@@ -8009,8 +8010,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8009 if (ret) { 8010 if (ret) {
8010 8011
8011 DBG_PRINT(ERR_DBG, 8012 DBG_PRINT(ERR_DBG,
8012 "%s: MSI-X requested but failed to enable\n", 8013 "s2io: MSI-X requested but failed to enable\n");
8013 dev->name);
8014 sp->config.intr_type = INTA; 8014 sp->config.intr_type = INTA;
8015 } 8015 }
8016 } 8016 }
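
The skb_record_rx_queue() calls added in the qlge and s2io receive paths above tag each skb with the hardware ring it arrived on, so that later transmit queue selection can keep a flow on a matching queue. A minimal receive-path sketch, with foo_ring and its fields being hypothetical:

	static void foo_rx_one(struct foo_ring *ring, struct sk_buff *skb)
	{
		skb->protocol = eth_type_trans(skb, ring->netdev);
		skb_record_rx_queue(skb, ring->index);	/* which HW RX ring delivered this packet */
		netif_receive_skb(skb);
	}
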
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 31e38fae017..88dd2e09832 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2039,9 +2039,9 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
2039 sbdma_tx_process(sc,&(sc->sbm_txdma), 0); 2039 sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
2040 2040
2041 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { 2041 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
2042 if (netif_rx_schedule_prep(&sc->napi)) { 2042 if (napi_schedule_prep(&sc->napi)) {
2043 __raw_writeq(0, sc->sbm_imr); 2043 __raw_writeq(0, sc->sbm_imr);
2044 __netif_rx_schedule(&sc->napi); 2044 __napi_schedule(&sc->napi);
2045 /* Depend on the exit from poll to reenable intr */ 2045 /* Depend on the exit from poll to reenable intr */
2046 } 2046 }
2047 else { 2047 else {
@@ -2478,7 +2478,7 @@ static int sbmac_mii_probe(struct net_device *dev)
2478 return -ENXIO; 2478 return -ENXIO;
2479 } 2479 }
2480 2480
2481 phy_dev = phy_connect(dev, phy_dev->dev.bus_id, &sbmac_mii_poll, 0, 2481 phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0,
2482 PHY_INTERFACE_MODE_GMII); 2482 PHY_INTERFACE_MODE_GMII);
2483 if (IS_ERR(phy_dev)) { 2483 if (IS_ERR(phy_dev)) {
2484 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); 2484 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
@@ -2500,7 +2500,7 @@ static int sbmac_mii_probe(struct net_device *dev)
2500 2500
2501 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 2501 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
2502 dev->name, phy_dev->drv->name, 2502 dev->name, phy_dev->drv->name,
2503 phy_dev->dev.bus_id, phy_dev->irq); 2503 dev_name(&phy_dev->dev), phy_dev->irq);
2504 2504
2505 sc->phy_dev = phy_dev; 2505 sc->phy_dev = phy_dev;
2506 2506
@@ -2667,7 +2667,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
2667 sbdma_tx_process(sc, &(sc->sbm_txdma), 1); 2667 sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
2668 2668
2669 if (work_done < budget) { 2669 if (work_done < budget) {
2670 netif_rx_complete(napi); 2670 napi_complete(napi);
2671 2671
2672#ifdef CONFIG_SBMAC_COALESCE 2672#ifdef CONFIG_SBMAC_COALESCE
2673 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | 2673 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
@@ -2697,7 +2697,7 @@ static int __init sbmac_probe(struct platform_device *pldev)
2697 sbm_base = ioremap_nocache(res->start, res->end - res->start + 1); 2697 sbm_base = ioremap_nocache(res->start, res->end - res->start + 1);
2698 if (!sbm_base) { 2698 if (!sbm_base) {
2699 printk(KERN_ERR "%s: unable to map device registers\n", 2699 printk(KERN_ERR "%s: unable to map device registers\n",
2700 pldev->dev.bus_id); 2700 dev_name(&pldev->dev));
2701 err = -ENOMEM; 2701 err = -ENOMEM;
2702 goto out_out; 2702 goto out_out;
2703 } 2703 }
@@ -2708,7 +2708,7 @@ static int __init sbmac_probe(struct platform_device *pldev)
2708 * If we find a zero, skip this MAC. 2708 * If we find a zero, skip this MAC.
2709 */ 2709 */
2710 sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); 2710 sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
2711 pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", pldev->dev.bus_id, 2711 pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev),
2712 sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); 2712 sbmac_orig_hwaddr ? "" : "not ", (long long)res->start);
2713 if (sbmac_orig_hwaddr == 0) { 2713 if (sbmac_orig_hwaddr == 0) {
2714 err = 0; 2714 err = 0;
@@ -2721,7 +2721,7 @@ static int __init sbmac_probe(struct platform_device *pldev)
2721 dev = alloc_etherdev(sizeof(struct sbmac_softc)); 2721 dev = alloc_etherdev(sizeof(struct sbmac_softc));
2722 if (!dev) { 2722 if (!dev) {
2723 printk(KERN_ERR "%s: unable to allocate etherdev\n", 2723 printk(KERN_ERR "%s: unable to allocate etherdev\n",
2724 pldev->dev.bus_id); 2724 dev_name(&pldev->dev));
2725 err = -ENOMEM; 2725 err = -ENOMEM;
2726 goto out_unmap; 2726 goto out_unmap;
2727 } 2727 }
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8b75bef4a84..c13cbf099b8 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -13,6 +13,9 @@
13 * Both are almost identical and seem to be based on pci-skeleton.c 13 * Both are almost identical and seem to be based on pci-skeleton.c
14 * 14 *
15 * Rewritten for 2.6 by Cesar Eduardo Barros 15 * Rewritten for 2.6 by Cesar Eduardo Barros
16 *
17 * A datasheet for this chip can be found at
18 * http://www.silan.com.cn/english/products/pdf/SC92031AY.pdf
16 */ 19 */
17 20
18/* Note about set_mac_address: I don't know how to change the hardware 21/* Note about set_mac_address: I don't know how to change the hardware
@@ -31,13 +34,7 @@
31 34
32#include <asm/irq.h> 35#include <asm/irq.h>
33 36
34#define PCI_VENDOR_ID_SILAN 0x1904
35#define PCI_DEVICE_ID_SILAN_SC92031 0x2031
36#define PCI_DEVICE_ID_SILAN_8139D 0x8139
37
38#define SC92031_NAME "sc92031" 37#define SC92031_NAME "sc92031"
39#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver"
40#define SC92031_VERSION "2.0c"
41 38
42/* BAR 0 is MMIO, BAR 1 is PIO */ 39/* BAR 0 is MMIO, BAR 1 is PIO */
43#ifndef SC92031_USE_BAR 40#ifndef SC92031_USE_BAR
@@ -1264,7 +1261,6 @@ static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
1264 struct pci_dev *pdev = priv->pdev; 1261 struct pci_dev *pdev = priv->pdev;
1265 1262
1266 strcpy(drvinfo->driver, SC92031_NAME); 1263 strcpy(drvinfo->driver, SC92031_NAME);
1267 strcpy(drvinfo->version, SC92031_VERSION);
1268 strcpy(drvinfo->bus_info, pci_name(pdev)); 1264 strcpy(drvinfo->bus_info, pci_name(pdev));
1269} 1265}
1270 1266
@@ -1423,6 +1419,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1423 struct net_device *dev; 1419 struct net_device *dev;
1424 struct sc92031_priv *priv; 1420 struct sc92031_priv *priv;
1425 u32 mac0, mac1; 1421 u32 mac0, mac1;
1422 unsigned long base_addr;
1426 1423
1427 err = pci_enable_device(pdev); 1424 err = pci_enable_device(pdev);
1428 if (unlikely(err < 0)) 1425 if (unlikely(err < 0))
@@ -1497,6 +1494,14 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1497 if (err < 0) 1494 if (err < 0)
1498 goto out_register_netdev; 1495 goto out_register_netdev;
1499 1496
1497#if SC92031_USE_BAR == 0
1498 base_addr = dev->mem_start;
1499#elif SC92031_USE_BAR == 1
1500 base_addr = dev->base_addr;
1501#endif
1502 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
1503 base_addr, dev->dev_addr, dev->irq);
1504
1500 return 0; 1505 return 0;
1501 1506
1502out_register_netdev: 1507out_register_netdev:
@@ -1586,8 +1591,8 @@ out:
1586} 1591}
1587 1592
1588static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { 1593static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
1589 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) }, 1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
1590 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) }, 1595 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
1591 { 0, } 1596 { 0, }
1592}; 1597};
1593MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); 1598MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
@@ -1603,7 +1608,6 @@ static struct pci_driver sc92031_pci_driver = {
1603 1608
1604static int __init sc92031_init(void) 1609static int __init sc92031_init(void)
1605{ 1610{
1606 printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n");
1607 return pci_register_driver(&sc92031_pci_driver); 1611 return pci_register_driver(&sc92031_pci_driver);
1608} 1612}
1609 1613
@@ -1617,5 +1621,4 @@ module_exit(sc92031_exit);
1617 1621
1618MODULE_LICENSE("GPL"); 1622MODULE_LICENSE("GPL");
1619MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); 1623MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
1620MODULE_DESCRIPTION(SC92031_DESCRIPTION); 1624MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
1621MODULE_VERSION(SC92031_VERSION);
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index c535408ad6b..12a82966b57 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -2,7 +2,6 @@ config SFC
2 tristate "Solarflare Solarstorm SFC4000 support" 2 tristate "Solarflare Solarstorm SFC4000 support"
3 depends on PCI && INET 3 depends on PCI && INET
4 select MII 4 select MII
5 select INET_LRO
6 select CRC32 5 select CRC32
7 select I2C 6 select I2C
8 select I2C_ALGOBIT 7 select I2C_ALGOBIT
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d95c2182801..d54d84c267b 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -543,7 +543,7 @@ typedef union efx_oword {
543 543
544/* Static initialiser */ 544/* Static initialiser */
545#define EFX_OWORD32(a, b, c, d) \ 545#define EFX_OWORD32(a, b, c, d) \
546 { .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \ 546 { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
547 __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } } 547 cpu_to_le32(c), cpu_to_le32(d) } }
548 548
549#endif /* EFX_BITFIELD_H */ 549#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ab0e09bf154..75836599e43 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -182,7 +182,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
182 channel->rx_pkt = NULL; 182 channel->rx_pkt = NULL;
183 } 183 }
184 184
185 efx_flush_lro(channel);
186 efx_rx_strategy(channel); 185 efx_rx_strategy(channel);
187 186
188 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 187 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
@@ -225,11 +224,11 @@ static int efx_poll(struct napi_struct *napi, int budget)
225 224
226 if (rx_packets < budget) { 225 if (rx_packets < budget) {
227 /* There is no race here; although napi_disable() will 226 /* There is no race here; although napi_disable() will
228 * only wait for netif_rx_complete(), this isn't a problem 227 * only wait for napi_complete(), this isn't a problem
229 * since efx_channel_processed() will have no effect if 228 * since efx_channel_processed() will have no effect if
230 * interrupts have already been disabled. 229 * interrupts have already been disabled.
231 */ 230 */
232 netif_rx_complete(napi); 231 napi_complete(napi);
233 efx_channel_processed(channel); 232 efx_channel_processed(channel);
234 } 233 }
235 234
@@ -1269,18 +1268,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1269static int efx_init_napi(struct efx_nic *efx) 1268static int efx_init_napi(struct efx_nic *efx)
1270{ 1269{
1271 struct efx_channel *channel; 1270 struct efx_channel *channel;
1272 int rc;
1273 1271
1274 efx_for_each_channel(channel, efx) { 1272 efx_for_each_channel(channel, efx) {
1275 channel->napi_dev = efx->net_dev; 1273 channel->napi_dev = efx->net_dev;
1276 rc = efx_lro_init(&channel->lro_mgr, efx);
1277 if (rc)
1278 goto err;
1279 } 1274 }
1280 return 0; 1275 return 0;
1281 err:
1282 efx_fini_napi(efx);
1283 return rc;
1284} 1276}
1285 1277
1286static void efx_fini_napi(struct efx_nic *efx) 1278static void efx_fini_napi(struct efx_nic *efx)
@@ -1288,7 +1280,6 @@ static void efx_fini_napi(struct efx_nic *efx)
1288 struct efx_channel *channel; 1280 struct efx_channel *channel;
1289 1281
1290 efx_for_each_channel(channel, efx) { 1282 efx_for_each_channel(channel, efx) {
1291 efx_lro_fini(&channel->lro_mgr);
1292 channel->napi_dev = NULL; 1283 channel->napi_dev = NULL;
1293 } 1284 }
1294} 1285}
@@ -2120,7 +2111,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2120 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | 2111 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2121 NETIF_F_HIGHDMA | NETIF_F_TSO); 2112 NETIF_F_HIGHDMA | NETIF_F_TSO);
2122 if (lro) 2113 if (lro)
2123 net_dev->features |= NETIF_F_LRO; 2114 net_dev->features |= NETIF_F_GRO;
2124 /* Mask for features that also apply to VLAN devices */ 2115 /* Mask for features that also apply to VLAN devices */
2125 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2116 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2126 NETIF_F_HIGHDMA | NETIF_F_TSO); 2117 NETIF_F_HIGHDMA | NETIF_F_TSO);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 55d0f131b0e..8bde1d2a21d 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -80,7 +80,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
80 channel->channel, raw_smp_processor_id()); 80 channel->channel, raw_smp_processor_id());
81 channel->work_pending = true; 81 channel->work_pending = true;
82 82
83 netif_rx_schedule(&channel->napi_str); 83 napi_schedule(&channel->napi_str);
84} 84}
85 85
86#endif /* EFX_EFX_H */ 86#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index e019ad1fb9a..19930ff9df7 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -25,15 +25,11 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/highmem.h> 26#include <linux/highmem.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/inet_lro.h>
29#include <linux/i2c.h> 28#include <linux/i2c.h>
30 29
31#include "enum.h" 30#include "enum.h"
32#include "bitfield.h" 31#include "bitfield.h"
33 32
34#define EFX_MAX_LRO_DESCRIPTORS 8
35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
36
37/************************************************************************** 33/**************************************************************************
38 * 34 *
39 * Build definitions 35 * Build definitions
@@ -340,13 +336,10 @@ enum efx_rx_alloc_method {
340 * @eventq_read_ptr: Event queue read pointer 336 * @eventq_read_ptr: Event queue read pointer
341 * @last_eventq_read_ptr: Last event queue read pointer value. 337 * @last_eventq_read_ptr: Last event queue read pointer value.
342 * @eventq_magic: Event queue magic value for driver-generated test events 338 * @eventq_magic: Event queue magic value for driver-generated test events
343 * @lro_mgr: LRO state
344 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 339 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
345 * and diagnostic counters 340 * and diagnostic counters
346 * @rx_alloc_push_pages: RX allocation method currently in use for pushing 341 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
347 * descriptors 342 * descriptors
348 * @rx_alloc_pop_pages: RX allocation method currently in use for popping
349 * descriptors
350 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 343 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
351 * @n_rx_ip_frag_err: Count of RX IP fragment errors 344 * @n_rx_ip_frag_err: Count of RX IP fragment errors
352 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 345 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
@@ -371,10 +364,8 @@ struct efx_channel {
371 unsigned int last_eventq_read_ptr; 364 unsigned int last_eventq_read_ptr;
372 unsigned int eventq_magic; 365 unsigned int eventq_magic;
373 366
374 struct net_lro_mgr lro_mgr;
375 int rx_alloc_level; 367 int rx_alloc_level;
376 int rx_alloc_push_pages; 368 int rx_alloc_push_pages;
377 int rx_alloc_pop_pages;
378 369
379 unsigned n_rx_tobe_disc; 370 unsigned n_rx_tobe_disc;
380 unsigned n_rx_ip_frag_err; 371 unsigned n_rx_ip_frag_err;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index b8ba4bbad88..66d7fe3db3e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -99,109 +99,6 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
99} 99}
100 100
101 101
102/**************************************************************************
103 *
104 * Linux generic LRO handling
105 *
106 **************************************************************************
107 */
108
109static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
110 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
111{
112 struct efx_channel *channel = priv;
113 struct iphdr *iph;
114 struct tcphdr *th;
115
116 iph = (struct iphdr *)skb->data;
117 if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
118 goto fail;
119
120 th = (struct tcphdr *)(skb->data + iph->ihl * 4);
121
122 *tcpudp_hdr = th;
123 *ip_hdr = iph;
124 *hdr_flags = LRO_IPV4 | LRO_TCP;
125
126 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
127 return 0;
128fail:
129 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
130 return -1;
131}
132
133static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
134 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
135 void *priv)
136{
137 struct efx_channel *channel = priv;
138 struct ethhdr *eh;
139 struct iphdr *iph;
140
141 /* We support EtherII and VLAN encapsulated IPv4 */
142 eh = page_address(frag->page) + frag->page_offset;
143 *mac_hdr = eh;
144
145 if (eh->h_proto == htons(ETH_P_IP)) {
146 iph = (struct iphdr *)(eh + 1);
147 } else {
148 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
149 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
150 goto fail;
151
152 iph = (struct iphdr *)(veh + 1);
153 }
154 *ip_hdr = iph;
155
156 /* We can only do LRO over TCP */
157 if (iph->protocol != IPPROTO_TCP)
158 goto fail;
159
160 *hdr_flags = LRO_IPV4 | LRO_TCP;
161 *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
162
163 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
164 return 0;
165 fail:
166 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
167 return -1;
168}
169
170int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
171{
172 size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
173 struct net_lro_desc *lro_arr;
174
175 /* Allocate the LRO descriptors structure */
176 lro_arr = kzalloc(s, GFP_KERNEL);
177 if (lro_arr == NULL)
178 return -ENOMEM;
179
180 lro_mgr->lro_arr = lro_arr;
181 lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
182 lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
183 lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
184
185 lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
186 lro_mgr->get_frag_header = efx_get_frag_hdr;
187 lro_mgr->dev = efx->net_dev;
188
189 lro_mgr->features = LRO_F_NAPI;
190
191 /* We can pass packets up with the checksum intact */
192 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
193
194 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
195
196 return 0;
197}
198
199void efx_lro_fini(struct net_lro_mgr *lro_mgr)
200{
201 kfree(lro_mgr->lro_arr);
202 lro_mgr->lro_arr = NULL;
203}
204
205/** 102/**
206 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation 103 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
207 * 104 *
@@ -549,77 +446,31 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
549static void efx_rx_packet_lro(struct efx_channel *channel, 446static void efx_rx_packet_lro(struct efx_channel *channel,
550 struct efx_rx_buffer *rx_buf) 447 struct efx_rx_buffer *rx_buf)
551{ 448{
552 struct net_lro_mgr *lro_mgr = &channel->lro_mgr; 449 struct napi_struct *napi = &channel->napi_str;
553 void *priv = channel;
554 450
555 /* Pass the skb/page into the LRO engine */ 451 /* Pass the skb/page into the LRO engine */
556 if (rx_buf->page) { 452 if (rx_buf->page) {
557 struct skb_frag_struct frags; 453 struct napi_gro_fraginfo info;
558 454
559 frags.page = rx_buf->page; 455 info.frags[0].page = rx_buf->page;
560 frags.page_offset = efx_rx_buf_offset(rx_buf); 456 info.frags[0].page_offset = efx_rx_buf_offset(rx_buf);
561 frags.size = rx_buf->len; 457 info.frags[0].size = rx_buf->len;
458 info.nr_frags = 1;
459 info.ip_summed = CHECKSUM_UNNECESSARY;
460 info.len = rx_buf->len;
562 461
563 lro_receive_frags(lro_mgr, &frags, rx_buf->len, 462 napi_gro_frags(napi, &info);
564 rx_buf->len, priv, 0);
565 463
566 EFX_BUG_ON_PARANOID(rx_buf->skb); 464 EFX_BUG_ON_PARANOID(rx_buf->skb);
567 rx_buf->page = NULL; 465 rx_buf->page = NULL;
568 } else { 466 } else {
569 EFX_BUG_ON_PARANOID(!rx_buf->skb); 467 EFX_BUG_ON_PARANOID(!rx_buf->skb);
570 468
571 lro_receive_skb(lro_mgr, rx_buf->skb, priv); 469 napi_gro_receive(napi, rx_buf->skb);
572 rx_buf->skb = NULL; 470 rx_buf->skb = NULL;
573 } 471 }
574} 472}
575 473
576/* Allocate and construct an SKB around a struct page.*/
577static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
578 struct efx_nic *efx,
579 int hdr_len)
580{
581 struct sk_buff *skb;
582
583 /* Allocate an SKB to store the headers */
584 skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
585 if (unlikely(skb == NULL)) {
586 EFX_ERR_RL(efx, "RX out of memory for skb\n");
587 return NULL;
588 }
589
590 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
591 EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
592
593 skb->ip_summed = CHECKSUM_UNNECESSARY;
594 skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
595
596 skb->len = rx_buf->len;
597 skb->truesize = rx_buf->len + sizeof(struct sk_buff);
598 memcpy(skb->data, rx_buf->data, hdr_len);
599 skb->tail += hdr_len;
600
601 /* Append the remaining page onto the frag list */
602 if (unlikely(rx_buf->len > hdr_len)) {
603 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
604 frag->page = rx_buf->page;
605 frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
606 frag->size = skb->len - hdr_len;
607 skb_shinfo(skb)->nr_frags = 1;
608 skb->data_len = frag->size;
609 } else {
610 __free_pages(rx_buf->page, efx->rx_buffer_order);
611 skb->data_len = 0;
612 }
613
614 /* Ownership has transferred from the rx_buf to skb */
615 rx_buf->page = NULL;
616
617 /* Move past the ethernet header */
618 skb->protocol = eth_type_trans(skb, efx->net_dev);
619
620 return skb;
621}
622
623void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 474void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
624 unsigned int len, bool checksummed, bool discard) 475 unsigned int len, bool checksummed, bool discard)
625{ 476{
@@ -687,7 +538,6 @@ void __efx_rx_packet(struct efx_channel *channel,
687{ 538{
688 struct efx_nic *efx = channel->efx; 539 struct efx_nic *efx = channel->efx;
689 struct sk_buff *skb; 540 struct sk_buff *skb;
690 bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
691 541
692 /* If we're in loopback test, then pass the packet directly to the 542 /* If we're in loopback test, then pass the packet directly to the
693 * loopback layer, and free the rx_buf here 543 * loopback layer, and free the rx_buf here
@@ -709,41 +559,23 @@ void __efx_rx_packet(struct efx_channel *channel,
709 efx->net_dev); 559 efx->net_dev);
710 } 560 }
711 561
712 /* Both our generic-LRO and SFC-SSR support skb and page based 562 if (likely(checksummed || rx_buf->page)) {
713 * allocation, but neither support switching from one to the
714 * other on the fly. If we spot that the allocation mode has
715 * changed, then flush the LRO state.
716 */
717 if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
718 efx_flush_lro(channel);
719 channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
720 }
721 if (likely(checksummed && lro)) {
722 efx_rx_packet_lro(channel, rx_buf); 563 efx_rx_packet_lro(channel, rx_buf);
723 goto done; 564 goto done;
724 } 565 }
725 566
726 /* Form an skb if required */ 567 /* We now own the SKB */
727 if (rx_buf->page) { 568 skb = rx_buf->skb;
728 int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS); 569 rx_buf->skb = NULL;
729 skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
730 if (unlikely(skb == NULL)) {
731 efx_free_rx_buffer(efx, rx_buf);
732 goto done;
733 }
734 } else {
735 /* We now own the SKB */
736 skb = rx_buf->skb;
737 rx_buf->skb = NULL;
738 }
739 570
740 EFX_BUG_ON_PARANOID(rx_buf->page); 571 EFX_BUG_ON_PARANOID(rx_buf->page);
741 EFX_BUG_ON_PARANOID(rx_buf->skb); 572 EFX_BUG_ON_PARANOID(rx_buf->skb);
742 EFX_BUG_ON_PARANOID(!skb); 573 EFX_BUG_ON_PARANOID(!skb);
743 574
744 /* Set the SKB flags */ 575 /* Set the SKB flags */
745 if (unlikely(!checksummed || !efx->rx_checksum_enabled)) 576 skb->ip_summed = CHECKSUM_NONE;
746 skb->ip_summed = CHECKSUM_NONE; 577
578 skb_record_rx_queue(skb, channel->channel);
747 579
748 /* Pass the packet up */ 580 /* Pass the packet up */
749 netif_receive_skb(skb); 581 netif_receive_skb(skb);
@@ -760,7 +592,7 @@ void efx_rx_strategy(struct efx_channel *channel)
760 enum efx_rx_alloc_method method = rx_alloc_method; 592 enum efx_rx_alloc_method method = rx_alloc_method;
761 593
762 /* Only makes sense to use page based allocation if LRO is enabled */ 594 /* Only makes sense to use page based allocation if LRO is enabled */
763 if (!(channel->efx->net_dev->features & NETIF_F_LRO)) { 595 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
764 method = RX_ALLOC_METHOD_SKB; 596 method = RX_ALLOC_METHOD_SKB;
765 } else if (method == RX_ALLOC_METHOD_AUTO) { 597 } else if (method == RX_ALLOC_METHOD_AUTO) {
766 /* Constrain the rx_alloc_level */ 598 /* Constrain the rx_alloc_level */
@@ -865,11 +697,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
865 rx_queue->buffer = NULL; 697 rx_queue->buffer = NULL;
866} 698}
867 699
868void efx_flush_lro(struct efx_channel *channel)
869{
870 lro_flush_all(&channel->lro_mgr);
871}
872
873 700
874module_param(rx_alloc_method, int, 0644); 701module_param(rx_alloc_method, int, 0644);
875MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); 702MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
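
The sfc rx.c rework above drops the driver-private inet_lro plumbing in favour of the stack's GRO entry points: napi_gro_receive() for skb-based buffers and napi_gro_frags() for page-based buffers (the napi_gro_fraginfo usage is exactly as shown in the hunk). The skb path reduces to a few lines; a hedged sketch, with foo_channel being a hypothetical per-channel context embedding the napi instance:

	static void foo_rx_deliver(struct foo_channel *channel, struct sk_buff *skb)
	{
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware already verified the checksum */
		skb_record_rx_queue(skb, channel->index);
		napi_gro_receive(&channel->napi, skb);	/* replaces lro_receive_skb() */
	}

Because GRO is driven from the NAPI instance itself, the explicit lro_flush_all() at the end of each poll cycle also goes away; held packets are flushed when napi_complete() runs.
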
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index 0e88a9ddc1c..42ee7555a80 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -17,9 +17,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17void efx_init_rx_queue(struct efx_rx_queue *rx_queue); 17void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19 19
20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
21void efx_lro_fini(struct net_lro_mgr *lro_mgr);
22void efx_flush_lro(struct efx_channel *channel);
23void efx_rx_strategy(struct efx_channel *channel); 20void efx_rx_strategy(struct efx_channel *channel);
24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 21void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
25void efx_rx_work(struct work_struct *data); 22void efx_rx_work(struct work_struct *data);
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index cb25ae5b257..c0e90683162 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/rtnetlink.h>
27#include "net_driver.h" 28#include "net_driver.h"
28#include "efx.h" 29#include "efx.h"
29#include "phy.h" 30#include "phy.h"
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f0efd246962..ac9eeab79f2 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/rtnetlink.h>
11#include <linux/seq_file.h> 12#include <linux/seq_file.h>
12#include "efx.h" 13#include "efx.h"
13#include "mdio_10g.h" 14#include "mdio_10g.h"
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c9dbb06f8c9..952d37ffee5 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3214 unsigned long flags; 3214 unsigned long flags;
3215 3215
3216 spin_lock_irqsave(&hw->hw_lock, flags); 3216 spin_lock_irqsave(&hw->hw_lock, flags);
3217 __netif_rx_complete(napi); 3217 __napi_complete(napi);
3218 hw->intr_mask |= napimask[skge->port]; 3218 hw->intr_mask |= napimask[skge->port];
3219 skge_write32(hw, B0_IMSK, hw->intr_mask); 3219 skge_write32(hw, B0_IMSK, hw->intr_mask);
3220 skge_read32(hw, B0_IMSK); 3220 skge_read32(hw, B0_IMSK);
@@ -3377,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3377 if (status & (IS_XA1_F|IS_R1_F)) { 3377 if (status & (IS_XA1_F|IS_R1_F)) {
3378 struct skge_port *skge = netdev_priv(hw->dev[0]); 3378 struct skge_port *skge = netdev_priv(hw->dev[0]);
3379 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); 3379 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
3380 netif_rx_schedule(&skge->napi); 3380 napi_schedule(&skge->napi);
3381 } 3381 }
3382 3382
3383 if (status & IS_PA_TO_TX1) 3383 if (status & IS_PA_TO_TX1)
@@ -3397,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3397 3397
3398 if (status & (IS_XA2_F|IS_R2_F)) { 3398 if (status & (IS_XA2_F|IS_R2_F)) {
3399 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); 3399 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
3400 netif_rx_schedule(&skge->napi); 3400 napi_schedule(&skge->napi);
3401 } 3401 }
3402 3402
3403 if (status & IS_PA_TO_RX2) { 3403 if (status & IS_PA_TO_RX2) {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 994703cc0db..d01c56eb962 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1068,13 +1068,16 @@ static void sky2_rx_submit(struct sky2_port *sky2,
1068} 1068}
1069 1069
1070 1070
1071static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, 1071static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1072 unsigned size) 1072 unsigned size)
1073{ 1073{
1074 struct sk_buff *skb = re->skb; 1074 struct sk_buff *skb = re->skb;
1075 int i; 1075 int i;
1076 1076
1077 re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); 1077 re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1078 if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
1079 return -EIO;
1080
1078 pci_unmap_len_set(re, data_size, size); 1081 pci_unmap_len_set(re, data_size, size);
1079 1082
1080 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1083 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -1083,6 +1086,7 @@ static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1083 skb_shinfo(skb)->frags[i].page_offset, 1086 skb_shinfo(skb)->frags[i].page_offset,
1084 skb_shinfo(skb)->frags[i].size, 1087 skb_shinfo(skb)->frags[i].size,
1085 PCI_DMA_FROMDEVICE); 1088 PCI_DMA_FROMDEVICE);
1089 return 0;
1086} 1090}
1087 1091
1088static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) 1092static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
@@ -1354,7 +1358,12 @@ static int sky2_rx_start(struct sky2_port *sky2)
1354 if (!re->skb) 1358 if (!re->skb)
1355 goto nomem; 1359 goto nomem;
1356 1360
1357 sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size); 1361 if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
1362 dev_kfree_skb(re->skb);
1363 re->skb = NULL;
1364 goto nomem;
1365 }
1366
1358 sky2_rx_submit(sky2, re); 1367 sky2_rx_submit(sky2, re);
1359 } 1368 }
1360 1369
@@ -1547,7 +1556,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1547 struct sky2_hw *hw = sky2->hw; 1556 struct sky2_hw *hw = sky2->hw;
1548 struct sky2_tx_le *le = NULL; 1557 struct sky2_tx_le *le = NULL;
1549 struct tx_ring_info *re; 1558 struct tx_ring_info *re;
1550 unsigned i, len; 1559 unsigned i, len, first_slot;
1551 dma_addr_t mapping; 1560 dma_addr_t mapping;
1552 u16 mss; 1561 u16 mss;
1553 u8 ctrl; 1562 u8 ctrl;
@@ -1555,13 +1564,17 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1555 if (unlikely(tx_avail(sky2) < tx_le_req(skb))) 1564 if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1556 return NETDEV_TX_BUSY; 1565 return NETDEV_TX_BUSY;
1557 1566
1558 if (unlikely(netif_msg_tx_queued(sky2)))
1559 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1560 dev->name, sky2->tx_prod, skb->len);
1561
1562 len = skb_headlen(skb); 1567 len = skb_headlen(skb);
1563 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 1568 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1564 1569
1570 if (pci_dma_mapping_error(hw->pdev, mapping))
1571 goto mapping_error;
1572
1573 first_slot = sky2->tx_prod;
1574 if (unlikely(netif_msg_tx_queued(sky2)))
1575 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1576 dev->name, first_slot, skb->len);
1577
1565 /* Send high bits if needed */ 1578 /* Send high bits if needed */
1566 if (sizeof(dma_addr_t) > sizeof(u32)) { 1579 if (sizeof(dma_addr_t) > sizeof(u32)) {
1567 le = get_tx_le(sky2); 1580 le = get_tx_le(sky2);
@@ -1648,6 +1661,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1648 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, 1661 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1649 frag->size, PCI_DMA_TODEVICE); 1662 frag->size, PCI_DMA_TODEVICE);
1650 1663
1664 if (pci_dma_mapping_error(hw->pdev, mapping))
1665 goto mapping_unwind;
1666
1651 if (sizeof(dma_addr_t) > sizeof(u32)) { 1667 if (sizeof(dma_addr_t) > sizeof(u32)) {
1652 le = get_tx_le(sky2); 1668 le = get_tx_le(sky2);
1653 le->addr = cpu_to_le32(upper_32_bits(mapping)); 1669 le->addr = cpu_to_le32(upper_32_bits(mapping));
@@ -1676,6 +1692,34 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1676 1692
1677 dev->trans_start = jiffies; 1693 dev->trans_start = jiffies;
1678 return NETDEV_TX_OK; 1694 return NETDEV_TX_OK;
1695
1696mapping_unwind:
1697 for (i = first_slot; i != sky2->tx_prod; i = RING_NEXT(i, TX_RING_SIZE)) {
1698 le = sky2->tx_le + i;
1699 re = sky2->tx_ring + i;
1700
1701 switch(le->opcode & ~HW_OWNER) {
1702 case OP_LARGESEND:
1703 case OP_PACKET:
1704 pci_unmap_single(hw->pdev,
1705 pci_unmap_addr(re, mapaddr),
1706 pci_unmap_len(re, maplen),
1707 PCI_DMA_TODEVICE);
1708 break;
1709 case OP_BUFFER:
1710 pci_unmap_page(hw->pdev, pci_unmap_addr(re, mapaddr),
1711 pci_unmap_len(re, maplen),
1712 PCI_DMA_TODEVICE);
1713 break;
1714 }
1715 }
1716
1717 sky2->tx_prod = first_slot;
1718mapping_error:
1719 if (net_ratelimit())
1720 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
1721 dev_kfree_skb(skb);
1722 return NETDEV_TX_OK;
1679} 1723}
1680 1724
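The mapping_unwind label added to sky2_xmit_frame() is the standard cleanup for a partially mapped transmit: walk the descriptors filled so far, undo each pci_map_single()/pci_map_page(), rewind the producer index, then free the skb and return NETDEV_TX_OK so the stack does not retry a frame that cannot be mapped. A condensed sketch of just the unwind step under assumed, simplified bookkeeping (struct tx_info and its fields are placeholders):

#include <linux/pci.h>

struct tx_info {			/* placeholder bookkeeping */
	dma_addr_t addr;
	unsigned int len;
	bool is_page;
};

static void unwind_tx_mappings(struct pci_dev *pdev, struct tx_info *ring,
			       unsigned int ring_size, unsigned int first,
			       unsigned int prod)
{
	unsigned int i;

	/* undo everything mapped for slots [first, prod) and nothing else */
	for (i = first; i != prod; i = (i + 1) % ring_size) {
		struct tx_info *ti = &ring[i];

		if (ti->is_page)
			pci_unmap_page(pdev, ti->addr, ti->len, PCI_DMA_TODEVICE);
		else
			pci_unmap_single(pdev, ti->addr, ti->len, PCI_DMA_TODEVICE);
	}
}
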
1681/* 1725/*
@@ -2191,7 +2235,11 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
2191 2235
2192 prefetch(skb->data); 2236 prefetch(skb->data);
2193 re->skb = nskb; 2237 re->skb = nskb;
2194 sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space); 2238 if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
2239 dev_kfree_skb(nskb);
2240 re->skb = skb;
2241 return NULL;
2242 }
2195 2243
2196 if (skb_shinfo(skb)->nr_frags) 2244 if (skb_shinfo(skb)->nr_frags)
2197 skb_put_frags(skb, hdr_space, length); 2245 skb_put_frags(skb, hdr_space, length);
@@ -2687,13 +2735,6 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
2687 goto done; 2735 goto done;
2688 } 2736 }
2689 2737
2690 /* Bug/Errata workaround?
2691 * Need to kick the TX irq moderation timer.
2692 */
2693 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
2694 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
2695 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2696 }
2697 napi_complete(napi); 2738 napi_complete(napi);
2698 sky2_read32(hw, B0_Y2_SP_LISR); 2739 sky2_read32(hw, B0_Y2_SP_LISR);
2699done: 2740done:
@@ -3864,6 +3905,86 @@ static const struct ethtool_ops sky2_ethtool_ops = {
3864 3905
3865static struct dentry *sky2_debug; 3906static struct dentry *sky2_debug;
3866 3907
3908
3909/*
3910 * Read and parse the first part of Vital Product Data
3911 */
3912#define VPD_SIZE 128
3913#define VPD_MAGIC 0x82
3914
3915static const struct vpd_tag {
3916 char tag[2];
3917 char *label;
3918} vpd_tags[] = {
3919 { "PN", "Part Number" },
3920 { "EC", "Engineering Level" },
3921 { "MN", "Manufacturer" },
3922 { "SN", "Serial Number" },
3923 { "YA", "Asset Tag" },
3924 { "VL", "First Error Log Message" },
3925 { "VF", "Second Error Log Message" },
3926 { "VB", "Boot Agent ROM Configuration" },
3927 { "VE", "EFI UNDI Configuration" },
3928};
3929
3930static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
3931{
3932 size_t vpd_size;
3933 loff_t offs;
3934 u8 len;
3935 unsigned char *buf;
3936 u16 reg2;
3937
3938 reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
3939 vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
3940
3941 seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
3942 buf = kmalloc(vpd_size, GFP_KERNEL);
3943 if (!buf) {
3944 seq_puts(seq, "no memory!\n");
3945 return;
3946 }
3947
3948 if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
3949 seq_puts(seq, "VPD read failed\n");
3950 goto out;
3951 }
3952
3953 if (buf[0] != VPD_MAGIC) {
3954 seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
3955 goto out;
3956 }
3957 len = buf[1];
3958 if (len == 0 || len > vpd_size - 4) {
3959 seq_printf(seq, "Invalid id length: %d\n", len);
3960 goto out;
3961 }
3962
3963 seq_printf(seq, "%.*s\n", len, buf + 3);
3964 offs = len + 3;
3965
3966 while (offs < vpd_size - 4) {
3967 int i;
3968
3969 if (!memcmp("RW", buf + offs, 2)) /* end marker */
3970 break;
3971 len = buf[offs + 2];
3972 if (offs + len + 3 >= vpd_size)
3973 break;
3974
3975 for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
3976 if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
3977 seq_printf(seq, " %s: %.*s\n",
3978 vpd_tags[i].label, len, buf + offs + 3);
3979 break;
3980 }
3981 }
3982 offs += len + 3;
3983 }
3984out:
3985 kfree(buf);
3986}
3987
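For reference, the buffer size used by sky2_show_vpd() comes straight from the PCI_VPD_ROM_SZ field of PCI_DEV_REG2: vpd_size = 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8), so field values 0..3 select 256, 512, 1024 or 2048 bytes. A stand-alone check of that arithmetic (assuming, as the shift implies, that the field occupies bits 15:14):

#include <stdio.h>

int main(void)
{
	unsigned int field;

	for (field = 0; field < 4; field++)
		printf("PCI_VPD_ROM_SZ=%u -> %u bytes\n", field, 1u << (field + 8));
	return 0;
}
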
3867static int sky2_debug_show(struct seq_file *seq, void *v) 3988static int sky2_debug_show(struct seq_file *seq, void *v)
3868{ 3989{
3869 struct net_device *dev = seq->private; 3990 struct net_device *dev = seq->private;
@@ -3873,14 +3994,18 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
3873 unsigned idx, last; 3994 unsigned idx, last;
3874 int sop; 3995 int sop;
3875 3996
3876 if (!netif_running(dev)) 3997 sky2_show_vpd(seq, hw);
3877 return -ENETDOWN;
3878 3998
3879 seq_printf(seq, "IRQ src=%x mask=%x control=%x\n", 3999 seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
3880 sky2_read32(hw, B0_ISRC), 4000 sky2_read32(hw, B0_ISRC),
3881 sky2_read32(hw, B0_IMSK), 4001 sky2_read32(hw, B0_IMSK),
3882 sky2_read32(hw, B0_Y2_SP_ICR)); 4002 sky2_read32(hw, B0_Y2_SP_ICR));
3883 4003
4004 if (!netif_running(dev)) {
4005 seq_printf(seq, "network not running\n");
4006 return 0;
4007 }
4008
3884 napi_disable(&hw->napi); 4009 napi_disable(&hw->napi);
3885 last = sky2_read16(hw, STAT_PUT_IDX); 4010 last = sky2_read16(hw, STAT_PUT_IDX);
3886 4011
@@ -4204,69 +4329,6 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
4204 return err; 4329 return err;
4205} 4330}
4206 4331
4207/*
4208 * Read and parse the first part of Vital Product Data
4209 */
4210#define VPD_SIZE 128
4211#define VPD_MAGIC 0x82
4212
4213static void __devinit sky2_vpd_info(struct sky2_hw *hw)
4214{
4215 int cap = pci_find_capability(hw->pdev, PCI_CAP_ID_VPD);
4216 const u8 *p;
4217 u8 *vpd_buf = NULL;
4218 u16 len;
4219 static struct vpd_tag {
4220 char tag[2];
4221 char *label;
4222 } vpd_tags[] = {
4223 { "PN", "Part Number" },
4224 { "EC", "Engineering Level" },
4225 { "MN", "Manufacturer" },
4226 };
4227
4228 if (!cap)
4229 goto out;
4230
4231 vpd_buf = kmalloc(VPD_SIZE, GFP_KERNEL);
4232 if (!vpd_buf)
4233 goto out;
4234
4235 if (sky2_vpd_read(hw, cap, vpd_buf, 0, VPD_SIZE))
4236 goto out;
4237
4238 if (vpd_buf[0] != VPD_MAGIC)
4239 goto out;
4240 len = vpd_buf[1];
4241 if (len == 0 || len > VPD_SIZE - 4)
4242 goto out;
4243 p = vpd_buf + 3;
4244 dev_info(&hw->pdev->dev, "%.*s\n", len, p);
4245 p += len;
4246
4247 while (p < vpd_buf + VPD_SIZE - 4) {
4248 int i;
4249
4250 if (!memcmp("RW", p, 2)) /* end marker */
4251 break;
4252
4253 len = p[2];
4254 if (len > (p - vpd_buf) - 4)
4255 break;
4256
4257 for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
4258 if (!memcmp(vpd_tags[i].tag, p, 2)) {
4259 printk(KERN_DEBUG " %s: %.*s\n",
4260 vpd_tags[i].label, len, p + 3);
4261 break;
4262 }
4263 }
4264 p += len + 3;
4265 }
4266out:
4267 kfree(vpd_buf);
4268}
4269
4270/* This driver supports yukon2 chipset only */ 4332/* This driver supports yukon2 chipset only */
4271static const char *sky2_name(u8 chipid, char *buf, int sz) 4333static const char *sky2_name(u8 chipid, char *buf, int sz)
4272{ 4334{
@@ -4294,6 +4356,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4294 struct net_device *dev; 4356 struct net_device *dev;
4295 struct sky2_hw *hw; 4357 struct sky2_hw *hw;
4296 int err, using_dac = 0, wol_default; 4358 int err, using_dac = 0, wol_default;
4359 u32 reg;
4297 char buf1[16]; 4360 char buf1[16];
4298 4361
4299 err = pci_enable_device(pdev); 4362 err = pci_enable_device(pdev);
@@ -4327,6 +4390,34 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4327 } 4390 }
4328 } 4391 }
4329 4392
4393 /* Get configuration information
4394 * Note: only regular PCI config access once to test for HW issues
4395 * other PCI access through shared memory for speed and to
4396 * avoid MMCONFIG problems.
4397 */
4398 err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
4399 if (err) {
4400 dev_err(&pdev->dev, "PCI read config failed\n");
4401 goto err_out_free_regions;
4402 }
4403
 4404	/* size of available VPD, only impacts sysfs */
4405 err = pci_vpd_truncate(pdev, 1ul << (((reg & PCI_VPD_ROM_SZ) >> 14) + 8));
4406 if (err)
4407 dev_warn(&pdev->dev, "Can't set VPD size\n");
4408
4409#ifdef __BIG_ENDIAN
4410 /* The sk98lin vendor driver uses hardware byte swapping but
4411 * this driver uses software swapping.
4412 */
4413 reg &= ~PCI_REV_DESC;
 4414	err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
4415 if (err) {
4416 dev_err(&pdev->dev, "PCI write config failed\n");
4417 goto err_out_free_regions;
4418 }
4419#endif
4420
4330 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; 4421 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4331 4422
4332 err = -ENOMEM; 4423 err = -ENOMEM;
@@ -4344,18 +4435,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4344 goto err_out_free_hw; 4435 goto err_out_free_hw;
4345 } 4436 }
4346 4437
4347#ifdef __BIG_ENDIAN
4348 /* The sk98lin vendor driver uses hardware byte swapping but
4349 * this driver uses software swapping.
4350 */
4351 {
4352 u32 reg;
4353 reg = sky2_pci_read32(hw, PCI_DEV_REG2);
4354 reg &= ~PCI_REV_DESC;
4355 sky2_pci_write32(hw, PCI_DEV_REG2, reg);
4356 }
4357#endif
4358
4359 /* ring for status responses */ 4438 /* ring for status responses */
4360 hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma); 4439 hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
4361 if (!hw->st_le) 4440 if (!hw->st_le)
@@ -4370,8 +4449,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4370 4449
4371 sky2_reset(hw); 4450 sky2_reset(hw);
4372 4451
4373 sky2_vpd_info(hw);
4374
4375 dev = sky2_init_netdev(hw, 0, using_dac, wol_default); 4452 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
4376 if (!dev) { 4453 if (!dev) {
4377 err = -ENOMEM; 4454 err = -ENOMEM;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 223cde0d43b..293610334a7 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1545,7 +1545,7 @@ smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1545{ 1545{
1546 strncpy(info->driver, CARDNAME, sizeof(info->driver)); 1546 strncpy(info->driver, CARDNAME, sizeof(info->driver));
1547 strncpy(info->version, version, sizeof(info->version)); 1547 strncpy(info->version, version, sizeof(info->version));
1548 strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info)); 1548 strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
1549} 1549}
1550 1550
1551static int smc911x_ethtool_nwayreset(struct net_device *dev) 1551static int smc911x_ethtool_nwayreset(struct net_device *dev)
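
This and the following smc91x/smsc911x hunks replace direct dev->dev.parent->bus_id dereferences with dev_name(), the accessor that keeps working once bus_id becomes dynamically sized. A minimal sketch of an ethtool get_drvinfo callback using it (the "foo" driver name and version string are placeholders):

#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "foo", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	/* dev_name() replaces the old dev->dev.parent->bus_id dereference */
	strlcpy(info->bus_info, dev_name(dev->dev.parent),
		sizeof(info->bus_info));
}
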
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index b215a8d85e6..fdcbaf8dfa7 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1614,7 +1614,7 @@ smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1614{ 1614{
1615 strncpy(info->driver, CARDNAME, sizeof(info->driver)); 1615 strncpy(info->driver, CARDNAME, sizeof(info->driver));
1616 strncpy(info->version, version, sizeof(info->version)); 1616 strncpy(info->version, version, sizeof(info->version));
1617 strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info)); 1617 strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
1618} 1618}
1619 1619
1620static int smc_ethtool_nwayreset(struct net_device *dev) 1620static int smc_ethtool_nwayreset(struct net_device *dev)
@@ -1643,6 +1643,117 @@ static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
1643 lp->msg_enable = level; 1643 lp->msg_enable = level;
1644} 1644}
1645 1645
1646static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
1647{
1648 u16 ctl;
1649 struct smc_local *lp = netdev_priv(dev);
1650 void __iomem *ioaddr = lp->base;
1651
1652 spin_lock_irq(&lp->lock);
1653 /* load word into GP register */
1654 SMC_SELECT_BANK(lp, 1);
1655 SMC_SET_GP(lp, word);
1656 /* set the address to put the data in EEPROM */
1657 SMC_SELECT_BANK(lp, 2);
1658 SMC_SET_PTR(lp, addr);
1659 /* tell it to write */
1660 SMC_SELECT_BANK(lp, 1);
1661 ctl = SMC_GET_CTL(lp);
1662 SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
1663 /* wait for it to finish */
1664 do {
1665 udelay(1);
1666 } while (SMC_GET_CTL(lp) & CTL_STORE);
1667 /* clean up */
1668 SMC_SET_CTL(lp, ctl);
1669 SMC_SELECT_BANK(lp, 2);
1670 spin_unlock_irq(&lp->lock);
1671 return 0;
1672}
1673
1674static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
1675{
1676 u16 ctl;
1677 struct smc_local *lp = netdev_priv(dev);
1678 void __iomem *ioaddr = lp->base;
1679
1680 spin_lock_irq(&lp->lock);
1681 /* set the EEPROM address to get the data from */
1682 SMC_SELECT_BANK(lp, 2);
1683 SMC_SET_PTR(lp, addr | PTR_READ);
1684 /* tell it to load */
1685 SMC_SELECT_BANK(lp, 1);
1686 SMC_SET_GP(lp, 0xffff); /* init to known */
1687 ctl = SMC_GET_CTL(lp);
1688 SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
1689 /* wait for it to finish */
1690 do {
1691 udelay(1);
1692 } while (SMC_GET_CTL(lp) & CTL_RELOAD);
1693 /* read word from GP register */
1694 *word = SMC_GET_GP(lp);
1695 /* clean up */
1696 SMC_SET_CTL(lp, ctl);
1697 SMC_SELECT_BANK(lp, 2);
1698 spin_unlock_irq(&lp->lock);
1699 return 0;
1700}
1701
1702static int smc_ethtool_geteeprom_len(struct net_device *dev)
1703{
1704 return 0x23 * 2;
1705}
1706
1707static int smc_ethtool_geteeprom(struct net_device *dev,
1708 struct ethtool_eeprom *eeprom, u8 *data)
1709{
1710 int i;
1711 int imax;
1712
1713 DBG(1, "Reading %d bytes at %d(0x%x)\n",
1714 eeprom->len, eeprom->offset, eeprom->offset);
1715 imax = smc_ethtool_geteeprom_len(dev);
1716 for (i = 0; i < eeprom->len; i += 2) {
1717 int ret;
1718 u16 wbuf;
1719 int offset = i + eeprom->offset;
1720 if (offset > imax)
1721 break;
1722 ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
1723 if (ret != 0)
1724 return ret;
1725 DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
1726 data[i] = (wbuf >> 8) & 0xff;
1727 data[i+1] = wbuf & 0xff;
1728 }
1729 return 0;
1730}
1731
1732static int smc_ethtool_seteeprom(struct net_device *dev,
1733 struct ethtool_eeprom *eeprom, u8 *data)
1734{
1735 int i;
1736 int imax;
1737
1738 DBG(1, "Writing %d bytes to %d(0x%x)\n",
1739 eeprom->len, eeprom->offset, eeprom->offset);
1740 imax = smc_ethtool_geteeprom_len(dev);
1741 for (i = 0; i < eeprom->len; i += 2) {
1742 int ret;
1743 u16 wbuf;
1744 int offset = i + eeprom->offset;
1745 if (offset > imax)
1746 break;
1747 wbuf = (data[i] << 8) | data[i + 1];
1748 DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
1749 ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
1750 if (ret != 0)
1751 return ret;
1752 }
1753 return 0;
1754}
1755
1756
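The new smc91x callbacks expose 0x23 sixteen-bit configuration words (70 bytes) through the standard ethtool EEPROM interface; the read loop fetches one word at a time and splits it big-endian into two bytes of the output buffer. A small sketch of just that packing step:

#include <linux/types.h>

/* Sketch: how one EEPROM word maps onto the byte stream handed to ethtool. */
static void eeprom_word_to_bytes(u16 word, u8 *out)
{
	out[0] = (word >> 8) & 0xff;	/* high byte first, as in smc_ethtool_geteeprom() */
	out[1] = word & 0xff;
}

With the callbacks wired into smc_ethtool_ops below, the contents should become readable from user space with something like `ethtool -e ethX` (the interface name is a placeholder).
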
1646static const struct ethtool_ops smc_ethtool_ops = { 1757static const struct ethtool_ops smc_ethtool_ops = {
1647 .get_settings = smc_ethtool_getsettings, 1758 .get_settings = smc_ethtool_getsettings,
1648 .set_settings = smc_ethtool_setsettings, 1759 .set_settings = smc_ethtool_setsettings,
@@ -1652,8 +1763,22 @@ static const struct ethtool_ops smc_ethtool_ops = {
1652 .set_msglevel = smc_ethtool_setmsglevel, 1763 .set_msglevel = smc_ethtool_setmsglevel,
1653 .nway_reset = smc_ethtool_nwayreset, 1764 .nway_reset = smc_ethtool_nwayreset,
1654 .get_link = ethtool_op_get_link, 1765 .get_link = ethtool_op_get_link,
1655// .get_eeprom = smc_ethtool_geteeprom, 1766 .get_eeprom_len = smc_ethtool_geteeprom_len,
1656// .set_eeprom = smc_ethtool_seteeprom, 1767 .get_eeprom = smc_ethtool_geteeprom,
1768 .set_eeprom = smc_ethtool_seteeprom,
1769};
1770
1771static const struct net_device_ops smc_netdev_ops = {
1772 .ndo_open = smc_open,
1773 .ndo_stop = smc_close,
1774 .ndo_start_xmit = smc_hard_start_xmit,
1775 .ndo_tx_timeout = smc_timeout,
1776 .ndo_set_multicast_list = smc_set_multicast_list,
1777 .ndo_validate_addr = eth_validate_addr,
1778 .ndo_set_mac_address = eth_mac_addr,
1779#ifdef CONFIG_NET_POLL_CONTROLLER
1780 .ndo_poll_controller = smc_poll_controller,
1781#endif
1657}; 1782};
1658 1783
1659/* 1784/*
@@ -1865,16 +1990,9 @@ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
1865 /* Fill in the fields of the device structure with ethernet values. */ 1990 /* Fill in the fields of the device structure with ethernet values. */
1866 ether_setup(dev); 1991 ether_setup(dev);
1867 1992
1868 dev->open = smc_open;
1869 dev->stop = smc_close;
1870 dev->hard_start_xmit = smc_hard_start_xmit;
1871 dev->tx_timeout = smc_timeout;
1872 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1993 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1873 dev->set_multicast_list = smc_set_multicast_list; 1994 dev->netdev_ops = &smc_netdev_ops;
1874 dev->ethtool_ops = &smc_ethtool_ops; 1995 dev->ethtool_ops = &smc_ethtool_ops;
1875#ifdef CONFIG_NET_POLL_CONTROLLER
1876 dev->poll_controller = smc_poll_controller;
1877#endif
1878 1996
1879 tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); 1997 tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
1880 INIT_WORK(&lp->phy_configure, smc_phy_configure); 1998 INIT_WORK(&lp->phy_configure, smc_phy_configure);
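
smc91x is one of several drivers in this series converted from the old per-netdev function pointers (dev->open, dev->stop, dev->hard_start_xmit, ...) to a single shared struct net_device_ops assigned in probe. The general shape, sketched with hypothetical foo_* handlers that only stand in for a driver's real ones:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int foo_open(struct net_device *dev)  { return 0; }	/* placeholders */
static int foo_close(struct net_device *dev) { return 0; }
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* placeholder: a real driver queues the frame */
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open            = foo_open,
	.ndo_stop            = foo_close,
	.ndo_start_xmit      = foo_start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

/* In probe:  dev->netdev_ops = &foo_netdev_ops;  replaces the individual
 * dev->open / dev->stop / dev->hard_start_xmit assignments removed above. */
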
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index c4ccd121bc9..ed9ae43523a 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -1141,6 +1141,16 @@ static const char * chip_ids[ 16 ] = {
1141 1141
1142#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp)) 1142#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))
1143 1143
1144#define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp))
1145
1146#define SMC_SET_GP(lp, x) \
1147 do { \
1148 if (SMC_MUST_ALIGN_WRITE(lp)) \
1149 SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
1150 else \
1151 SMC_outw(x, ioaddr, GP_REG(lp)); \
1152 } while (0)
1153
1144#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp)) 1154#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp))
1145 1155
1146#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp)) 1156#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 783c1a7b869..6e175e5555a 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -368,48 +368,53 @@ out:
368 return reg; 368 return reg;
369} 369}
370 370
371/* Autodetects and initialises external phy for SMSC9115 and SMSC9117 flavors. 371/* Switch to external phy. Assumes tx and rx are stopped. */
372 * If something goes wrong, returns -ENODEV to revert back to internal phy. 372static void smsc911x_phy_enable_external(struct smsc911x_data *pdata)
373 * Performed at initialisation only, so interrupts are enabled */
374static int smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
375{ 373{
376 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); 374 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
377 375
378 /* External phy is requested, supported, and detected */ 376 /* Disable phy clocks to the MAC */
379 if (hwcfg & HW_CFG_EXT_PHY_DET_) { 377 hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
378 hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
379 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
380 udelay(10); /* Enough time for clocks to stop */
380 381
381 /* Switch to external phy. Assuming tx and rx are stopped 382 /* Switch to external phy */
382 * because smsc911x_phy_initialise is called before 383 hwcfg |= HW_CFG_EXT_PHY_EN_;
383 * smsc911x_rx_initialise and tx_initialise. */ 384 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
384 385
385 /* Disable phy clocks to the MAC */ 386 /* Enable phy clocks to the MAC */
386 hwcfg &= (~HW_CFG_PHY_CLK_SEL_); 387 hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
387 hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; 388 hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
388 smsc911x_reg_write(pdata, HW_CFG, hwcfg); 389 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
389 udelay(10); /* Enough time for clocks to stop */ 390 udelay(10); /* Enough time for clocks to restart */
390 391
391 /* Switch to external phy */ 392 hwcfg |= HW_CFG_SMI_SEL_;
392 hwcfg |= HW_CFG_EXT_PHY_EN_; 393 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
393 smsc911x_reg_write(pdata, HW_CFG, hwcfg); 394}
394
395 /* Enable phy clocks to the MAC */
396 hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
397 hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
398 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
399 udelay(10); /* Enough time for clocks to restart */
400 395
401 hwcfg |= HW_CFG_SMI_SEL_; 396/* Autodetects and enables external phy if present on supported chips.
402 smsc911x_reg_write(pdata, HW_CFG, hwcfg); 397 * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY
398 * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. */
399static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
400{
401 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
403 402
404 SMSC_TRACE(HW, "Successfully switched to external PHY"); 403 if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) {
404 SMSC_TRACE(HW, "Forcing internal PHY");
405 pdata->using_extphy = 0;
406 } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) {
407 SMSC_TRACE(HW, "Forcing external PHY");
408 smsc911x_phy_enable_external(pdata);
409 pdata->using_extphy = 1;
410 } else if (hwcfg & HW_CFG_EXT_PHY_DET_) {
411 SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET set, using external PHY");
412 smsc911x_phy_enable_external(pdata);
405 pdata->using_extphy = 1; 413 pdata->using_extphy = 1;
406 } else { 414 } else {
407 SMSC_WARNING(HW, "No external PHY detected, " 415 SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET clear, using internal PHY");
408 "Using internal PHY instead."); 416 pdata->using_extphy = 0;
409 /* Use internal phy */
410 return -ENODEV;
411 } 417 }
412 return 0;
413} 418}
414 419
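The rewritten smsc911x PHY selection lets a board force either PHY through platform data instead of relying only on the HW_CFG_EXT_PHY_DET_ strap. A hedged sketch of the board-side configuration; only the flags and phy_interface fields and the SMSC911X_FORCE_EXTERNAL_PHY value are taken from this patch, while the device name, id and the omitted memory/IRQ resources are assumptions:

#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>

static struct smsc911x_platform_config board_smsc911x_config = {
	.flags         = SMSC911X_FORCE_EXTERNAL_PHY,	/* or SMSC911X_FORCE_INTERNAL_PHY */
	.phy_interface = PHY_INTERFACE_MODE_MII,
};

static struct platform_device board_smsc911x_device = {
	.name = "smsc911x",
	.id   = 0,
	.dev  = {
		.platform_data = &board_smsc911x_config,
	},
	/* memory and IRQ resources omitted from this sketch */
};
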
415/* Fetches a tx status out of the status fifo */ 420/* Fetches a tx status out of the status fifo */
@@ -769,7 +774,7 @@ static int smsc911x_mii_probe(struct net_device *dev)
769 return -ENODEV; 774 return -ENODEV;
770 } 775 }
771 776
772 phydev = phy_connect(dev, phydev->dev.bus_id, 777 phydev = phy_connect(dev, dev_name(&phydev->dev),
773 &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface); 778 &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface);
774 779
775 if (IS_ERR(phydev)) { 780 if (IS_ERR(phydev)) {
@@ -778,7 +783,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
778 } 783 }
779 784
780 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 785 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
781 dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); 786 dev->name, phydev->drv->name,
787 dev_name(&phydev->dev), phydev->irq);
782 788
783 /* mask with MAC supported features */ 789 /* mask with MAC supported features */
784 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | 790 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -824,22 +830,18 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
824 830
825 pdata->mii_bus->parent = &pdev->dev; 831 pdata->mii_bus->parent = &pdev->dev;
826 832
827 pdata->using_extphy = 0;
828
829 switch (pdata->idrev & 0xFFFF0000) { 833 switch (pdata->idrev & 0xFFFF0000) {
830 case 0x01170000: 834 case 0x01170000:
831 case 0x01150000: 835 case 0x01150000:
832 case 0x117A0000: 836 case 0x117A0000:
833 case 0x115A0000: 837 case 0x115A0000:
834 /* External PHY supported, try to autodetect */ 838 /* External PHY supported, try to autodetect */
835 if (smsc911x_phy_initialise_external(pdata) < 0) { 839 smsc911x_phy_initialise_external(pdata);
836 SMSC_TRACE(HW, "No external PHY detected, "
837 "using internal PHY");
838 }
839 break; 840 break;
840 default: 841 default:
841 SMSC_TRACE(HW, "External PHY is not supported, " 842 SMSC_TRACE(HW, "External PHY is not supported, "
842 "using internal PHY"); 843 "using internal PHY");
844 pdata->using_extphy = 0;
843 break; 845 break;
844 } 846 }
845 847
@@ -984,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
984 /* We processed all packets available. Tell NAPI it can 986 /* We processed all packets available. Tell NAPI it can
985 * stop polling then re-enable rx interrupts */ 987 * stop polling then re-enable rx interrupts */
986 smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); 988 smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_);
987 netif_rx_complete(napi); 989 napi_complete(napi);
988 temp = smsc911x_reg_read(pdata, INT_EN); 990 temp = smsc911x_reg_read(pdata, INT_EN);
989 temp |= INT_EN_RSFL_EN_; 991 temp |= INT_EN_RSFL_EN_;
990 smsc911x_reg_write(pdata, INT_EN, temp); 992 smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1246,7 +1248,7 @@ static int smsc911x_open(struct net_device *dev)
1246 napi_enable(&pdata->napi); 1248 napi_enable(&pdata->napi);
1247 1249
1248 temp = smsc911x_reg_read(pdata, INT_EN); 1250 temp = smsc911x_reg_read(pdata, INT_EN);
1249 temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_); 1251 temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_);
1250 smsc911x_reg_write(pdata, INT_EN, temp); 1252 smsc911x_reg_write(pdata, INT_EN, temp);
1251 1253
1252 spin_lock_irq(&pdata->mac_lock); 1254 spin_lock_irq(&pdata->mac_lock);
@@ -1418,11 +1420,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1418 1420
1419 /* Request the hardware to stop, then perform the 1421 /* Request the hardware to stop, then perform the
1420 * update when we get an RX_STOP interrupt */ 1422 * update when we get an RX_STOP interrupt */
1421 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1422 temp = smsc911x_reg_read(pdata, INT_EN);
1423 temp |= INT_EN_RXSTOP_INT_EN_;
1424 smsc911x_reg_write(pdata, INT_EN, temp);
1425
1426 temp = smsc911x_mac_read(pdata, MAC_CR); 1423 temp = smsc911x_mac_read(pdata, MAC_CR);
1427 temp &= ~(MAC_CR_RXEN_); 1424 temp &= ~(MAC_CR_RXEN_);
1428 smsc911x_mac_write(pdata, MAC_CR, temp); 1425 smsc911x_mac_write(pdata, MAC_CR, temp);
@@ -1461,11 +1458,9 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1461 /* Called when there is a multicast update scheduled and 1458 /* Called when there is a multicast update scheduled and
1462 * it is now safe to complete the update */ 1459 * it is now safe to complete the update */
1463 SMSC_TRACE(INTR, "RX Stop interrupt"); 1460 SMSC_TRACE(INTR, "RX Stop interrupt");
1464 temp = smsc911x_reg_read(pdata, INT_EN);
1465 temp &= (~INT_EN_RXSTOP_INT_EN_);
1466 smsc911x_reg_write(pdata, INT_EN, temp);
1467 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); 1461 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1468 smsc911x_rx_multicast_update_workaround(pdata); 1462 if (pdata->multicast_update_pending)
1463 smsc911x_rx_multicast_update_workaround(pdata);
1469 serviced = IRQ_HANDLED; 1464 serviced = IRQ_HANDLED;
1470 } 1465 }
1471 1466
@@ -1485,16 +1480,16 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1485 } 1480 }
1486 1481
1487 if (likely(intsts & inten & INT_STS_RSFL_)) { 1482 if (likely(intsts & inten & INT_STS_RSFL_)) {
1488 if (likely(netif_rx_schedule_prep(&pdata->napi))) { 1483 if (likely(napi_schedule_prep(&pdata->napi))) {
1489 /* Disable Rx interrupts */ 1484 /* Disable Rx interrupts */
1490 temp = smsc911x_reg_read(pdata, INT_EN); 1485 temp = smsc911x_reg_read(pdata, INT_EN);
1491 temp &= (~INT_EN_RSFL_EN_); 1486 temp &= (~INT_EN_RSFL_EN_);
1492 smsc911x_reg_write(pdata, INT_EN, temp); 1487 smsc911x_reg_write(pdata, INT_EN, temp);
1493 /* Schedule a NAPI poll */ 1488 /* Schedule a NAPI poll */
1494 __netif_rx_schedule(&pdata->napi); 1489 __napi_schedule(&pdata->napi);
1495 } else { 1490 } else {
1496 SMSC_WARNING(RX_ERR, 1491 SMSC_WARNING(RX_ERR,
1497 "netif_rx_schedule_prep failed"); 1492 "napi_schedule_prep failed");
1498 } 1493 }
1499 serviced = IRQ_HANDLED; 1494 serviced = IRQ_HANDLED;
1500 } 1495 }
@@ -1545,7 +1540,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
1545{ 1540{
1546 strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); 1541 strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
1547 strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); 1542 strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
1548 strlcpy(info->bus_info, dev->dev.parent->bus_id, 1543 strlcpy(info->bus_info, dev_name(dev->dev.parent),
1549 sizeof(info->bus_info)); 1544 sizeof(info->bus_info));
1550} 1545}
1551 1546
@@ -1747,6 +1742,21 @@ static const struct net_device_ops smsc911x_netdev_ops = {
1747#endif 1742#endif
1748}; 1743};
1749 1744
1745/* copies the current mac address from hardware to dev->dev_addr */
1746static void __devinit smsc911x_read_mac_address(struct net_device *dev)
1747{
1748 struct smsc911x_data *pdata = netdev_priv(dev);
1749 u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
1750 u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
1751
1752 dev->dev_addr[0] = (u8)(mac_low32);
1753 dev->dev_addr[1] = (u8)(mac_low32 >> 8);
1754 dev->dev_addr[2] = (u8)(mac_low32 >> 16);
1755 dev->dev_addr[3] = (u8)(mac_low32 >> 24);
1756 dev->dev_addr[4] = (u8)(mac_high16);
1757 dev->dev_addr[5] = (u8)(mac_high16 >> 8);
1758}
1759
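smsc911x_read_mac_address() just unpacks the two MAC address registers byte by byte, lowest byte of ADDRL first. As a concrete check of the ordering: if ADDRL reads 0x04030201 and ADDRH reads 0x0605, dev_addr ends up as 01:02:03:04:05:06. A stand-alone sketch of the same unpacking with those example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addrl = 0x04030201;	/* example register values, not real hardware */
	uint32_t addrh = 0x0605;
	uint8_t mac[6] = {
		(uint8_t)addrl, (uint8_t)(addrl >> 8),
		(uint8_t)(addrl >> 16), (uint8_t)(addrl >> 24),
		(uint8_t)addrh, (uint8_t)(addrh >> 8),
	};

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
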
1750/* Initializing private device structures, only called from probe */ 1760/* Initializing private device structures, only called from probe */
1751static int __devinit smsc911x_init(struct net_device *dev) 1761static int __devinit smsc911x_init(struct net_device *dev)
1752{ 1762{
@@ -1834,6 +1844,12 @@ static int __devinit smsc911x_init(struct net_device *dev)
1834 SMSC_WARNING(PROBE, 1844 SMSC_WARNING(PROBE,
1835 "This driver is not intended for this chip revision"); 1845 "This driver is not intended for this chip revision");
1836 1846
1847 /* workaround for platforms without an eeprom, where the mac address
1848 * is stored elsewhere and set by the bootloader. This saves the
1849 * mac address before resetting the device */
1850 if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
1851 smsc911x_read_mac_address(dev);
1852
1837 /* Reset the LAN911x */ 1853 /* Reset the LAN911x */
1838 if (smsc911x_soft_reset(pdata)) 1854 if (smsc911x_soft_reset(pdata))
1839 return -ENODEV; 1855 return -ENODEV;
@@ -1892,9 +1908,9 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1892 struct net_device *dev; 1908 struct net_device *dev;
1893 struct smsc911x_data *pdata; 1909 struct smsc911x_data *pdata;
1894 struct smsc911x_platform_config *config = pdev->dev.platform_data; 1910 struct smsc911x_platform_config *config = pdev->dev.platform_data;
1895 struct resource *res; 1911 struct resource *res, *irq_res;
1896 unsigned int intcfg = 0; 1912 unsigned int intcfg = 0;
1897 int res_size; 1913 int res_size, irq_flags;
1898 int retval; 1914 int retval;
1899 DECLARE_MAC_BUF(mac); 1915 DECLARE_MAC_BUF(mac);
1900 1916
@@ -1919,6 +1935,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1919 } 1935 }
1920 res_size = res->end - res->start; 1936 res_size = res->end - res->start;
1921 1937
1938 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1939 if (!irq_res) {
1940 pr_warning("%s: Could not allocate irq resource.\n",
1941 SMSC_CHIPNAME);
1942 retval = -ENODEV;
1943 goto out_0;
1944 }
1945
1922 if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { 1946 if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) {
1923 retval = -EBUSY; 1947 retval = -EBUSY;
1924 goto out_0; 1948 goto out_0;
@@ -1935,7 +1959,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1935 1959
1936 pdata = netdev_priv(dev); 1960 pdata = netdev_priv(dev);
1937 1961
1938 dev->irq = platform_get_irq(pdev, 0); 1962 dev->irq = irq_res->start;
1963 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
1939 pdata->ioaddr = ioremap_nocache(res->start, res_size); 1964 pdata->ioaddr = ioremap_nocache(res->start, res_size);
1940 1965
1941 /* copy config parameters across to pdata */ 1966 /* copy config parameters across to pdata */
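
The probe path now takes the interrupt from an IORESOURCE_IRQ resource and forwards its trigger bits (plus IRQF_SHARED) to request_irq(), rather than hard-coding IRQF_DISABLED. The masking with IRQF_TRIGGER_MASK above assumes the IORESOURCE_IRQ_* trigger bits line up with the IRQF_TRIGGER_* values. A hedged sketch of the board-side resource (the IRQ number and trigger level are placeholders):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource board_smsc911x_irq = {
	.start = 42,						/* placeholder IRQ number */
	.end   = 42,
	.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,	/* trigger carried in the resource */
};
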
@@ -1968,8 +1993,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1968 smsc911x_reg_write(pdata, INT_EN, 0); 1993 smsc911x_reg_write(pdata, INT_EN, 0);
1969 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); 1994 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
1970 1995
1971 retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED, 1996 retval = request_irq(dev->irq, smsc911x_irqhandler,
1972 dev->name, dev); 1997 irq_flags | IRQF_SHARED, dev->name, dev);
1973 if (retval) { 1998 if (retval) {
1974 SMSC_WARNING(PROBE, 1999 SMSC_WARNING(PROBE,
1975 "Unable to claim requested irq: %d", dev->irq); 2000 "Unable to claim requested irq: %d", dev->irq);
@@ -2005,14 +2030,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2005 } else { 2030 } else {
2006 /* Try reading mac address from device. if EEPROM is present 2031 /* Try reading mac address from device. if EEPROM is present
2007 * it will already have been set */ 2032 * it will already have been set */
2008 u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); 2033 smsc911x_read_mac_address(dev);
2009 u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
2010 dev->dev_addr[0] = (u8)(mac_low32);
2011 dev->dev_addr[1] = (u8)(mac_low32 >> 8);
2012 dev->dev_addr[2] = (u8)(mac_low32 >> 16);
2013 dev->dev_addr[3] = (u8)(mac_low32 >> 24);
2014 dev->dev_addr[4] = (u8)(mac_high16);
2015 dev->dev_addr[5] = (u8)(mac_high16 >> 8);
2016 2034
2017 if (is_valid_ether_addr(dev->dev_addr)) { 2035 if (is_valid_ether_addr(dev->dev_addr)) {
2018 /* eeprom values are valid so use them */ 2036 /* eeprom values are valid so use them */
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index a1e4b3895b3..da8b977a535 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -666,7 +666,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
666 smsc9420_pci_flush_write(pd); 666 smsc9420_pci_flush_write(pd);
667 667
668 ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); 668 ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_);
669 netif_rx_schedule(&pd->napi); 669 napi_schedule(&pd->napi);
670 } 670 }
671 671
672 if (ints_to_clear) 672 if (ints_to_clear)
@@ -889,7 +889,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
889 smsc9420_pci_flush_write(pd); 889 smsc9420_pci_flush_write(pd);
890 890
891 if (work_done < budget) { 891 if (work_done < budget) {
892 netif_rx_complete(&pd->napi); 892 napi_complete(&pd->napi);
893 893
894 /* re-enable RX DMA interrupts */ 894 /* re-enable RX DMA interrupts */
895 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); 895 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
@@ -1156,7 +1156,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
1156 smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr, 1156 smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr,
1157 phydev->phy_id); 1157 phydev->phy_id);
1158 1158
1159 phydev = phy_connect(dev, phydev->dev.bus_id, 1159 phydev = phy_connect(dev, dev_name(&phydev->dev),
1160 &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1160 &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
1161 1161
1162 if (IS_ERR(phydev)) { 1162 if (IS_ERR(phydev)) {
@@ -1165,7 +1165,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
1165 } 1165 }
1166 1166
1167 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 1167 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1168 dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); 1168 dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
1169 1169
1170 /* mask with MAC supported features */ 1170 /* mask with MAC supported features */
1171 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | 1171 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 88d2c67788d..7f6b4a4052e 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1301,7 +1301,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
1301 /* if all packets are in the stack, enable interrupts and return 0 */ 1301 /* if all packets are in the stack, enable interrupts and return 0 */
1302 /* if not, return 1 */ 1302 /* if not, return 1 */
1303 if (packets_done < budget) { 1303 if (packets_done < budget) {
1304 netif_rx_complete(napi); 1304 napi_complete(napi);
1305 spider_net_rx_irq_on(card); 1305 spider_net_rx_irq_on(card);
1306 card->ignore_rx_ramfull = 0; 1306 card->ignore_rx_ramfull = 0;
1307 } 1307 }
@@ -1528,7 +1528,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1528 spider_net_refill_rx_chain(card); 1528 spider_net_refill_rx_chain(card);
1529 spider_net_enable_rxdmac(card); 1529 spider_net_enable_rxdmac(card);
1530 card->num_rx_ints ++; 1530 card->num_rx_ints ++;
1531 netif_rx_schedule(&card->napi); 1531 napi_schedule(&card->napi);
1532 } 1532 }
1533 show_error = 0; 1533 show_error = 0;
1534 break; 1534 break;
@@ -1548,7 +1548,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1548 spider_net_refill_rx_chain(card); 1548 spider_net_refill_rx_chain(card);
1549 spider_net_enable_rxdmac(card); 1549 spider_net_enable_rxdmac(card);
1550 card->num_rx_ints ++; 1550 card->num_rx_ints ++;
1551 netif_rx_schedule(&card->napi); 1551 napi_schedule(&card->napi);
1552 show_error = 0; 1552 show_error = 0;
1553 break; 1553 break;
1554 1554
@@ -1562,7 +1562,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1562 spider_net_refill_rx_chain(card); 1562 spider_net_refill_rx_chain(card);
1563 spider_net_enable_rxdmac(card); 1563 spider_net_enable_rxdmac(card);
1564 card->num_rx_ints ++; 1564 card->num_rx_ints ++;
1565 netif_rx_schedule(&card->napi); 1565 napi_schedule(&card->napi);
1566 show_error = 0; 1566 show_error = 0;
1567 break; 1567 break;
1568 1568
@@ -1656,11 +1656,11 @@ spider_net_interrupt(int irq, void *ptr)
1656 1656
1657 if (status_reg & SPIDER_NET_RXINT ) { 1657 if (status_reg & SPIDER_NET_RXINT ) {
1658 spider_net_rx_irq_off(card); 1658 spider_net_rx_irq_off(card);
1659 netif_rx_schedule(&card->napi); 1659 napi_schedule(&card->napi);
1660 card->num_rx_ints ++; 1660 card->num_rx_ints ++;
1661 } 1661 }
1662 if (status_reg & SPIDER_NET_TXINT) 1662 if (status_reg & SPIDER_NET_TXINT)
1663 netif_rx_schedule(&card->napi); 1663 napi_schedule(&card->napi);
1664 1664
1665 if (status_reg & SPIDER_NET_LINKINT) 1665 if (status_reg & SPIDER_NET_LINKINT)
1666 spider_net_link_reset(netdev); 1666 spider_net_link_reset(netdev);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index da3a76b18ef..98fe79515ba 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1342,8 +1342,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1342 if (intr_status & (IntrRxDone | IntrRxEmpty)) { 1342 if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1343 u32 enable; 1343 u32 enable;
1344 1344
1345 if (likely(netif_rx_schedule_prep(&np->napi))) { 1345 if (likely(napi_schedule_prep(&np->napi))) {
1346 __netif_rx_schedule(&np->napi); 1346 __napi_schedule(&np->napi);
1347 enable = readl(ioaddr + IntrEnable); 1347 enable = readl(ioaddr + IntrEnable);
1348 enable &= ~(IntrRxDone | IntrRxEmpty); 1348 enable &= ~(IntrRxDone | IntrRxEmpty);
1349 writel(enable, ioaddr + IntrEnable); 1349 writel(enable, ioaddr + IntrEnable);
@@ -1587,7 +1587,7 @@ static int netdev_poll(struct napi_struct *napi, int budget)
1587 intr_status = readl(ioaddr + IntrStatus); 1587 intr_status = readl(ioaddr + IntrStatus);
1588 } while (intr_status & (IntrRxDone | IntrRxEmpty)); 1588 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1589 1589
1590 netif_rx_complete(napi); 1590 napi_complete(napi);
1591 intr_status = readl(ioaddr + IntrEnable); 1591 intr_status = readl(ioaddr + IntrEnable);
1592 intr_status |= IntrRxDone | IntrRxEmpty; 1592 intr_status |= IntrRxDone | IntrRxEmpty;
1593 writel(intr_status, ioaddr + IntrEnable); 1593 writel(intr_status, ioaddr + IntrEnable);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 49187634106..5322bb79b2b 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -921,7 +921,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
921 gp->status = readl(gp->regs + GREG_STAT); 921 gp->status = readl(gp->regs + GREG_STAT);
922 } while (gp->status & GREG_STAT_NAPI); 922 } while (gp->status & GREG_STAT_NAPI);
923 923
924 __netif_rx_complete(napi); 924 __napi_complete(napi);
925 gem_enable_ints(gp); 925 gem_enable_ints(gp);
926 926
927 spin_unlock_irqrestore(&gp->lock, flags); 927 spin_unlock_irqrestore(&gp->lock, flags);
@@ -944,7 +944,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
944 944
945 spin_lock_irqsave(&gp->lock, flags); 945 spin_lock_irqsave(&gp->lock, flags);
946 946
947 if (netif_rx_schedule_prep(&gp->napi)) { 947 if (napi_schedule_prep(&gp->napi)) {
948 u32 gem_status = readl(gp->regs + GREG_STAT); 948 u32 gem_status = readl(gp->regs + GREG_STAT);
949 949
950 if (gem_status == 0) { 950 if (gem_status == 0) {
@@ -954,7 +954,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
954 } 954 }
955 gp->status = gem_status; 955 gp->status = gem_status;
956 gem_disable_ints(gp); 956 gem_disable_ints(gp);
957 __netif_rx_schedule(&gp->napi); 957 __napi_schedule(&gp->napi);
958 } 958 }
959 959
960 spin_unlock_irqrestore(&gp->lock, flags); 960 spin_unlock_irqrestore(&gp->lock, flags);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index bcd0e60cbda..b52a1c088f3 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -725,7 +725,7 @@ static int tc_mii_probe(struct net_device *dev)
725 } 725 }
726 726
727 /* attach the mac to the phy */ 727 /* attach the mac to the phy */
728 phydev = phy_connect(dev, phydev->dev.bus_id, 728 phydev = phy_connect(dev, dev_name(&phydev->dev),
729 &tc_handle_link_change, 0, 729 &tc_handle_link_change, 0,
730 lp->chiptype == TC35815_TX4939 ? 730 lp->chiptype == TC35815_TX4939 ?
731 PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); 731 PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
@@ -735,7 +735,7 @@ static int tc_mii_probe(struct net_device *dev)
735 } 735 }
736 printk(KERN_INFO "%s: attached PHY driver [%s] " 736 printk(KERN_INFO "%s: attached PHY driver [%s] "
737 "(mii_bus:phy_addr=%s, id=%x)\n", 737 "(mii_bus:phy_addr=%s, id=%x)\n",
738 dev->name, phydev->drv->name, phydev->dev.bus_id, 738 dev->name, phydev->drv->name, dev_name(&phydev->dev),
739 phydev->phy_id); 739 phydev->phy_id);
740 740
741 /* mask with MAC supported features */ 741 /* mask with MAC supported features */
@@ -1609,8 +1609,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1609 if (!(dmactl & DMA_IntMask)) { 1609 if (!(dmactl & DMA_IntMask)) {
1610 /* disable interrupts */ 1610 /* disable interrupts */
1611 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); 1611 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
1612 if (netif_rx_schedule_prep(&lp->napi)) 1612 if (napi_schedule_prep(&lp->napi))
1613 __netif_rx_schedule(&lp->napi); 1613 __napi_schedule(&lp->napi);
1614 else { 1614 else {
1615 printk(KERN_ERR "%s: interrupt taken in poll\n", 1615 printk(KERN_ERR "%s: interrupt taken in poll\n",
1616 dev->name); 1616 dev->name);
@@ -1919,7 +1919,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1919 spin_unlock(&lp->lock); 1919 spin_unlock(&lp->lock);
1920 1920
1921 if (received < budget) { 1921 if (received < budget) {
1922 netif_rx_complete(napi); 1922 napi_complete(napi);
1923 /* enable interrupts */ 1923 /* enable interrupts */
1924 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); 1924 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
1925 } 1925 }
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index a7a4dc4d631..be9f38f8f0b 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -265,8 +265,8 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
265 bdx_isr_extra(priv, isr); 265 bdx_isr_extra(priv, isr);
266 266
267 if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { 267 if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
268 if (likely(netif_rx_schedule_prep(&priv->napi))) { 268 if (likely(napi_schedule_prep(&priv->napi))) {
269 __netif_rx_schedule(&priv->napi); 269 __napi_schedule(&priv->napi);
270 RET(IRQ_HANDLED); 270 RET(IRQ_HANDLED);
271 } else { 271 } else {
272 /* NOTE: we get here if intr has slipped into window 272 /* NOTE: we get here if intr has slipped into window
@@ -302,7 +302,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
302 * device lock and allow waiting tasks (eg rmmod) to advance) */ 302 * device lock and allow waiting tasks (eg rmmod) to advance) */
303 priv->napi_stop = 0; 303 priv->napi_stop = 0;
304 304
305 netif_rx_complete(napi); 305 napi_complete(napi);
306 bdx_enable_interrupts(priv); 306 bdx_enable_interrupts(priv);
307 } 307 }
308 return work_done; 308 return work_done;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 4595962fb8e..479a37f75f3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -860,7 +860,7 @@ static int tg3_bmcr_reset(struct tg3 *tp)
860 860
861static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) 861static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
862{ 862{
863 struct tg3 *tp = (struct tg3 *)bp->priv; 863 struct tg3 *tp = bp->priv;
864 u32 val; 864 u32 val;
865 865
866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
@@ -874,7 +874,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
874 874
875static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) 875static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
876{ 876{
877 struct tg3 *tp = (struct tg3 *)bp->priv; 877 struct tg3 *tp = bp->priv;
878 878
879 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 879 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
880 return -EAGAIN; 880 return -EAGAIN;
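
The tg3 change here is purely cosmetic: bp->priv is a void *, and C converts void * to any object pointer type implicitly, so the cast adds nothing. A trivial illustration:

#include <stdlib.h>

struct tg3_like { int dummy; };

int main(void)
{
	void *priv = malloc(sizeof(struct tg3_like));
	struct tg3_like *tp = priv;	/* no cast needed in C (it would be in C++) */

	free(tp);
	return 0;
}
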
@@ -4460,7 +4460,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4460 sblk->status &= ~SD_STATUS_UPDATED; 4460 sblk->status &= ~SD_STATUS_UPDATED;
4461 4461
4462 if (likely(!tg3_has_work(tp))) { 4462 if (likely(!tg3_has_work(tp))) {
4463 netif_rx_complete(napi); 4463 napi_complete(napi);
4464 tg3_restart_ints(tp); 4464 tg3_restart_ints(tp);
4465 break; 4465 break;
4466 } 4466 }
@@ -4470,7 +4470,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4470 4470
4471tx_recovery: 4471tx_recovery:
4472 /* work_done is guaranteed to be less than budget. */ 4472 /* work_done is guaranteed to be less than budget. */
4473 netif_rx_complete(napi); 4473 napi_complete(napi);
4474 schedule_work(&tp->reset_task); 4474 schedule_work(&tp->reset_task);
4475 return work_done; 4475 return work_done;
4476} 4476}
@@ -4519,7 +4519,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4519 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4519 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4520 4520
4521 if (likely(!tg3_irq_sync(tp))) 4521 if (likely(!tg3_irq_sync(tp)))
4522 netif_rx_schedule(&tp->napi); 4522 napi_schedule(&tp->napi);
4523 4523
4524 return IRQ_HANDLED; 4524 return IRQ_HANDLED;
4525} 4525}
@@ -4544,7 +4544,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
4544 */ 4544 */
4545 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 4545 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4546 if (likely(!tg3_irq_sync(tp))) 4546 if (likely(!tg3_irq_sync(tp)))
4547 netif_rx_schedule(&tp->napi); 4547 napi_schedule(&tp->napi);
4548 4548
4549 return IRQ_RETVAL(1); 4549 return IRQ_RETVAL(1);
4550} 4550}
@@ -4586,7 +4586,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4586 sblk->status &= ~SD_STATUS_UPDATED; 4586 sblk->status &= ~SD_STATUS_UPDATED;
4587 if (likely(tg3_has_work(tp))) { 4587 if (likely(tg3_has_work(tp))) {
4588 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4588 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4589 netif_rx_schedule(&tp->napi); 4589 napi_schedule(&tp->napi);
4590 } else { 4590 } else {
4591 /* No work, shared interrupt perhaps? re-enable 4591 /* No work, shared interrupt perhaps? re-enable
4592 * interrupts, and flush that PCI write 4592 * interrupts, and flush that PCI write
@@ -4632,7 +4632,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4632 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 4632 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4633 if (tg3_irq_sync(tp)) 4633 if (tg3_irq_sync(tp))
4634 goto out; 4634 goto out;
4635 if (netif_rx_schedule_prep(&tp->napi)) { 4635 if (napi_schedule_prep(&tp->napi)) {
4636 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4636 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4637 /* Update last_tag to mark that this status has been 4637 /* Update last_tag to mark that this status has been
4638 * seen. Because interrupt may be shared, we may be 4638 * seen. Because interrupt may be shared, we may be
@@ -4640,7 +4640,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4640 * if tg3_poll() is not scheduled. 4640 * if tg3_poll() is not scheduled.
4641 */ 4641 */
4642 tp->last_tag = sblk->status_tag; 4642 tp->last_tag = sblk->status_tag;
4643 __netif_rx_schedule(&tp->napi); 4643 __napi_schedule(&tp->napi);
4644 } 4644 }
4645out: 4645out:
4646 return IRQ_RETVAL(handled); 4646 return IRQ_RETVAL(handled);
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 43853e3b210..4a65fc2dd92 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -274,6 +274,15 @@ static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value)
274 274
275 return ; 275 return ;
276} 276}
277
278static const struct net_device_ops xl_netdev_ops = {
279 .ndo_open = xl_open,
280 .ndo_stop = xl_close,
281 .ndo_start_xmit = xl_xmit,
282 .ndo_change_mtu = xl_change_mtu,
283 .ndo_set_multicast_list = xl_set_rx_mode,
284 .ndo_set_mac_address = xl_set_mac_address,
285};
277 286
278static int __devinit xl_probe(struct pci_dev *pdev, 287static int __devinit xl_probe(struct pci_dev *pdev,
279 const struct pci_device_id *ent) 288 const struct pci_device_id *ent)
@@ -337,13 +346,7 @@ static int __devinit xl_probe(struct pci_dev *pdev,
337 return i ; 346 return i ;
338 } 347 }
339 348
340 dev->open=&xl_open; 349 dev->netdev_ops = &xl_netdev_ops;
341 dev->hard_start_xmit=&xl_xmit;
342 dev->change_mtu=&xl_change_mtu;
343 dev->stop=&xl_close;
344 dev->do_ioctl=NULL;
345 dev->set_multicast_list=&xl_set_rx_mode;
346 dev->set_mac_address=&xl_set_mac_address ;
347 SET_NETDEV_DEV(dev, &pdev->dev); 350 SET_NETDEV_DEV(dev, &pdev->dev);
348 351
349 pci_set_drvdata(pdev,dev) ; 352 pci_set_drvdata(pdev,dev) ;
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b566d6d79ec..b9db1b5a58a 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -92,6 +92,8 @@ static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned
92 outw(val, dev->base_addr + reg); 92 outw(val, dev->base_addr + reg);
93} 93}
94 94
95static struct net_device_ops abyss_netdev_ops;
96
95static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent) 97static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
96{ 98{
97 static int versionprinted; 99 static int versionprinted;
@@ -157,8 +159,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
157 159
158 memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1); 160 memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);
159 161
160 dev->open = abyss_open; 162 dev->netdev_ops = &abyss_netdev_ops;
161 dev->stop = abyss_close;
162 163
163 pci_set_drvdata(pdev, dev); 164 pci_set_drvdata(pdev, dev);
164 SET_NETDEV_DEV(dev, &pdev->dev); 165 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -450,6 +451,11 @@ static struct pci_driver abyss_driver = {
450 451
451static int __init abyss_init (void) 452static int __init abyss_init (void)
452{ 453{
454 abyss_netdev_ops = tms380tr_netdev_ops;
455
456 abyss_netdev_ops.ndo_open = abyss_open;
457 abyss_netdev_ops.ndo_stop = abyss_close;
458
453 return pci_register_driver(&abyss_driver); 459 return pci_register_driver(&abyss_driver);
454} 460}
455 461
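abyss shows the pattern used by the tms380tr-based boards: the ops structure cannot be const because the generic tms380tr_netdev_ops template is copied at module init and only ndo_open/ndo_stop are overridden. Sketched generically below; the template symbol and the card_* handlers are hypothetical stand-ins, not names from this patch:

#include <linux/init.h>
#include <linux/netdevice.h>

extern const struct net_device_ops generic_tr_netdev_ops;	/* hypothetical shared template */

static struct net_device_ops card_netdev_ops;			/* filled in at module init */

static int card_open(struct net_device *dev)  { return 0; }	/* placeholder handlers */
static int card_close(struct net_device *dev) { return 0; }

static int __init card_init(void)
{
	card_netdev_ops = generic_tr_netdev_ops;	/* start from the template */
	card_netdev_ops.ndo_open = card_open;		/* override only what differs */
	card_netdev_ops.ndo_stop = card_close;
	return 0;					/* module_init() wiring omitted */
}
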
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index fa7bce6e0c6..9d896116cf7 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -200,7 +200,6 @@ static void tr_rx(struct net_device *dev);
200static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev); 200static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
201static void tok_rerun(unsigned long dev_addr); 201static void tok_rerun(unsigned long dev_addr);
202static void ibmtr_readlog(struct net_device *dev); 202static void ibmtr_readlog(struct net_device *dev);
203static struct net_device_stats *tok_get_stats(struct net_device *dev);
204static int ibmtr_change_mtu(struct net_device *dev, int mtu); 203static int ibmtr_change_mtu(struct net_device *dev, int mtu);
205static void find_turbo_adapters(int *iolist); 204static void find_turbo_adapters(int *iolist);
206 205
@@ -816,18 +815,21 @@ static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
816 815
817/*****************************************************************************/ 816/*****************************************************************************/
818 817
818static const struct net_device_ops trdev_netdev_ops = {
819 .ndo_open = tok_open,
820 .ndo_stop = tok_close,
821 .ndo_start_xmit = tok_send_packet,
822 .ndo_set_multicast_list = tok_set_multicast_list,
823 .ndo_change_mtu = ibmtr_change_mtu,
824};
825
819static int __devinit trdev_init(struct net_device *dev) 826static int __devinit trdev_init(struct net_device *dev)
820{ 827{
821 struct tok_info *ti = netdev_priv(dev); 828 struct tok_info *ti = netdev_priv(dev);
822 829
823 SET_PAGE(ti->srb_page); 830 SET_PAGE(ti->srb_page);
824 ti->open_failure = NO ; 831 ti->open_failure = NO ;
825 dev->open = tok_open; 832 dev->netdev_ops = &trdev_netdev_ops;
826 dev->stop = tok_close;
827 dev->hard_start_xmit = tok_send_packet;
828 dev->get_stats = tok_get_stats;
829 dev->set_multicast_list = tok_set_multicast_list;
830 dev->change_mtu = ibmtr_change_mtu;
831 833
832 return 0; 834 return 0;
833} 835}
@@ -1460,7 +1462,7 @@ static irqreturn_t tok_interrupt(int irq, void *dev_id)
1460 "%02X\n", 1462 "%02X\n",
1461 (int)retcode, (int)readb(ti->ssb + 6)); 1463 (int)retcode, (int)readb(ti->ssb + 6));
1462 else 1464 else
1463 ti->tr_stats.tx_packets++; 1465 dev->stats.tx_packets++;
1464 break; 1466 break;
1465 case XMIT_XID_CMD: 1467 case XMIT_XID_CMD:
1466 DPRINTK("xmit xid ret_code: %02X\n", 1468 DPRINTK("xmit xid ret_code: %02X\n",
@@ -1646,7 +1648,7 @@ static void tr_tx(struct net_device *dev)
1646 break; 1648 break;
1647 } 1649 }
1648 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1650 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1649 ti->tr_stats.tx_bytes += ti->current_skb->len; 1651 dev->stats.tx_bytes += ti->current_skb->len;
1650 dev_kfree_skb_irq(ti->current_skb); 1652 dev_kfree_skb_irq(ti->current_skb);
1651 ti->current_skb = NULL; 1653 ti->current_skb = NULL;
1652 netif_wake_queue(dev); 1654 netif_wake_queue(dev);
@@ -1722,7 +1724,7 @@ static void tr_rx(struct net_device *dev)
1722 if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) { 1724 if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
1723 SET_PAGE(ti->asb_page); 1725 SET_PAGE(ti->asb_page);
1724 writeb(DATA_LOST, ti->asb + RETCODE_OFST); 1726 writeb(DATA_LOST, ti->asb + RETCODE_OFST);
1725 ti->tr_stats.rx_dropped++; 1727 dev->stats.rx_dropped++;
1726 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1728 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1727 return; 1729 return;
1728 } 1730 }
@@ -1757,7 +1759,7 @@ static void tr_rx(struct net_device *dev)
1757 1759
1758 if (!(skb = dev_alloc_skb(skb_size))) { 1760 if (!(skb = dev_alloc_skb(skb_size))) {
1759 DPRINTK("out of memory. frame dropped.\n"); 1761 DPRINTK("out of memory. frame dropped.\n");
1760 ti->tr_stats.rx_dropped++; 1762 dev->stats.rx_dropped++;
1761 SET_PAGE(ti->asb_page); 1763 SET_PAGE(ti->asb_page);
1762 writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); 1764 writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
1763 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1765 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
@@ -1813,8 +1815,8 @@ static void tr_rx(struct net_device *dev)
1813 1815
1814 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1816 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1815 1817
1816 ti->tr_stats.rx_bytes += skb->len; 1818 dev->stats.rx_bytes += skb->len;
1817 ti->tr_stats.rx_packets++; 1819 dev->stats.rx_packets++;
1818 1820
1819 skb->protocol = tr_type_trans(skb, dev); 1821 skb->protocol = tr_type_trans(skb, dev);
1820 if (IPv4_p) { 1822 if (IPv4_p) {
@@ -1876,21 +1878,6 @@ static void ibmtr_readlog(struct net_device *dev)
1876 1878
1877/*****************************************************************************/ 1879/*****************************************************************************/
1878 1880
1879/* tok_get_stats(): Basically a scaffold routine which will return
1880 the address of the tr_statistics structure associated with
1881 this device -- the tr.... structure is an ethnet look-alike
1882 so at least for this iteration may suffice. */
1883
1884static struct net_device_stats *tok_get_stats(struct net_device *dev)
1885{
1886
1887 struct tok_info *toki;
1888 toki = netdev_priv(dev);
1889 return (struct net_device_stats *) &toki->tr_stats;
1890}
1891
1892/*****************************************************************************/
1893
1894static int ibmtr_change_mtu(struct net_device *dev, int mtu) 1881static int ibmtr_change_mtu(struct net_device *dev, int mtu)
1895{ 1882{
1896 struct tok_info *ti = netdev_priv(dev); 1883 struct tok_info *ti = netdev_priv(dev);
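[Editor's note] The ibmtr hunks above follow the tree-wide net_device_ops conversion: the per-field open/stop/hard_start_xmit pointers on struct net_device are replaced by a single const ops table assigned once in the init path. A minimal sketch of the pattern, using hypothetical foo_* handlers rather than any real driver's functions:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int foo_open(struct net_device *dev)  { return 0; }
    static int foo_close(struct net_device *dev) { return 0; }

    static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            dev_kfree_skb(skb);
            return NETDEV_TX_OK;
    }

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_open       = foo_open,   /* was dev->open */
            .ndo_stop       = foo_close,  /* was dev->stop */
            .ndo_start_xmit = foo_xmit,   /* was dev->hard_start_xmit */
    };

    static void foo_setup(struct net_device *dev)
    {
            /* one assignment replaces the field-by-field hook setup */
            dev->netdev_ops = &foo_netdev_ops;
    }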
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 239c75217b1..0b2b7925da2 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -207,7 +207,6 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev);
207static int streamer_close(struct net_device *dev); 207static int streamer_close(struct net_device *dev);
208static void streamer_set_rx_mode(struct net_device *dev); 208static void streamer_set_rx_mode(struct net_device *dev);
209static irqreturn_t streamer_interrupt(int irq, void *dev_id); 209static irqreturn_t streamer_interrupt(int irq, void *dev_id);
210static struct net_device_stats *streamer_get_stats(struct net_device *dev);
211static int streamer_set_mac_address(struct net_device *dev, void *addr); 210static int streamer_set_mac_address(struct net_device *dev, void *addr);
212static void streamer_arb_cmd(struct net_device *dev); 211static void streamer_arb_cmd(struct net_device *dev);
213static int streamer_change_mtu(struct net_device *dev, int mtu); 212static int streamer_change_mtu(struct net_device *dev, int mtu);
@@ -222,6 +221,18 @@ struct streamer_private *dev_streamer=NULL;
222#endif 221#endif
223#endif 222#endif
224 223
224static const struct net_device_ops streamer_netdev_ops = {
225 .ndo_open = streamer_open,
226 .ndo_stop = streamer_close,
227 .ndo_start_xmit = streamer_xmit,
228 .ndo_change_mtu = streamer_change_mtu,
229#if STREAMER_IOCTL
230 .ndo_do_ioctl = streamer_ioctl,
231#endif
232 .ndo_set_multicast_list = streamer_set_rx_mode,
233 .ndo_set_mac_address = streamer_set_mac_address,
234};
235
225static int __devinit streamer_init_one(struct pci_dev *pdev, 236static int __devinit streamer_init_one(struct pci_dev *pdev,
226 const struct pci_device_id *ent) 237 const struct pci_device_id *ent)
227{ 238{
@@ -321,18 +332,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
321 init_waitqueue_head(&streamer_priv->srb_wait); 332 init_waitqueue_head(&streamer_priv->srb_wait);
322 init_waitqueue_head(&streamer_priv->trb_wait); 333 init_waitqueue_head(&streamer_priv->trb_wait);
323 334
324 dev->open = &streamer_open; 335 dev->netdev_ops = &streamer_netdev_ops;
325 dev->hard_start_xmit = &streamer_xmit;
326 dev->change_mtu = &streamer_change_mtu;
327 dev->stop = &streamer_close;
328#if STREAMER_IOCTL
329 dev->do_ioctl = &streamer_ioctl;
330#else
331 dev->do_ioctl = NULL;
332#endif
333 dev->set_multicast_list = &streamer_set_rx_mode;
334 dev->get_stats = &streamer_get_stats;
335 dev->set_mac_address = &streamer_set_mac_address;
336 dev->irq = pdev->irq; 336 dev->irq = pdev->irq;
337 dev->base_addr=pio_start; 337 dev->base_addr=pio_start;
338 SET_NETDEV_DEV(dev, &pdev->dev); 338 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -937,7 +937,7 @@ static void streamer_rx(struct net_device *dev)
937 if (skb == NULL) 937 if (skb == NULL)
938 { 938 {
939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); 939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
940 streamer_priv->streamer_stats.rx_dropped++; 940 dev->stats.rx_dropped++;
941 } else { /* we allocated an skb OK */ 941 } else { /* we allocated an skb OK */
942 if (buffer_cnt == 1) { 942 if (buffer_cnt == 1) {
943 /* release the DMA mapping */ 943 /* release the DMA mapping */
@@ -1009,8 +1009,8 @@ static void streamer_rx(struct net_device *dev)
1009 /* send up to the protocol */ 1009 /* send up to the protocol */
1010 netif_rx(skb); 1010 netif_rx(skb);
1011 } 1011 }
1012 streamer_priv->streamer_stats.rx_packets++; 1012 dev->stats.rx_packets++;
1013 streamer_priv->streamer_stats.rx_bytes += length; 1013 dev->stats.rx_bytes += length;
1014 } /* if skb == null */ 1014 } /* if skb == null */
1015 } /* end received without errors */ 1015 } /* end received without errors */
1016 1016
@@ -1053,8 +1053,8 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id)
1053 while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { 1053 while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
1054 streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); 1054 streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
1055 streamer_priv->free_tx_ring_entries++; 1055 streamer_priv->free_tx_ring_entries++;
1056 streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; 1056 dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
1057 streamer_priv->streamer_stats.tx_packets++; 1057 dev->stats.tx_packets++;
1058 dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); 1058 dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
1059 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; 1059 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
1060 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; 1060 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
@@ -1484,13 +1484,6 @@ static void streamer_srb_bh(struct net_device *dev)
1484 } /* switch srb[0] */ 1484 } /* switch srb[0] */
1485} 1485}
1486 1486
1487static struct net_device_stats *streamer_get_stats(struct net_device *dev)
1488{
1489 struct streamer_private *streamer_priv;
1490 streamer_priv = netdev_priv(dev);
1491 return (struct net_device_stats *) &streamer_priv->streamer_stats;
1492}
1493
1494static int streamer_set_mac_address(struct net_device *dev, void *addr) 1487static int streamer_set_mac_address(struct net_device *dev, void *addr)
1495{ 1488{
1496 struct sockaddr *saddr = addr; 1489 struct sockaddr *saddr = addr;
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
index 13ccee6449c..3c58d6a3fbc 100644
--- a/drivers/net/tokenring/lanstreamer.h
+++ b/drivers/net/tokenring/lanstreamer.h
@@ -299,7 +299,6 @@ struct streamer_private {
299 int tx_ring_free, tx_ring_last_status, rx_ring_last_received, 299 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,
300 free_tx_ring_entries; 300 free_tx_ring_entries;
301 301
302 struct net_device_stats streamer_stats;
303 __u16 streamer_lan_status; 302 __u16 streamer_lan_status;
304 __u8 streamer_ring_speed; 303 __u8 streamer_ring_speed;
305 __u16 pkt_buf_sz; 304 __u16 pkt_buf_sz;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index ecb5c7c9691..77dc9da4c0b 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -187,7 +187,6 @@ static int olympic_close(struct net_device *dev);
187static void olympic_set_rx_mode(struct net_device *dev); 187static void olympic_set_rx_mode(struct net_device *dev);
188static void olympic_freemem(struct net_device *dev) ; 188static void olympic_freemem(struct net_device *dev) ;
189static irqreturn_t olympic_interrupt(int irq, void *dev_id); 189static irqreturn_t olympic_interrupt(int irq, void *dev_id);
190static struct net_device_stats * olympic_get_stats(struct net_device *dev);
191static int olympic_set_mac_address(struct net_device *dev, void *addr) ; 190static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
192static void olympic_arb_cmd(struct net_device *dev); 191static void olympic_arb_cmd(struct net_device *dev);
193static int olympic_change_mtu(struct net_device *dev, int mtu); 192static int olympic_change_mtu(struct net_device *dev, int mtu);
@@ -195,6 +194,15 @@ static void olympic_srb_bh(struct net_device *dev) ;
195static void olympic_asb_bh(struct net_device *dev) ; 194static void olympic_asb_bh(struct net_device *dev) ;
196static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ; 195static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197 196
197static const struct net_device_ops olympic_netdev_ops = {
198 .ndo_open = olympic_open,
199 .ndo_stop = olympic_close,
200 .ndo_start_xmit = olympic_xmit,
201 .ndo_change_mtu = olympic_change_mtu,
202 .ndo_set_multicast_list = olympic_set_rx_mode,
203 .ndo_set_mac_address = olympic_set_mac_address,
204};
205
198static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 206static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199{ 207{
200 struct net_device *dev ; 208 struct net_device *dev ;
@@ -253,14 +261,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device
253 goto op_free_iomap; 261 goto op_free_iomap;
254 } 262 }
255 263
256 dev->open=&olympic_open; 264 dev->netdev_ops = &olympic_netdev_ops;
257 dev->hard_start_xmit=&olympic_xmit;
258 dev->change_mtu=&olympic_change_mtu;
259 dev->stop=&olympic_close;
260 dev->do_ioctl=NULL;
261 dev->set_multicast_list=&olympic_set_rx_mode;
262 dev->get_stats=&olympic_get_stats ;
263 dev->set_mac_address=&olympic_set_mac_address ;
264 SET_NETDEV_DEV(dev, &pdev->dev); 265 SET_NETDEV_DEV(dev, &pdev->dev);
265 266
266 pci_set_drvdata(pdev,dev) ; 267 pci_set_drvdata(pdev,dev) ;
@@ -785,7 +786,7 @@ static void olympic_rx(struct net_device *dev)
785 } 786 }
786 olympic_priv->rx_ring_last_received += i ; 787 olympic_priv->rx_ring_last_received += i ;
787 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 788 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
788 olympic_priv->olympic_stats.rx_errors++; 789 dev->stats.rx_errors++;
789 } else { 790 } else {
790 791
791 if (buffer_cnt == 1) { 792 if (buffer_cnt == 1) {
@@ -796,7 +797,7 @@ static void olympic_rx(struct net_device *dev)
796 797
797 if (skb == NULL) { 798 if (skb == NULL) {
798 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; 799 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
799 olympic_priv->olympic_stats.rx_dropped++ ; 800 dev->stats.rx_dropped++;
800 /* Update counters even though we don't transfer the frame */ 801 /* Update counters even though we don't transfer the frame */
801 olympic_priv->rx_ring_last_received += i ; 802 olympic_priv->rx_ring_last_received += i ;
802 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 803 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -862,8 +863,8 @@ static void olympic_rx(struct net_device *dev)
862 skb->protocol = tr_type_trans(skb,dev); 863 skb->protocol = tr_type_trans(skb,dev);
863 netif_rx(skb) ; 864 netif_rx(skb) ;
864 } 865 }
865 olympic_priv->olympic_stats.rx_packets++ ; 866 dev->stats.rx_packets++ ;
866 olympic_priv->olympic_stats.rx_bytes += length ; 867 dev->stats.rx_bytes += length ;
867 } /* if skb == null */ 868 } /* if skb == null */
868 } /* If status & 0x3b */ 869 } /* If status & 0x3b */
869 870
@@ -971,8 +972,8 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id)
971 olympic_priv->tx_ring_last_status++; 972 olympic_priv->tx_ring_last_status++;
972 olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1); 973 olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
973 olympic_priv->free_tx_ring_entries++; 974 olympic_priv->free_tx_ring_entries++;
974 olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; 975 dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
975 olympic_priv->olympic_stats.tx_packets++ ; 976 dev->stats.tx_packets++ ;
976 pci_unmap_single(olympic_priv->pdev, 977 pci_unmap_single(olympic_priv->pdev,
977 le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), 978 le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
978 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE); 979 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
@@ -1344,13 +1345,6 @@ static void olympic_srb_bh(struct net_device *dev)
1344 1345
1345} 1346}
1346 1347
1347static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1348{
1349 struct olympic_private *olympic_priv ;
1350 olympic_priv=netdev_priv(dev);
1351 return (struct net_device_stats *) &olympic_priv->olympic_stats;
1352}
1353
1354static int olympic_set_mac_address (struct net_device *dev, void *addr) 1348static int olympic_set_mac_address (struct net_device *dev, void *addr)
1355{ 1349{
1356 struct sockaddr *saddr = addr ; 1350 struct sockaddr *saddr = addr ;
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
index 10fbba08978..30631bae4c9 100644
--- a/drivers/net/tokenring/olympic.h
+++ b/drivers/net/tokenring/olympic.h
@@ -275,7 +275,6 @@ struct olympic_private {
275 struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE]; 275 struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE];
276 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries; 276 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;
277 277
278 struct net_device_stats olympic_stats ;
279 u16 olympic_lan_status ; 278 u16 olympic_lan_status ;
280 u8 olympic_ring_speed ; 279 u8 olympic_ring_speed ;
281 u16 pkt_buf_sz ; 280 u16 pkt_buf_sz ;
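[Editor's note] lanstreamer and olympic drop their private struct net_device_stats and the trivial get_stats accessors for the same reason: struct net_device already embeds a stats member, and the core returns it when a driver provides no .ndo_get_stats method. A hedged sketch of the replacement pattern, with a hypothetical foo_count_rx() helper:

    #include <linux/netdevice.h>

    /* Hypothetical RX completion path: bump the counters embedded in
     * struct net_device instead of a private net_device_stats copy.
     * No .ndo_get_stats is needed; the core falls back to dev->stats. */
    static void foo_count_rx(struct net_device *dev, unsigned int len)
    {
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;
    }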
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 5be34c2fd48..b11bb72dc7a 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -2330,6 +2330,17 @@ void tmsdev_term(struct net_device *dev)
2330 DMA_BIDIRECTIONAL); 2330 DMA_BIDIRECTIONAL);
2331} 2331}
2332 2332
2333const struct net_device_ops tms380tr_netdev_ops = {
2334 .ndo_open = tms380tr_open,
2335 .ndo_stop = tms380tr_close,
2336 .ndo_start_xmit = tms380tr_send_packet,
2337 .ndo_tx_timeout = tms380tr_timeout,
2338 .ndo_get_stats = tms380tr_get_stats,
2339 .ndo_set_multicast_list = tms380tr_set_multicast_list,
2340 .ndo_set_mac_address = tms380tr_set_mac_address,
2341};
2342EXPORT_SYMBOL(tms380tr_netdev_ops);
2343
2333int tmsdev_init(struct net_device *dev, struct device *pdev) 2344int tmsdev_init(struct net_device *dev, struct device *pdev)
2334{ 2345{
2335 struct net_local *tms_local; 2346 struct net_local *tms_local;
@@ -2353,16 +2364,8 @@ int tmsdev_init(struct net_device *dev, struct device *pdev)
2353 return -ENOMEM; 2364 return -ENOMEM;
2354 } 2365 }
2355 2366
2356 /* These can be overridden by the card driver if needed */ 2367 dev->netdev_ops = &tms380tr_netdev_ops;
2357 dev->open = tms380tr_open;
2358 dev->stop = tms380tr_close;
2359 dev->do_ioctl = NULL;
2360 dev->hard_start_xmit = tms380tr_send_packet;
2361 dev->tx_timeout = tms380tr_timeout;
2362 dev->watchdog_timeo = HZ; 2368 dev->watchdog_timeo = HZ;
2363 dev->get_stats = tms380tr_get_stats;
2364 dev->set_multicast_list = &tms380tr_set_multicast_list;
2365 dev->set_mac_address = tms380tr_set_mac_address;
2366 2369
2367 return 0; 2370 return 0;
2368} 2371}
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index 7af76d70884..60b30ee38dc 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -14,6 +14,7 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15 15
16/* module prototypes */ 16/* module prototypes */
17extern const struct net_device_ops tms380tr_netdev_ops;
17int tms380tr_open(struct net_device *dev); 18int tms380tr_open(struct net_device *dev);
18int tms380tr_close(struct net_device *dev); 19int tms380tr_close(struct net_device *dev);
19irqreturn_t tms380tr_interrupt(int irq, void *dev_id); 20irqreturn_t tms380tr_interrupt(int irq, void *dev_id);
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 5f601773c26..b397e8785d6 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -157,8 +157,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
157 157
158 tp->tmspriv = cardinfo; 158 tp->tmspriv = cardinfo;
159 159
160 dev->open = tms380tr_open; 160 dev->netdev_ops = &tms380tr_netdev_ops;
161 dev->stop = tms380tr_close; 161
162 pci_set_drvdata(pdev, dev); 162 pci_set_drvdata(pdev, dev);
163 SET_NETDEV_DEV(dev, &pdev->dev); 163 SET_NETDEV_DEV(dev, &pdev->dev);
164 164
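[Editor's note] tms380tr is a library shared by several card drivers, so its ops table is exported and tmspci simply points its net_device at it. The removed comment said the hooks "can be overridden by the card driver if needed"; with a shared const table that is no longer done in place, so a card needing a different hook would declare its own table, reusing the generic handlers for the rest. A hypothetical sketch (mycard_* names are illustrative only):

    #include <linux/netdevice.h>
    #include "tms380tr.h"          /* declares the shared tms380tr_* handlers */

    static int mycard_open(struct net_device *dev);   /* card-specific override, defined elsewhere */

    static const struct net_device_ops mycard_netdev_ops = {
            .ndo_open               = mycard_open,            /* the one override */
            .ndo_stop               = tms380tr_close,
            .ndo_start_xmit         = tms380tr_send_packet,
            .ndo_tx_timeout         = tms380tr_timeout,
            .ndo_get_stats          = tms380tr_get_stats,
            .ndo_set_multicast_list = tms380tr_set_multicast_list,
            .ndo_set_mac_address    = tms380tr_set_mac_address,
    };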
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a9fd2b2ccaf..bb43e7fb2a5 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -888,7 +888,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
888 888
889 if (num_received < budget) { 889 if (num_received < budget) {
890 data->rxpending = 0; 890 data->rxpending = 0;
891 netif_rx_complete(napi); 891 napi_complete(napi);
892 892
893 TSI_WRITE(TSI108_EC_INTMASK, 893 TSI_WRITE(TSI108_EC_INTMASK,
894 TSI_READ(TSI108_EC_INTMASK) 894 TSI_READ(TSI108_EC_INTMASK)
@@ -915,11 +915,11 @@ static void tsi108_rx_int(struct net_device *dev)
915 * 915 *
916 * This can happen if this code races with tsi108_poll(), which masks 916 * This can happen if this code races with tsi108_poll(), which masks
917 * the interrupts after tsi108_irq_one() read the mask, but before 917 * the interrupts after tsi108_irq_one() read the mask, but before
918 * netif_rx_schedule is called. It could also happen due to calls 918 * napi_schedule is called. It could also happen due to calls
919 * from tsi108_check_rxring(). 919 * from tsi108_check_rxring().
920 */ 920 */
921 921
922 if (netif_rx_schedule_prep(&data->napi)) { 922 if (napi_schedule_prep(&data->napi)) {
923 /* Mask, rather than ack, the receive interrupts. The ack 923 /* Mask, rather than ack, the receive interrupts. The ack
924 * will happen in tsi108_poll(). 924 * will happen in tsi108_poll().
925 */ 925 */
@@ -930,7 +930,7 @@ static void tsi108_rx_int(struct net_device *dev)
930 | TSI108_INT_RXTHRESH | 930 | TSI108_INT_RXTHRESH |
931 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | 931 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
932 TSI108_INT_RXWAIT); 932 TSI108_INT_RXWAIT);
933 __netif_rx_schedule(&data->napi); 933 __napi_schedule(&data->napi);
934 } else { 934 } else {
935 if (!netif_running(dev)) { 935 if (!netif_running(dev)) {
936 /* This can happen if an interrupt occurs while the 936 /* This can happen if an interrupt occurs while the
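[Editor's note] tsi108, tulip and typhoon below are all touched by the same rename: the netif_rx_schedule*/netif_rx_complete helpers became napi_schedule*/napi_complete once NAPI state was decoupled from struct net_device. The calling pattern is unchanged; roughly (a sketch with hypothetical foo_* names, not any of these drivers' actual code):

    #include <linux/netdevice.h>
    #include <linux/interrupt.h>

    struct foo_priv {
            struct napi_struct napi;
    };

    static void foo_mask_rx_irqs(struct foo_priv *priv)   { /* device-specific */ }
    static void foo_unmask_rx_irqs(struct foo_priv *priv) { /* device-specific */ }
    static int  foo_rx(struct foo_priv *priv, int budget) { return 0; }

    /* IRQ side: claim the poll, mask RX interrupts, then schedule. */
    static irqreturn_t foo_interrupt(int irq, void *dev_id)
    {
            struct foo_priv *priv = dev_id;

            if (napi_schedule_prep(&priv->napi)) {       /* was netif_rx_schedule_prep() */
                    foo_mask_rx_irqs(priv);
                    __napi_schedule(&priv->napi);        /* was __netif_rx_schedule() */
            }
            return IRQ_HANDLED;
    }

    /* Poll side: when under budget, complete and re-enable interrupts. */
    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            int done = foo_rx(priv, budget);

            if (done < budget) {
                    napi_complete(napi);                 /* was netif_rx_complete() */
                    foo_unmask_rx_irqs(priv);
            }
            return done;
    }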
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 6c3428a37c0..9f946d42108 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -103,7 +103,7 @@ void oom_timer(unsigned long data)
103{ 103{
104 struct net_device *dev = (struct net_device *)data; 104 struct net_device *dev = (struct net_device *)data;
105 struct tulip_private *tp = netdev_priv(dev); 105 struct tulip_private *tp = netdev_priv(dev);
106 netif_rx_schedule(&tp->napi); 106 napi_schedule(&tp->napi);
107} 107}
108 108
109int tulip_poll(struct napi_struct *napi, int budget) 109int tulip_poll(struct napi_struct *napi, int budget)
@@ -300,7 +300,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
300 300
301 /* Remove us from polling list and enable RX intr. */ 301 /* Remove us from polling list and enable RX intr. */
302 302
303 netif_rx_complete(napi); 303 napi_complete(napi);
304 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); 304 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
305 305
306 /* The last op happens after poll completion. Which means the following: 306 /* The last op happens after poll completion. Which means the following:
@@ -333,10 +333,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
333 333
334 /* Think: timer_pending() was an explicit signature of bug. 334 /* Think: timer_pending() was an explicit signature of bug.
335 * Timer can be pending now but fired and completed 335 * Timer can be pending now but fired and completed
336 * before we did netif_rx_complete(). See? We would lose it. */ 336 * before we did napi_complete(). See? We would lose it. */
337 337
338 /* remove ourselves from the polling list */ 338 /* remove ourselves from the polling list */
339 netif_rx_complete(napi); 339 napi_complete(napi);
340 340
341 return work_done; 341 return work_done;
342} 342}
@@ -519,7 +519,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
519 rxd++; 519 rxd++;
520 /* Mask RX intrs and add the device to poll list. */ 520 /* Mask RX intrs and add the device to poll list. */
521 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); 521 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
522 netif_rx_schedule(&tp->napi); 522 napi_schedule(&tp->napi);
523 523
524 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) 524 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
525 break; 525 break;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 09fea31d3e3..a1b0697340b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -63,6 +63,8 @@
63#include <linux/virtio_net.h> 63#include <linux/virtio_net.h>
64#include <net/net_namespace.h> 64#include <net/net_namespace.h>
65#include <net/netns/generic.h> 65#include <net/netns/generic.h>
66#include <net/rtnetlink.h>
67#include <net/sock.h>
66 68
67#include <asm/system.h> 69#include <asm/system.h>
68#include <asm/uaccess.h> 70#include <asm/uaccess.h>
@@ -87,26 +89,127 @@ struct tap_filter {
87 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 89 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
88}; 90};
89 91
92struct tun_file {
93 atomic_t count;
94 struct tun_struct *tun;
95 struct net *net;
96 wait_queue_head_t read_wait;
97};
98
99struct tun_sock;
100
90struct tun_struct { 101struct tun_struct {
91 struct list_head list; 102 struct tun_file *tfile;
92 unsigned int flags; 103 unsigned int flags;
93 int attached;
94 uid_t owner; 104 uid_t owner;
95 gid_t group; 105 gid_t group;
96 106
97 wait_queue_head_t read_wait;
98 struct sk_buff_head readq; 107 struct sk_buff_head readq;
99 108
100 struct net_device *dev; 109 struct net_device *dev;
101 struct fasync_struct *fasync; 110 struct fasync_struct *fasync;
102 111
103 struct tap_filter txflt; 112 struct tap_filter txflt;
113 struct sock *sk;
114 struct socket socket;
104 115
105#ifdef TUN_DEBUG 116#ifdef TUN_DEBUG
106 int debug; 117 int debug;
107#endif 118#endif
108}; 119};
109 120
121struct tun_sock {
122 struct sock sk;
123 struct tun_struct *tun;
124};
125
126static inline struct tun_sock *tun_sk(struct sock *sk)
127{
128 return container_of(sk, struct tun_sock, sk);
129}
130
131static int tun_attach(struct tun_struct *tun, struct file *file)
132{
133 struct tun_file *tfile = file->private_data;
134 const struct cred *cred = current_cred();
135 int err;
136
137 ASSERT_RTNL();
138
139 /* Check permissions */
140 if (((tun->owner != -1 && cred->euid != tun->owner) ||
141 (tun->group != -1 && !in_egroup_p(tun->group))) &&
142 !capable(CAP_NET_ADMIN))
143 return -EPERM;
144
145 netif_tx_lock_bh(tun->dev);
146
147 err = -EINVAL;
148 if (tfile->tun)
149 goto out;
150
151 err = -EBUSY;
152 if (tun->tfile)
153 goto out;
154
155 err = 0;
156 tfile->tun = tun;
157 tun->tfile = tfile;
158 dev_hold(tun->dev);
159 atomic_inc(&tfile->count);
160
161out:
162 netif_tx_unlock_bh(tun->dev);
163 return err;
164}
165
166static void __tun_detach(struct tun_struct *tun)
167{
168 struct tun_file *tfile = tun->tfile;
169
170 /* Detach from net device */
171 netif_tx_lock_bh(tun->dev);
172 tfile->tun = NULL;
173 tun->tfile = NULL;
174 netif_tx_unlock_bh(tun->dev);
175
176 /* Drop read queue */
177 skb_queue_purge(&tun->readq);
178
179 /* Drop the extra count on the net device */
180 dev_put(tun->dev);
181}
182
183static void tun_detach(struct tun_struct *tun)
184{
185 rtnl_lock();
186 __tun_detach(tun);
187 rtnl_unlock();
188}
189
190static struct tun_struct *__tun_get(struct tun_file *tfile)
191{
192 struct tun_struct *tun = NULL;
193
194 if (atomic_inc_not_zero(&tfile->count))
195 tun = tfile->tun;
196
197 return tun;
198}
199
200static struct tun_struct *tun_get(struct file *file)
201{
202 return __tun_get(file->private_data);
203}
204
205static void tun_put(struct tun_struct *tun)
206{
207 struct tun_file *tfile = tun->tfile;
208
209 if (atomic_dec_and_test(&tfile->count))
210 tun_detach(tfile->tun);
211}
212
 110/* TAP filtering */ 213/* TAP filtering */
111static void addr_hash_set(u32 *mask, const u8 *addr) 214static void addr_hash_set(u32 *mask, const u8 *addr)
112{ 215{
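[Editor's note] The tun rework above splits per-open state (struct tun_file) from per-device state (struct tun_struct) and ties their lifetimes together with an atomic count: __tun_get() only succeeds while the count is non-zero, tun_put() detaches on the final drop, and tun_net_uninit() can force that final drop when the netdevice disappears. This is the usual atomic_inc_not_zero / atomic_dec_and_test pairing; a minimal sketch of just the counting discipline, with illustrative names:

    #include <asm/atomic.h>

    struct foo_object;                     /* opaque; only pointers are used here */

    struct foo_handle {
            atomic_t count;                /* 0 once detached, >0 while attached   */
            struct foo_object *obj;        /* only valid while a reference is held */
    };

    static struct foo_object *foo_handle_get(struct foo_handle *h)
    {
            /* Take a reference only if one is already outstanding; a handle
             * whose count has reached zero stays detached. */
            return atomic_inc_not_zero(&h->count) ? h->obj : NULL;
    }

    static void foo_handle_put(struct foo_handle *h,
                               void (*detach)(struct foo_object *))
    {
            if (atomic_dec_and_test(&h->count))    /* final reference dropped */
                    detach(h->obj);
    }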
@@ -219,13 +322,23 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
219 322
220/* Network device part of the driver */ 323/* Network device part of the driver */
221 324
222static int tun_net_id;
223struct tun_net {
224 struct list_head dev_list;
225};
226
227static const struct ethtool_ops tun_ethtool_ops; 325static const struct ethtool_ops tun_ethtool_ops;
228 326
327/* Net device detach from fd. */
328static void tun_net_uninit(struct net_device *dev)
329{
330 struct tun_struct *tun = netdev_priv(dev);
331 struct tun_file *tfile = tun->tfile;
332
333 /* Inform the methods they need to stop using the dev.
334 */
335 if (tfile) {
336 wake_up_all(&tfile->read_wait);
337 if (atomic_dec_and_test(&tfile->count))
338 __tun_detach(tun);
339 }
340}
341
229/* Net device open. */ 342/* Net device open. */
230static int tun_net_open(struct net_device *dev) 343static int tun_net_open(struct net_device *dev)
231{ 344{
@@ -248,7 +361,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
248 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); 361 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
249 362
250 /* Drop packet if interface is not attached */ 363 /* Drop packet if interface is not attached */
251 if (!tun->attached) 364 if (!tun->tfile)
252 goto drop; 365 goto drop;
253 366
254 /* Drop if the filter does not like it. 367 /* Drop if the filter does not like it.
@@ -280,7 +393,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
280 /* Notify and wake up reader process */ 393 /* Notify and wake up reader process */
281 if (tun->flags & TUN_FASYNC) 394 if (tun->flags & TUN_FASYNC)
282 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 395 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
283 wake_up_interruptible(&tun->read_wait); 396 wake_up_interruptible(&tun->tfile->read_wait);
284 return 0; 397 return 0;
285 398
286drop: 399drop:
@@ -312,6 +425,7 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
312} 425}
313 426
314static const struct net_device_ops tun_netdev_ops = { 427static const struct net_device_ops tun_netdev_ops = {
428 .ndo_uninit = tun_net_uninit,
315 .ndo_open = tun_net_open, 429 .ndo_open = tun_net_open,
316 .ndo_stop = tun_net_close, 430 .ndo_stop = tun_net_close,
317 .ndo_start_xmit = tun_net_xmit, 431 .ndo_start_xmit = tun_net_xmit,
@@ -319,6 +433,7 @@ static const struct net_device_ops tun_netdev_ops = {
319}; 433};
320 434
321static const struct net_device_ops tap_netdev_ops = { 435static const struct net_device_ops tap_netdev_ops = {
436 .ndo_uninit = tun_net_uninit,
322 .ndo_open = tun_net_open, 437 .ndo_open = tun_net_open,
323 .ndo_stop = tun_net_close, 438 .ndo_stop = tun_net_close,
324 .ndo_start_xmit = tun_net_xmit, 439 .ndo_start_xmit = tun_net_xmit,
@@ -365,86 +480,66 @@ static void tun_net_init(struct net_device *dev)
365/* Poll */ 480/* Poll */
366static unsigned int tun_chr_poll(struct file *file, poll_table * wait) 481static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
367{ 482{
368 struct tun_struct *tun = file->private_data; 483 struct tun_file *tfile = file->private_data;
369 unsigned int mask = POLLOUT | POLLWRNORM; 484 struct tun_struct *tun = __tun_get(tfile);
485 struct sock *sk = tun->sk;
486 unsigned int mask = 0;
370 487
371 if (!tun) 488 if (!tun)
372 return -EBADFD; 489 return POLLERR;
373 490
374 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 491 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
375 492
376 poll_wait(file, &tun->read_wait, wait); 493 poll_wait(file, &tfile->read_wait, wait);
377 494
378 if (!skb_queue_empty(&tun->readq)) 495 if (!skb_queue_empty(&tun->readq))
379 mask |= POLLIN | POLLRDNORM; 496 mask |= POLLIN | POLLRDNORM;
380 497
498 if (sock_writeable(sk) ||
499 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
500 sock_writeable(sk)))
501 mask |= POLLOUT | POLLWRNORM;
502
503 if (tun->dev->reg_state != NETREG_REGISTERED)
504 mask = POLLERR;
505
506 tun_put(tun);
381 return mask; 507 return mask;
382} 508}
383 509
384/* prepad is the amount to reserve at front. len is length after that. 510/* prepad is the amount to reserve at front. len is length after that.
385 * linear is a hint as to how much to copy (usually headers). */ 511 * linear is a hint as to how much to copy (usually headers). */
386static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear, 512static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
387 gfp_t gfp) 513 size_t prepad, size_t len,
514 size_t linear, int noblock)
388{ 515{
516 struct sock *sk = tun->sk;
389 struct sk_buff *skb; 517 struct sk_buff *skb;
390 unsigned int i; 518 int err;
391
392 skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN);
393 if (skb) {
394 skb_reserve(skb, prepad);
395 skb_put(skb, len);
396 return skb;
397 }
398 519
399 /* Under a page? Don't bother with paged skb. */ 520 /* Under a page? Don't bother with paged skb. */
400 if (prepad + len < PAGE_SIZE) 521 if (prepad + len < PAGE_SIZE)
401 return NULL; 522 linear = len;
402 523
403 /* Start with a normal skb, and add pages. */ 524 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
404 skb = alloc_skb(prepad + linear, gfp); 525 &err);
405 if (!skb) 526 if (!skb)
406 return NULL; 527 return ERR_PTR(err);
407 528
408 skb_reserve(skb, prepad); 529 skb_reserve(skb, prepad);
409 skb_put(skb, linear); 530 skb_put(skb, linear);
410 531 skb->data_len = len - linear;
411 len -= linear; 532 skb->len += len - linear;
412
413 for (i = 0; i < MAX_SKB_FRAGS; i++) {
414 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
415
416 f->page = alloc_page(gfp|__GFP_ZERO);
417 if (!f->page)
418 break;
419
420 f->page_offset = 0;
421 f->size = PAGE_SIZE;
422
423 skb->data_len += PAGE_SIZE;
424 skb->len += PAGE_SIZE;
425 skb->truesize += PAGE_SIZE;
426 skb_shinfo(skb)->nr_frags++;
427
428 if (len < PAGE_SIZE) {
429 len = 0;
430 break;
431 }
432 len -= PAGE_SIZE;
433 }
434
435 /* Too large, or alloc fail? */
436 if (unlikely(len)) {
437 kfree_skb(skb);
438 skb = NULL;
439 }
440 533
441 return skb; 534 return skb;
442} 535}
443 536
444/* Get packet from user space buffer */ 537/* Get packet from user space buffer */
445static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) 538static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
539 struct iovec *iv, size_t count,
540 int noblock)
446{ 541{
447 struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) }; 542 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
448 struct sk_buff *skb; 543 struct sk_buff *skb;
449 size_t len = count, align = 0; 544 size_t len = count, align = 0;
450 struct virtio_net_hdr gso = { 0 }; 545 struct virtio_net_hdr gso = { 0 };
@@ -474,9 +569,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
474 return -EINVAL; 569 return -EINVAL;
475 } 570 }
476 571
477 if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) { 572 skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
478 tun->dev->stats.rx_dropped++; 573 if (IS_ERR(skb)) {
479 return -ENOMEM; 574 if (PTR_ERR(skb) != -EAGAIN)
575 tun->dev->stats.rx_dropped++;
576 return PTR_ERR(skb);
480 } 577 }
481 578
482 if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) { 579 if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
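[Editor's note] tun_alloc_skb() now defers blocking and socket accounting to sock_alloc_send_pskb() and reports failures through the ERR_PTR convention rather than returning NULL, which is why the caller above tests IS_ERR()/PTR_ERR() and only counts rx_dropped for errors other than -EAGAIN: a non-blocking writer that merely ran out of socket buffer space is not a drop. A small sketch of that error-propagation idiom (foo_* names and the simplified allocator are illustrative only):

    #include <linux/err.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    /* Hypothetical allocator: encode the failure reason in the pointer. */
    static struct sk_buff *foo_alloc(struct sock *sk, unsigned long len, int noblock)
    {
            int err;
            struct sk_buff *skb = sock_alloc_send_skb(sk, len, noblock, &err);

            return skb ? skb : ERR_PTR(err);
    }

    /* Hypothetical caller: -EAGAIN (non-blocking, no buffer space) is not a drop. */
    static int foo_receive(struct net_device *dev, struct sock *sk,
                           unsigned long len, int noblock)
    {
            struct sk_buff *skb = foo_alloc(sk, len, noblock);

            if (IS_ERR(skb)) {
                    if (PTR_ERR(skb) != -EAGAIN)
                            dev->stats.rx_dropped++;
                    return PTR_ERR(skb);
            }
            dev_kfree_skb(skb);    /* real code would fill the skb and hand it up */
            return 0;
    }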
@@ -562,14 +659,20 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
562static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, 659static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
563 unsigned long count, loff_t pos) 660 unsigned long count, loff_t pos)
564{ 661{
565 struct tun_struct *tun = iocb->ki_filp->private_data; 662 struct file *file = iocb->ki_filp;
663 struct tun_struct *tun = tun_get(file);
664 ssize_t result;
566 665
567 if (!tun) 666 if (!tun)
568 return -EBADFD; 667 return -EBADFD;
569 668
570 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); 669 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
571 670
572 return tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count)); 671 result = tun_get_user(tun, (struct iovec *)iv, iov_length(iv, count),
672 file->f_flags & O_NONBLOCK);
673
674 tun_put(tun);
675 return result;
573} 676}
574 677
575/* Put packet to the user space buffer */ 678/* Put packet to the user space buffer */
@@ -642,7 +745,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
642 unsigned long count, loff_t pos) 745 unsigned long count, loff_t pos)
643{ 746{
644 struct file *file = iocb->ki_filp; 747 struct file *file = iocb->ki_filp;
645 struct tun_struct *tun = file->private_data; 748 struct tun_file *tfile = file->private_data;
749 struct tun_struct *tun = __tun_get(tfile);
646 DECLARE_WAITQUEUE(wait, current); 750 DECLARE_WAITQUEUE(wait, current);
647 struct sk_buff *skb; 751 struct sk_buff *skb;
648 ssize_t len, ret = 0; 752 ssize_t len, ret = 0;
@@ -653,10 +757,12 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
653 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 757 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
654 758
655 len = iov_length(iv, count); 759 len = iov_length(iv, count);
656 if (len < 0) 760 if (len < 0) {
657 return -EINVAL; 761 ret = -EINVAL;
762 goto out;
763 }
658 764
659 add_wait_queue(&tun->read_wait, &wait); 765 add_wait_queue(&tfile->read_wait, &wait);
660 while (len) { 766 while (len) {
661 current->state = TASK_INTERRUPTIBLE; 767 current->state = TASK_INTERRUPTIBLE;
662 768
@@ -670,6 +776,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
670 ret = -ERESTARTSYS; 776 ret = -ERESTARTSYS;
671 break; 777 break;
672 } 778 }
779 if (tun->dev->reg_state != NETREG_REGISTERED) {
780 ret = -EIO;
781 break;
782 }
673 783
674 /* Nothing to read, let's sleep */ 784 /* Nothing to read, let's sleep */
675 schedule(); 785 schedule();
@@ -683,8 +793,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
683 } 793 }
684 794
685 current->state = TASK_RUNNING; 795 current->state = TASK_RUNNING;
686 remove_wait_queue(&tun->read_wait, &wait); 796 remove_wait_queue(&tfile->read_wait, &wait);
687 797
798out:
799 tun_put(tun);
688 return ret; 800 return ret;
689} 801}
690 802
@@ -693,54 +805,78 @@ static void tun_setup(struct net_device *dev)
693 struct tun_struct *tun = netdev_priv(dev); 805 struct tun_struct *tun = netdev_priv(dev);
694 806
695 skb_queue_head_init(&tun->readq); 807 skb_queue_head_init(&tun->readq);
696 init_waitqueue_head(&tun->read_wait);
697 808
698 tun->owner = -1; 809 tun->owner = -1;
699 tun->group = -1; 810 tun->group = -1;
700 811
701 dev->ethtool_ops = &tun_ethtool_ops; 812 dev->ethtool_ops = &tun_ethtool_ops;
702 dev->destructor = free_netdev; 813 dev->destructor = free_netdev;
703 dev->features |= NETIF_F_NETNS_LOCAL;
704} 814}
705 815
706static struct tun_struct *tun_get_by_name(struct tun_net *tn, const char *name) 816/* Trivial set of netlink ops to allow deleting tun or tap
817 * device with netlink.
818 */
819static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
820{
821 return -EINVAL;
822}
823
824static struct rtnl_link_ops tun_link_ops __read_mostly = {
825 .kind = DRV_NAME,
826 .priv_size = sizeof(struct tun_struct),
827 .setup = tun_setup,
828 .validate = tun_validate,
829};
830
831static void tun_sock_write_space(struct sock *sk)
707{ 832{
708 struct tun_struct *tun; 833 struct tun_struct *tun;
709 834
710 ASSERT_RTNL(); 835 if (!sock_writeable(sk))
711 list_for_each_entry(tun, &tn->dev_list, list) { 836 return;
712 if (!strncmp(tun->dev->name, name, IFNAMSIZ)) 837
713 return tun; 838 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
714 } 839 wake_up_interruptible_sync(sk->sk_sleep);
840
841 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
842 return;
715 843
716 return NULL; 844 tun = container_of(sk, struct tun_sock, sk)->tun;
845 kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
717} 846}
718 847
848static void tun_sock_destruct(struct sock *sk)
849{
850 dev_put(container_of(sk, struct tun_sock, sk)->tun->dev);
851}
852
853static struct proto tun_proto = {
854 .name = "tun",
855 .owner = THIS_MODULE,
856 .obj_size = sizeof(struct tun_sock),
857};
858
719static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 859static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
720{ 860{
721 struct tun_net *tn; 861 struct sock *sk;
722 struct tun_struct *tun; 862 struct tun_struct *tun;
723 struct net_device *dev; 863 struct net_device *dev;
724 const struct cred *cred = current_cred(); 864 struct tun_file *tfile = file->private_data;
725 int err; 865 int err;
726 866
727 tn = net_generic(net, tun_net_id); 867 dev = __dev_get_by_name(net, ifr->ifr_name);
728 tun = tun_get_by_name(tn, ifr->ifr_name); 868 if (dev) {
729 if (tun) { 869 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
730 if (tun->attached) 870 tun = netdev_priv(dev);
731 return -EBUSY; 871 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
732 872 tun = netdev_priv(dev);
733 /* Check permissions */ 873 else
734 if (((tun->owner != -1 && 874 return -EINVAL;
735 cred->euid != tun->owner) || 875
736 (tun->group != -1 && 876 err = tun_attach(tun, file);
737 cred->egid != tun->group)) && 877 if (err < 0)
738 !capable(CAP_NET_ADMIN)) { 878 return err;
739 return -EPERM;
740 }
741 } 879 }
742 else if (__dev_get_by_name(net, ifr->ifr_name))
743 return -EINVAL;
744 else { 880 else {
745 char *name; 881 char *name;
746 unsigned long flags = 0; 882 unsigned long flags = 0;
@@ -771,25 +907,45 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
771 return -ENOMEM; 907 return -ENOMEM;
772 908
773 dev_net_set(dev, net); 909 dev_net_set(dev, net);
910 dev->rtnl_link_ops = &tun_link_ops;
774 911
775 tun = netdev_priv(dev); 912 tun = netdev_priv(dev);
776 tun->dev = dev; 913 tun->dev = dev;
777 tun->flags = flags; 914 tun->flags = flags;
778 tun->txflt.count = 0; 915 tun->txflt.count = 0;
779 916
917 err = -ENOMEM;
918 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
919 if (!sk)
920 goto err_free_dev;
921
922 /* This ref count is for tun->sk. */
923 dev_hold(dev);
924 sock_init_data(&tun->socket, sk);
925 sk->sk_write_space = tun_sock_write_space;
926 sk->sk_destruct = tun_sock_destruct;
927 sk->sk_sndbuf = INT_MAX;
928 sk->sk_sleep = &tfile->read_wait;
929
930 tun->sk = sk;
931 container_of(sk, struct tun_sock, sk)->tun = tun;
932
780 tun_net_init(dev); 933 tun_net_init(dev);
781 934
782 if (strchr(dev->name, '%')) { 935 if (strchr(dev->name, '%')) {
783 err = dev_alloc_name(dev, dev->name); 936 err = dev_alloc_name(dev, dev->name);
784 if (err < 0) 937 if (err < 0)
785 goto err_free_dev; 938 goto err_free_sk;
786 } 939 }
787 940
941 err = -EINVAL;
788 err = register_netdevice(tun->dev); 942 err = register_netdevice(tun->dev);
789 if (err < 0) 943 if (err < 0)
790 goto err_free_dev; 944 goto err_free_dev;
791 945
792 list_add(&tun->list, &tn->dev_list); 946 err = tun_attach(tun, file);
947 if (err < 0)
948 goto err_free_dev;
793 } 949 }
794 950
795 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); 951 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
@@ -809,10 +965,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
809 else 965 else
810 tun->flags &= ~TUN_VNET_HDR; 966 tun->flags &= ~TUN_VNET_HDR;
811 967
812 file->private_data = tun;
813 tun->attached = 1;
814 get_net(dev_net(tun->dev));
815
816 /* Make sure persistent devices do not get stuck in 968 /* Make sure persistent devices do not get stuck in
817 * xoff state. 969 * xoff state.
818 */ 970 */
@@ -822,6 +974,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
822 strcpy(ifr->ifr_name, tun->dev->name); 974 strcpy(ifr->ifr_name, tun->dev->name);
823 return 0; 975 return 0;
824 976
977 err_free_sk:
978 sock_put(sk);
825 err_free_dev: 979 err_free_dev:
826 free_netdev(dev); 980 free_netdev(dev);
827 failed: 981 failed:
@@ -830,7 +984,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
830 984
831static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) 985static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
832{ 986{
833 struct tun_struct *tun = file->private_data; 987 struct tun_struct *tun = tun_get(file);
834 988
835 if (!tun) 989 if (!tun)
836 return -EBADFD; 990 return -EBADFD;
@@ -855,6 +1009,7 @@ static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
855 if (tun->flags & TUN_VNET_HDR) 1009 if (tun->flags & TUN_VNET_HDR)
856 ifr->ifr_flags |= IFF_VNET_HDR; 1010 ifr->ifr_flags |= IFF_VNET_HDR;
857 1011
1012 tun_put(tun);
858 return 0; 1013 return 0;
859} 1014}
860 1015
@@ -901,22 +1056,34 @@ static int set_offload(struct net_device *dev, unsigned long arg)
901static int tun_chr_ioctl(struct inode *inode, struct file *file, 1056static int tun_chr_ioctl(struct inode *inode, struct file *file,
902 unsigned int cmd, unsigned long arg) 1057 unsigned int cmd, unsigned long arg)
903{ 1058{
904 struct tun_struct *tun = file->private_data; 1059 struct tun_file *tfile = file->private_data;
1060 struct tun_struct *tun;
905 void __user* argp = (void __user*)arg; 1061 void __user* argp = (void __user*)arg;
906 struct ifreq ifr; 1062 struct ifreq ifr;
1063 int sndbuf;
907 int ret; 1064 int ret;
908 1065
909 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) 1066 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
910 if (copy_from_user(&ifr, argp, sizeof ifr)) 1067 if (copy_from_user(&ifr, argp, sizeof ifr))
911 return -EFAULT; 1068 return -EFAULT;
912 1069
1070 if (cmd == TUNGETFEATURES) {
1071 /* Currently this just means: "what IFF flags are valid?".
1072 * This is needed because we never checked for invalid flags on
1073 * TUNSETIFF. */
1074 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1075 IFF_VNET_HDR,
1076 (unsigned int __user*)argp);
1077 }
1078
1079 tun = __tun_get(tfile);
913 if (cmd == TUNSETIFF && !tun) { 1080 if (cmd == TUNSETIFF && !tun) {
914 int err; 1081 int err;
915 1082
916 ifr.ifr_name[IFNAMSIZ-1] = '\0'; 1083 ifr.ifr_name[IFNAMSIZ-1] = '\0';
917 1084
918 rtnl_lock(); 1085 rtnl_lock();
919 err = tun_set_iff(current->nsproxy->net_ns, file, &ifr); 1086 err = tun_set_iff(tfile->net, file, &ifr);
920 rtnl_unlock(); 1087 rtnl_unlock();
921 1088
922 if (err) 1089 if (err)
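[Editor's note] TUNGETFEATURES is handled before the attached-device lookup because it describes the driver as a whole rather than any particular interface, so it must work on a freshly opened /dev/net/tun descriptor. Roughly how user space would query it (a minimal sketch, error handling trimmed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/if_tun.h>

    int main(void)
    {
            unsigned int features = 0;
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0 || ioctl(fd, TUNGETFEATURES, &features) < 0)
                    return 1;

            /* Bits come from the IFF_* set advertised by the driver above. */
            printf("IFF_VNET_HDR %ssupported\n",
                   (features & IFF_VNET_HDR) ? "" : "not ");
            return 0;
    }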
@@ -927,28 +1094,21 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
927 return 0; 1094 return 0;
928 } 1095 }
929 1096
930 if (cmd == TUNGETFEATURES) {
931 /* Currently this just means: "what IFF flags are valid?".
932 * This is needed because we never checked for invalid flags on
933 * TUNSETIFF. */
934 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
935 IFF_VNET_HDR,
936 (unsigned int __user*)argp);
937 }
938 1097
939 if (!tun) 1098 if (!tun)
940 return -EBADFD; 1099 return -EBADFD;
941 1100
942 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 1101 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
943 1102
1103 ret = 0;
944 switch (cmd) { 1104 switch (cmd) {
945 case TUNGETIFF: 1105 case TUNGETIFF:
946 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); 1106 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
947 if (ret) 1107 if (ret)
948 return ret; 1108 break;
949 1109
950 if (copy_to_user(argp, &ifr, sizeof(ifr))) 1110 if (copy_to_user(argp, &ifr, sizeof(ifr)))
951 return -EFAULT; 1111 ret = -EFAULT;
952 break; 1112 break;
953 1113
954 case TUNSETNOCSUM: 1114 case TUNSETNOCSUM:
@@ -1000,7 +1160,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1000 ret = 0; 1160 ret = 0;
1001 } 1161 }
1002 rtnl_unlock(); 1162 rtnl_unlock();
1003 return ret; 1163 break;
1004 1164
1005#ifdef TUN_DEBUG 1165#ifdef TUN_DEBUG
1006 case TUNSETDEBUG: 1166 case TUNSETDEBUG:
@@ -1011,24 +1171,25 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1011 rtnl_lock(); 1171 rtnl_lock();
1012 ret = set_offload(tun->dev, arg); 1172 ret = set_offload(tun->dev, arg);
1013 rtnl_unlock(); 1173 rtnl_unlock();
1014 return ret; 1174 break;
1015 1175
1016 case TUNSETTXFILTER: 1176 case TUNSETTXFILTER:
1017 /* Can be set only for TAPs */ 1177 /* Can be set only for TAPs */
1178 ret = -EINVAL;
1018 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 1179 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1019 return -EINVAL; 1180 break;
1020 rtnl_lock(); 1181 rtnl_lock();
1021 ret = update_filter(&tun->txflt, (void __user *)arg); 1182 ret = update_filter(&tun->txflt, (void __user *)arg);
1022 rtnl_unlock(); 1183 rtnl_unlock();
1023 return ret; 1184 break;
1024 1185
1025 case SIOCGIFHWADDR: 1186 case SIOCGIFHWADDR:
 1026 /* Get hw address */ 1187 /* Get hw address */
1027 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 1188 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1028 ifr.ifr_hwaddr.sa_family = tun->dev->type; 1189 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1029 if (copy_to_user(argp, &ifr, sizeof ifr)) 1190 if (copy_to_user(argp, &ifr, sizeof ifr))
1030 return -EFAULT; 1191 ret = -EFAULT;
1031 return 0; 1192 break;
1032 1193
1033 case SIOCSIFHWADDR: 1194 case SIOCSIFHWADDR:
1034 /* Set hw address */ 1195 /* Set hw address */
@@ -1038,18 +1199,35 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1038 rtnl_lock(); 1199 rtnl_lock();
1039 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1200 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1040 rtnl_unlock(); 1201 rtnl_unlock();
1041 return ret; 1202 break;
1203
1204 case TUNGETSNDBUF:
1205 sndbuf = tun->sk->sk_sndbuf;
1206 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1207 ret = -EFAULT;
1208 break;
1209
1210 case TUNSETSNDBUF:
1211 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
1212 ret = -EFAULT;
1213 break;
1214 }
1215
1216 tun->sk->sk_sndbuf = sndbuf;
1217 break;
1042 1218
1043 default: 1219 default:
1044 return -EINVAL; 1220 ret = -EINVAL;
1221 break;
1045 }; 1222 };
1046 1223
1047 return 0; 1224 tun_put(tun);
1225 return ret;
1048} 1226}
1049 1227
1050static int tun_chr_fasync(int fd, struct file *file, int on) 1228static int tun_chr_fasync(int fd, struct file *file, int on)
1051{ 1229{
1052 struct tun_struct *tun = file->private_data; 1230 struct tun_struct *tun = tun_get(file);
1053 int ret; 1231 int ret;
1054 1232
1055 if (!tun) 1233 if (!tun)
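[Editor's note] TUNGETSNDBUF/TUNSETSNDBUF expose the new per-device socket's sk_sndbuf, which the attach path initialises to INT_MAX; lowering it lets a management application bound how much data can be queued toward a slow reader. Hypothetical user-space usage, assuming a kernel and linux/if_tun.h carrying this patch:

    #include <sys/ioctl.h>
    #include <linux/if_tun.h>

    /* fd is an attached tun/tap descriptor; cap its send buffer at 1 MiB. */
    static int tun_limit_sndbuf(int fd)
    {
            int sndbuf = 1 << 20;

            if (ioctl(fd, TUNSETSNDBUF, &sndbuf) < 0)
                    return -1;
            return ioctl(fd, TUNGETSNDBUF, &sndbuf);   /* read the value back */
    }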
@@ -1071,42 +1249,50 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1071 ret = 0; 1249 ret = 0;
1072out: 1250out:
1073 unlock_kernel(); 1251 unlock_kernel();
1252 tun_put(tun);
1074 return ret; 1253 return ret;
1075} 1254}
1076 1255
1077static int tun_chr_open(struct inode *inode, struct file * file) 1256static int tun_chr_open(struct inode *inode, struct file * file)
1078{ 1257{
1258 struct tun_file *tfile;
1079 cycle_kernel_lock(); 1259 cycle_kernel_lock();
1080 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1260 DBG1(KERN_INFO "tunX: tun_chr_open\n");
1081 file->private_data = NULL; 1261
1262 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1263 if (!tfile)
1264 return -ENOMEM;
1265 atomic_set(&tfile->count, 0);
1266 tfile->tun = NULL;
1267 tfile->net = get_net(current->nsproxy->net_ns);
1268 init_waitqueue_head(&tfile->read_wait);
1269 file->private_data = tfile;
1082 return 0; 1270 return 0;
1083} 1271}
1084 1272
1085static int tun_chr_close(struct inode *inode, struct file *file) 1273static int tun_chr_close(struct inode *inode, struct file *file)
1086{ 1274{
1087 struct tun_struct *tun = file->private_data; 1275 struct tun_file *tfile = file->private_data;
1088 1276 struct tun_struct *tun = __tun_get(tfile);
1089 if (!tun)
1090 return 0;
1091 1277
1092 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1093 1278
1094 rtnl_lock(); 1279 if (tun) {
1280 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1095 1281
1096 /* Detach from net device */ 1282 rtnl_lock();
1097 file->private_data = NULL; 1283 __tun_detach(tun);
1098 tun->attached = 0;
1099 put_net(dev_net(tun->dev));
1100 1284
 1101 /* Drop read queue */ 1285 /* If desirable, unregister the netdevice. */
1102 skb_queue_purge(&tun->readq); 1286 if (!(tun->flags & TUN_PERSIST)) {
1287 sock_put(tun->sk);
1288 unregister_netdevice(tun->dev);
1289 }
1103 1290
1104 if (!(tun->flags & TUN_PERSIST)) { 1291 rtnl_unlock();
1105 list_del(&tun->list);
1106 unregister_netdevice(tun->dev);
1107 } 1292 }
1108 1293
1109 rtnl_unlock(); 1294 put_net(tfile->net);
1295 kfree(tfile);
1110 1296
1111 return 0; 1297 return 0;
1112} 1298}
@@ -1187,7 +1373,7 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
1187static u32 tun_get_link(struct net_device *dev) 1373static u32 tun_get_link(struct net_device *dev)
1188{ 1374{
1189 struct tun_struct *tun = netdev_priv(dev); 1375 struct tun_struct *tun = netdev_priv(dev);
1190 return tun->attached; 1376 return !!tun->tfile;
1191} 1377}
1192 1378
1193static u32 tun_get_rx_csum(struct net_device *dev) 1379static u32 tun_get_rx_csum(struct net_device *dev)
@@ -1216,45 +1402,6 @@ static const struct ethtool_ops tun_ethtool_ops = {
1216 .set_rx_csum = tun_set_rx_csum 1402 .set_rx_csum = tun_set_rx_csum
1217}; 1403};
1218 1404
1219static int tun_init_net(struct net *net)
1220{
1221 struct tun_net *tn;
1222
1223 tn = kmalloc(sizeof(*tn), GFP_KERNEL);
1224 if (tn == NULL)
1225 return -ENOMEM;
1226
1227 INIT_LIST_HEAD(&tn->dev_list);
1228
1229 if (net_assign_generic(net, tun_net_id, tn)) {
1230 kfree(tn);
1231 return -ENOMEM;
1232 }
1233
1234 return 0;
1235}
1236
1237static void tun_exit_net(struct net *net)
1238{
1239 struct tun_net *tn;
1240 struct tun_struct *tun, *nxt;
1241
1242 tn = net_generic(net, tun_net_id);
1243
1244 rtnl_lock();
1245 list_for_each_entry_safe(tun, nxt, &tn->dev_list, list) {
1246 DBG(KERN_INFO "%s cleaned up\n", tun->dev->name);
1247 unregister_netdevice(tun->dev);
1248 }
1249 rtnl_unlock();
1250
1251 kfree(tn);
1252}
1253
1254static struct pernet_operations tun_net_ops = {
1255 .init = tun_init_net,
1256 .exit = tun_exit_net,
1257};
1258 1405
1259static int __init tun_init(void) 1406static int __init tun_init(void)
1260{ 1407{
@@ -1263,10 +1410,10 @@ static int __init tun_init(void)
1263 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 1410 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1264 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); 1411 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
1265 1412
1266 ret = register_pernet_gen_device(&tun_net_id, &tun_net_ops); 1413 ret = rtnl_link_register(&tun_link_ops);
1267 if (ret) { 1414 if (ret) {
1268 printk(KERN_ERR "tun: Can't register pernet ops\n"); 1415 printk(KERN_ERR "tun: Can't register link_ops\n");
1269 goto err_pernet; 1416 goto err_linkops;
1270 } 1417 }
1271 1418
1272 ret = misc_register(&tun_miscdev); 1419 ret = misc_register(&tun_miscdev);
@@ -1274,18 +1421,17 @@ static int __init tun_init(void)
1274 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); 1421 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
1275 goto err_misc; 1422 goto err_misc;
1276 } 1423 }
1277 return 0; 1424 return 0;
1278
1279err_misc: 1425err_misc:
1280 unregister_pernet_gen_device(tun_net_id, &tun_net_ops); 1426 rtnl_link_unregister(&tun_link_ops);
1281err_pernet: 1427err_linkops:
1282 return ret; 1428 return ret;
1283} 1429}
1284 1430
1285static void tun_cleanup(void) 1431static void tun_cleanup(void)
1286{ 1432{
1287 misc_deregister(&tun_miscdev); 1433 misc_deregister(&tun_miscdev);
1288 unregister_pernet_gen_device(tun_net_id, &tun_net_ops); 1434 rtnl_link_unregister(&tun_link_ops);
1289} 1435}
1290 1436
1291module_init(tun_init); 1437module_init(tun_init);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 3af9a9516cc..a8e5651f316 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1783,7 +1783,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
1783 } 1783 }
1784 1784
1785 if (work_done < budget) { 1785 if (work_done < budget) {
1786 netif_rx_complete(napi); 1786 napi_complete(napi);
1787 iowrite32(TYPHOON_INTR_NONE, 1787 iowrite32(TYPHOON_INTR_NONE,
1788 tp->ioaddr + TYPHOON_REG_INTR_MASK); 1788 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1789 typhoon_post_pci_writes(tp->ioaddr); 1789 typhoon_post_pci_writes(tp->ioaddr);
@@ -1806,10 +1806,10 @@ typhoon_interrupt(int irq, void *dev_instance)
1806 1806
1807 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); 1807 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1808 1808
1809 if (netif_rx_schedule_prep(&tp->napi)) { 1809 if (napi_schedule_prep(&tp->napi)) {
1810 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); 1810 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1811 typhoon_post_pci_writes(ioaddr); 1811 typhoon_post_pci_writes(ioaddr);
1812 __netif_rx_schedule(&tp->napi); 1812 __napi_schedule(&tp->napi);
1813 } else { 1813 } else {
1814 printk(KERN_ERR "%s: Error, poll already scheduled\n", 1814 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1815 dev->name); 1815 dev->name);
@@ -1944,7 +1944,7 @@ typhoon_start_runtime(struct typhoon *tp)
1944 goto error_out; 1944 goto error_out;
1945 1945
1946 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); 1946 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1947 xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q); 1947 xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1948 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1948 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1949 if(err < 0) 1949 if(err < 0)
1950 goto error_out; 1950 goto error_out;
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
index dd7022ca735..673fd512591 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/typhoon.h
@@ -174,18 +174,18 @@ struct tx_desc {
174 u64 tx_addr; /* opaque for hardware, for TX_DESC */ 174 u64 tx_addr; /* opaque for hardware, for TX_DESC */
175 }; 175 };
176 __le32 processFlags; 176 __le32 processFlags;
177#define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001) 177#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001)
178#define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002) 178#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002)
179#define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004) 179#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004)
180#define TYPHOON_TX_PF_TCP_SEGMENT __constant_cpu_to_le32(0x00000008) 180#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008)
181#define TYPHOON_TX_PF_INSERT_VLAN __constant_cpu_to_le32(0x00000010) 181#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010)
182#define TYPHOON_TX_PF_IPSEC __constant_cpu_to_le32(0x00000020) 182#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020)
183#define TYPHOON_TX_PF_VLAN_PRIORITY __constant_cpu_to_le32(0x00000040) 183#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040)
184#define TYPHOON_TX_PF_UDP_CHKSUM __constant_cpu_to_le32(0x00000080) 184#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080)
185#define TYPHOON_TX_PF_PAD_FRAME __constant_cpu_to_le32(0x00000100) 185#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100)
186#define TYPHOON_TX_PF_RESERVED __constant_cpu_to_le32(0x00000e00) 186#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00)
187#define TYPHOON_TX_PF_VLAN_MASK __constant_cpu_to_le32(0x0ffff000) 187#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
188#define TYPHOON_TX_PF_INTERNAL __constant_cpu_to_le32(0xf0000000) 188#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
190} __attribute__ ((packed)); 190} __attribute__ ((packed));
191 191
@@ -203,8 +203,8 @@ struct tcpopt_desc {
203 u8 flags; 203 u8 flags;
204 u8 numDesc; 204 u8 numDesc;
205 __le16 mss_flags; 205 __le16 mss_flags;
206#define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000) 206#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000)
207#define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000) 207#define TYPHOON_TSO_LAST cpu_to_le16(0x2000)
208 __le32 respAddrLo; 208 __le32 respAddrLo;
209 __le32 bytesTx; 209 __le32 bytesTx;
210 __le32 status; 210 __le32 status;
@@ -222,8 +222,8 @@ struct ipsec_desc {
222 u8 flags; 222 u8 flags;
223 u8 numDesc; 223 u8 numDesc;
224 __le16 ipsecFlags; 224 __le16 ipsecFlags;
225#define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000) 225#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000)
226#define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001) 226#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001)
227 __le32 sa1; 227 __le32 sa1;
228 __le32 sa2; 228 __le32 sa2;
229 __le32 reserved; 229 __le32 reserved;
@@ -248,41 +248,41 @@ struct rx_desc {
248 u32 addr; /* opaque, comes from virtAddr */ 248 u32 addr; /* opaque, comes from virtAddr */
249 u32 addrHi; /* opaque, comes from virtAddrHi */ 249 u32 addrHi; /* opaque, comes from virtAddrHi */
250 __le32 rxStatus; 250 __le32 rxStatus;
251#define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000) 251#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000)
252#define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001) 252#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001)
253#define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002) 253#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002)
254#define TYPHOON_RX_ERR_RUNT __constant_cpu_to_le32(0x00000003) 254#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003)
255#define TYPHOON_RX_ERR_CRC __constant_cpu_to_le32(0x00000004) 255#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004)
256#define TYPHOON_RX_ERR_OVERSIZE __constant_cpu_to_le32(0x00000005) 256#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005)
257#define TYPHOON_RX_ERR_ALIGN __constant_cpu_to_le32(0x00000006) 257#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006)
258#define TYPHOON_RX_ERR_DRIBBLE __constant_cpu_to_le32(0x00000007) 258#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007)
259#define TYPHOON_RX_PROTO_MASK __constant_cpu_to_le32(0x00000003) 259#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003)
260#define TYPHOON_RX_PROTO_UNKNOWN __constant_cpu_to_le32(0x00000000) 260#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000)
261#define TYPHOON_RX_PROTO_IP __constant_cpu_to_le32(0x00000001) 261#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001)
262#define TYPHOON_RX_PROTO_IPX __constant_cpu_to_le32(0x00000002) 262#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002)
263#define TYPHOON_RX_VLAN __constant_cpu_to_le32(0x00000004) 263#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004)
264#define TYPHOON_RX_IP_FRAG __constant_cpu_to_le32(0x00000008) 264#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008)
265#define TYPHOON_RX_IPSEC __constant_cpu_to_le32(0x00000010) 265#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010)
266#define TYPHOON_RX_IP_CHK_FAIL __constant_cpu_to_le32(0x00000020) 266#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020)
267#define TYPHOON_RX_TCP_CHK_FAIL __constant_cpu_to_le32(0x00000040) 267#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040)
268#define TYPHOON_RX_UDP_CHK_FAIL __constant_cpu_to_le32(0x00000080) 268#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080)
269#define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100) 269#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100)
270#define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200) 270#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200)
271#define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400) 271#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400)
272 __le16 filterResults; 272 __le16 filterResults;
273#define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff) 273#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff)
274#define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000) 274#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000)
275 __le16 ipsecResults; 275 __le16 ipsecResults;
276#define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001) 276#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001)
277#define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002) 277#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002)
278#define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004) 278#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004)
279#define TYPHOON_RX_INNER_ESP_GOOD __constant_cpu_to_le16(0x0008) 279#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008)
280#define TYPHOON_RX_OUTER_AH_FAIL __constant_cpu_to_le16(0x0010) 280#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010)
281#define TYPHOON_RX_OUTER_ESP_FAIL __constant_cpu_to_le16(0x0020) 281#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020)
282#define TYPHOON_RX_INNER_AH_FAIL __constant_cpu_to_le16(0x0040) 282#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040)
283#define TYPHOON_RX_INNER_ESP_FAIL __constant_cpu_to_le16(0x0080) 283#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080)
284#define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100) 284#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
285#define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200) 285#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
286 __be32 vlanTag; 286 __be32 vlanTag;
287} __attribute__ ((packed)); 287} __attribute__ ((packed));
288 288
@@ -318,31 +318,31 @@ struct cmd_desc {
318 u8 flags; 318 u8 flags;
319 u8 numDesc; 319 u8 numDesc;
320 __le16 cmd; 320 __le16 cmd;
321#define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001) 321#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001)
322#define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002) 322#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002)
323#define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003) 323#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003)
324#define TYPHOON_CMD_RX_DISABLE __constant_cpu_to_le16(0x0004) 324#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004)
325#define TYPHOON_CMD_SET_RX_FILTER __constant_cpu_to_le16(0x0005) 325#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005)
326#define TYPHOON_CMD_READ_STATS __constant_cpu_to_le16(0x0007) 326#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007)
327#define TYPHOON_CMD_XCVR_SELECT __constant_cpu_to_le16(0x0013) 327#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013)
328#define TYPHOON_CMD_SET_MAX_PKT_SIZE __constant_cpu_to_le16(0x001a) 328#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a)
329#define TYPHOON_CMD_READ_MEDIA_STATUS __constant_cpu_to_le16(0x001b) 329#define TYPHOON_CMD_READ_MEDIA_STATUS cpu_to_le16(0x001b)
330#define TYPHOON_CMD_GOTO_SLEEP __constant_cpu_to_le16(0x0023) 330#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023)
331#define TYPHOON_CMD_SET_MULTICAST_HASH __constant_cpu_to_le16(0x0025) 331#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025)
332#define TYPHOON_CMD_SET_MAC_ADDRESS __constant_cpu_to_le16(0x0026) 332#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026)
333#define TYPHOON_CMD_READ_MAC_ADDRESS __constant_cpu_to_le16(0x0027) 333#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027)
334#define TYPHOON_CMD_VLAN_TYPE_WRITE __constant_cpu_to_le16(0x002b) 334#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b)
335#define TYPHOON_CMD_CREATE_SA __constant_cpu_to_le16(0x0034) 335#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034)
336#define TYPHOON_CMD_DELETE_SA __constant_cpu_to_le16(0x0035) 336#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035)
337#define TYPHOON_CMD_READ_VERSIONS __constant_cpu_to_le16(0x0043) 337#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043)
338#define TYPHOON_CMD_IRQ_COALESCE_CTRL __constant_cpu_to_le16(0x0045) 338#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045)
339#define TYPHOON_CMD_ENABLE_WAKE_EVENTS __constant_cpu_to_le16(0x0049) 339#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049)
340#define TYPHOON_CMD_SET_OFFLOAD_TASKS __constant_cpu_to_le16(0x004f) 340#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f)
341#define TYPHOON_CMD_HELLO_RESP __constant_cpu_to_le16(0x0057) 341#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057)
342#define TYPHOON_CMD_HALT __constant_cpu_to_le16(0x005d) 342#define TYPHOON_CMD_HALT cpu_to_le16(0x005d)
343#define TYPHOON_CMD_READ_IPSEC_INFO __constant_cpu_to_le16(0x005e) 343#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e)
344#define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067) 344#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067)
345#define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069) 345#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069)
346 u16 seqNo; 346 u16 seqNo;
347 __le16 parm1; 347 __le16 parm1;
348 __le32 parm2; 348 __le32 parm2;
@@ -380,11 +380,11 @@ struct resp_desc {
380 380
381/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) 381/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1)
382 */ 382 */
383#define TYPHOON_RX_FILTER_DIRECTED __constant_cpu_to_le16(0x0001) 383#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001)
384#define TYPHOON_RX_FILTER_ALL_MCAST __constant_cpu_to_le16(0x0002) 384#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002)
385#define TYPHOON_RX_FILTER_BROADCAST __constant_cpu_to_le16(0x0004) 385#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004)
386#define TYPHOON_RX_FILTER_PROMISCOUS __constant_cpu_to_le16(0x0008) 386#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008)
387#define TYPHOON_RX_FILTER_MCAST_HASH __constant_cpu_to_le16(0x0010) 387#define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010)
388 388
389/* TYPHOON_CMD_READ_STATS response format 389/* TYPHOON_CMD_READ_STATS response format
390 */ 390 */
@@ -416,40 +416,40 @@ struct stats_resp {
416 __le32 rxOverflow; 416 __le32 rxOverflow;
417 __le32 rxFiltered; 417 __le32 rxFiltered;
418 __le32 linkStatus; 418 __le32 linkStatus;
419#define TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001) 419#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001)
420#define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001) 420#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001)
421#define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000) 421#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000)
422#define TYPHOON_LINK_SPEED_MASK __constant_cpu_to_le32(0x00000002) 422#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002)
423#define TYPHOON_LINK_100MBPS __constant_cpu_to_le32(0x00000002) 423#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002)
424#define TYPHOON_LINK_10MBPS __constant_cpu_to_le32(0x00000000) 424#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000)
425#define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004) 425#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004)
426#define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004) 426#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004)
427#define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000) 427#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000)
428 __le32 unused2; 428 __le32 unused2;
429 __le32 unused3; 429 __le32 unused3;
430} __attribute__ ((packed)); 430} __attribute__ ((packed));
431 431
432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) 432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
433 */ 433 */
434#define TYPHOON_XCVR_10HALF __constant_cpu_to_le16(0x0000) 434#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000)
435#define TYPHOON_XCVR_10FULL __constant_cpu_to_le16(0x0001) 435#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001)
436#define TYPHOON_XCVR_100HALF __constant_cpu_to_le16(0x0002) 436#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002)
437#define TYPHOON_XCVR_100FULL __constant_cpu_to_le16(0x0003) 437#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003)
438#define TYPHOON_XCVR_AUTONEG __constant_cpu_to_le16(0x0004) 438#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004)
439 439
440/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) 440/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1)
441 */ 441 */
442#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE __constant_cpu_to_le16(0x0004) 442#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004)
443#define TYPHOON_MEDIA_STAT_COLLISION_DETECT __constant_cpu_to_le16(0x0010) 443#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010)
444#define TYPHOON_MEDIA_STAT_CARRIER_SENSE __constant_cpu_to_le16(0x0020) 444#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020)
445#define TYPHOON_MEDIA_STAT_POLARITY_REV __constant_cpu_to_le16(0x0400) 445#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400)
446#define TYPHOON_MEDIA_STAT_NO_LINK __constant_cpu_to_le16(0x0800) 446#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800)
447 447
448/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) 448/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1)
449 */ 449 */
450#define TYPHOON_MCAST_HASH_DISABLE __constant_cpu_to_le16(0x0000) 450#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000)
451#define TYPHOON_MCAST_HASH_ENABLE __constant_cpu_to_le16(0x0001) 451#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001)
452#define TYPHOON_MCAST_HASH_SET __constant_cpu_to_le16(0x0002) 452#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002)
453 453
454/* TYPHOON_CMD_CREATE_SA descriptor and settings 454/* TYPHOON_CMD_CREATE_SA descriptor and settings
455 */ 455 */
@@ -459,9 +459,9 @@ struct sa_descriptor {
459 u16 cmd; 459 u16 cmd;
460 u16 seqNo; 460 u16 seqNo;
461 u16 mode; 461 u16 mode;
462#define TYPHOON_SA_MODE_NULL __constant_cpu_to_le16(0x0000) 462#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000)
463#define TYPHOON_SA_MODE_AH __constant_cpu_to_le16(0x0001) 463#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001)
464#define TYPHOON_SA_MODE_ESP __constant_cpu_to_le16(0x0002) 464#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002)
465 u8 hashFlags; 465 u8 hashFlags;
466#define TYPHOON_SA_HASH_ENABLE 0x01 466#define TYPHOON_SA_HASH_ENABLE 0x01
467#define TYPHOON_SA_HASH_SHA1 0x02 467#define TYPHOON_SA_HASH_SHA1 0x02
@@ -493,22 +493,22 @@ struct sa_descriptor {
493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) 493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
494 * This is all for IPv4. 494 * This is all for IPv4.
495 */ 495 */
496#define TYPHOON_OFFLOAD_TCP_CHKSUM __constant_cpu_to_le32(0x00000002) 496#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002)
497#define TYPHOON_OFFLOAD_UDP_CHKSUM __constant_cpu_to_le32(0x00000004) 497#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004)
498#define TYPHOON_OFFLOAD_IP_CHKSUM __constant_cpu_to_le32(0x00000008) 498#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008)
499#define TYPHOON_OFFLOAD_IPSEC __constant_cpu_to_le32(0x00000010) 499#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010)
500#define TYPHOON_OFFLOAD_BCAST_THROTTLE __constant_cpu_to_le32(0x00000020) 500#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020)
501#define TYPHOON_OFFLOAD_DHCP_PREVENT __constant_cpu_to_le32(0x00000040) 501#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040)
502#define TYPHOON_OFFLOAD_VLAN __constant_cpu_to_le32(0x00000080) 502#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080)
503#define TYPHOON_OFFLOAD_FILTERING __constant_cpu_to_le32(0x00000100) 503#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100)
504#define TYPHOON_OFFLOAD_TCP_SEGMENT __constant_cpu_to_le32(0x00000200) 504#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200)
505 505
506/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) 506/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1)
507 */ 507 */
508#define TYPHOON_WAKE_MAGIC_PKT __constant_cpu_to_le16(0x01) 508#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01)
509#define TYPHOON_WAKE_LINK_EVENT __constant_cpu_to_le16(0x02) 509#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02)
510#define TYPHOON_WAKE_ICMP_ECHO __constant_cpu_to_le16(0x04) 510#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04)
511#define TYPHOON_WAKE_ARP __constant_cpu_to_le16(0x08) 511#define TYPHOON_WAKE_ARP cpu_to_le16(0x08)
512 512
513/* These are used to load the firmware image on the NIC 513/* These are used to load the firmware image on the NIC
514 */ 514 */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index e87986867ba..1c095c63f98 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -39,7 +39,7 @@
39#include <asm/ucc_fast.h> 39#include <asm/ucc_fast.h>
40 40
41#include "ucc_geth.h" 41#include "ucc_geth.h"
42#include "ucc_geth_mii.h" 42#include "fsl_pq_mdio.h"
43 43
44#undef DEBUG 44#undef DEBUG
45 45
@@ -1557,7 +1557,7 @@ static int init_phy(struct net_device *dev)
1557 of_node_put(phy); 1557 of_node_put(phy);
1558 of_node_put(mdio); 1558 of_node_put(mdio);
1559 1559
1560 uec_mdio_bus_name(bus_name, mdio); 1560 fsl_pq_mdio_bus_name(bus_name, mdio);
1561 snprintf(phy_id, sizeof(phy_id), "%s:%02x", 1561 snprintf(phy_id, sizeof(phy_id), "%s:%02x",
1562 bus_name, *id); 1562 bus_name, *id);
1563 1563
@@ -3266,7 +3266,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
3266 howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3266 howmany += ucc_geth_rx(ugeth, i, budget - howmany);
3267 3267
3268 if (howmany < budget) { 3268 if (howmany < budget) {
3269 netif_rx_complete(napi); 3269 napi_complete(napi);
3270 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); 3270 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
3271 } 3271 }
3272 3272
@@ -3297,10 +3297,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3297 3297
3298 /* check for receive events that require processing */ 3298 /* check for receive events that require processing */
3299 if (ucce & UCCE_RX_EVENTS) { 3299 if (ucce & UCCE_RX_EVENTS) {
3300 if (netif_rx_schedule_prep(&ugeth->napi)) { 3300 if (napi_schedule_prep(&ugeth->napi)) {
3301 uccm &= ~UCCE_RX_EVENTS; 3301 uccm &= ~UCCE_RX_EVENTS;
3302 out_be32(uccf->p_uccm, uccm); 3302 out_be32(uccf->p_uccm, uccm);
3303 __netif_rx_schedule(&ugeth->napi); 3303 __napi_schedule(&ugeth->napi);
3304 } 3304 }
3305 } 3305 }
3306 3306
@@ -3657,7 +3657,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3657 if (err) 3657 if (err)
3658 return -1; 3658 return -1;
3659 3659
3660 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start); 3660 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x",
3661 res.start&0xfffff);
3661 } 3662 }
3662 3663
3663 /* get the phy interface type, or default to MII */ 3664 /* get the phy interface type, or default to MII */
@@ -3803,11 +3804,6 @@ static int __init ucc_geth_init(void)
3803{ 3804{
3804 int i, ret; 3805 int i, ret;
3805 3806
3806 ret = uec_mdio_init();
3807
3808 if (ret)
3809 return ret;
3810
3811 if (netif_msg_drv(&debug)) 3807 if (netif_msg_drv(&debug))
3812 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); 3808 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
3813 for (i = 0; i < 8; i++) 3809 for (i = 0; i < 8; i++)
@@ -3816,16 +3812,12 @@ static int __init ucc_geth_init(void)
3816 3812
3817 ret = of_register_platform_driver(&ucc_geth_driver); 3813 ret = of_register_platform_driver(&ucc_geth_driver);
3818 3814
3819 if (ret)
3820 uec_mdio_exit();
3821
3822 return ret; 3815 return ret;
3823} 3816}
3824 3817
3825static void __exit ucc_geth_exit(void) 3818static void __exit ucc_geth_exit(void)
3826{ 3819{
3827 of_unregister_platform_driver(&ucc_geth_driver); 3820 of_unregister_platform_driver(&ucc_geth_driver);
3828 uec_mdio_exit();
3829} 3821}
3830 3822
3831module_init(ucc_geth_init); 3823module_init(ucc_geth_init);
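
With the ucc_geth changes above, the driver's private MII bus code moves into the shared fsl_pq_mdio driver (also used by gianfar), and uec_mdio_init()/uec_mdio_exit() disappear from module init and exit. PHY lookup now goes through a bus id of the form "<bus>:<addr>", where the bus half comes from fsl_pq_mdio_bus_name(). A hedged sketch of a consumer; adjust_link, priv->phy_interface, priv->phydev and the phy_connect() flags value are assumptions about the surrounding driver, not taken from the hunk:

static int example_attach_phy(struct net_device *dev,
			      struct ucc_geth_private *priv,
			      struct device_node *mdio, u32 addr)
{
	char bus_name[MII_BUS_ID_SIZE];
	char phy_id[BUS_ID_SIZE];
	struct phy_device *phydev;

	/* shared helper, replaces the old uec_mdio_bus_name() */
	fsl_pq_mdio_bus_name(bus_name, mdio);
	snprintf(phy_id, sizeof(phy_id), "%s:%02x", bus_name, addr);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0,
			     priv->phy_interface);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	priv->phydev = phydev;
	return 0;
}
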
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 16cbe42ba43..66d18971fa0 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -28,8 +28,6 @@
28#include <asm/ucc.h> 28#include <asm/ucc.h>
29#include <asm/ucc_fast.h> 29#include <asm/ucc_fast.h>
30 30
31#include "ucc_geth_mii.h"
32
33#define DRV_DESC "QE UCC Gigabit Ethernet Controller" 31#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
34#define DRV_NAME "ucc_geth" 32#define DRV_NAME "ucc_geth"
35#define DRV_VERSION "1.1" 33#define DRV_VERSION "1.1"
@@ -184,6 +182,18 @@ struct ucc_geth {
184#define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY) 182#define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY)
185#define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE) 183#define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE)
186 184
185/* TBI defines */
186#define ENET_TBI_MII_CR 0x00 /* Control */
187#define ENET_TBI_MII_SR 0x01 /* Status */
188#define ENET_TBI_MII_ANA 0x04 /* AN advertisement */
189#define ENET_TBI_MII_ANLPBPA 0x05 /* AN link partner base page ability */
190#define ENET_TBI_MII_ANEX 0x06 /* AN expansion */
191#define ENET_TBI_MII_ANNPT 0x07 /* AN next page transmit */
192#define ENET_TBI_MII_ANLPANP 0x08 /* AN link partner ability next page */
193#define ENET_TBI_MII_EXST 0x0F /* Extended status */
194#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */
195#define ENET_TBI_MII_TBICON 0x11 /* TBI control */
196
187/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ 197/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
188#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control 198#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
189 Rx */ 199 Rx */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 68a7f541413..a755bea559b 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -39,7 +39,6 @@
39#include <asm/types.h> 39#include <asm/types.h>
40 40
41#include "ucc_geth.h" 41#include "ucc_geth.h"
42#include "ucc_geth_mii.h"
43 42
44static char hw_stat_gstrings[][ETH_GSTRING_LEN] = { 43static char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
45 "tx-64-frames", 44 "tx-64-frames",
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
deleted file mode 100644
index 54635911305..00000000000
--- a/drivers/net/ucc_geth_mii.c
+++ /dev/null
@@ -1,295 +0,0 @@
1/*
2 * drivers/net/ucc_geth_mii.c
3 *
4 * QE UCC Gigabit Ethernet Driver -- MII Management Bus Implementation
5 * Provides Bus interface for MII Management regs in the UCC register space
6 *
7 * Copyright (C) 2007 Freescale Semiconductor, Inc.
8 *
9 * Authors: Li Yang <leoli@freescale.com>
10 * Kim Phillips <kim.phillips@freescale.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/unistd.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/init.h>
27#include <linux/delay.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/skbuff.h>
31#include <linux/spinlock.h>
32#include <linux/mm.h>
33#include <linux/module.h>
34#include <linux/platform_device.h>
35#include <linux/crc32.h>
36#include <linux/mii.h>
37#include <linux/phy.h>
38#include <linux/fsl_devices.h>
39#include <linux/of_platform.h>
40
41#include <asm/io.h>
42#include <asm/irq.h>
43#include <asm/uaccess.h>
44#include <asm/ucc.h>
45
46#include "ucc_geth_mii.h"
47#include "ucc_geth.h"
48
49#define DEBUG
50#ifdef DEBUG
51#define vdbg(format, arg...) printk(KERN_DEBUG , format "\n" , ## arg)
52#else
53#define vdbg(format, arg...) do {} while(0)
54#endif
55
56#define MII_DRV_DESC "QE UCC Ethernet Controller MII Bus"
57#define MII_DRV_NAME "fsl-uec_mdio"
58
59/* Write value to the PHY for this device to the register at regnum, */
60/* waiting until the write is done before it returns. All PHY */
61/* configuration has to be done through the master UEC MIIM regs */
62int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
63{
64 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
65
66 /* Setting up the MII Mangement Address Register */
67 out_be32(&regs->miimadd,
68 (mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | regnum);
69
70 /* Setting up the MII Mangement Control Register with the value */
71 out_be32(&regs->miimcon, value);
72
73 /* Wait till MII management write is complete */
74 while ((in_be32(&regs->miimind)) & MIIMIND_BUSY)
75 cpu_relax();
76
77 return 0;
78}
79
80/* Reads from register regnum in the PHY for device dev, */
81/* returning the value. Clears miimcom first. All PHY */
82/* configuration has to be done through the TSEC1 MIIM regs */
83int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
84{
85 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
86 u16 value;
87
88 /* Setting up the MII Mangement Address Register */
89 out_be32(&regs->miimadd,
90 (mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | regnum);
91
92 /* Clear miimcom, perform an MII management read cycle */
93 out_be32(&regs->miimcom, 0);
94 out_be32(&regs->miimcom, MIIMCOM_READ_CYCLE);
95
96 /* Wait till MII management write is complete */
97 while ((in_be32(&regs->miimind)) & (MIIMIND_BUSY | MIIMIND_NOT_VALID))
98 cpu_relax();
99
100 /* Read MII management status */
101 value = in_be32(&regs->miimstat);
102
103 return value;
104}
105
106/* Reset the MIIM registers, and wait for the bus to free */
107static int uec_mdio_reset(struct mii_bus *bus)
108{
109 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
110 unsigned int timeout = PHY_INIT_TIMEOUT;
111
112 mutex_lock(&bus->mdio_lock);
113
114 /* Reset the management interface */
115 out_be32(&regs->miimcfg, MIIMCFG_RESET_MANAGEMENT);
116
117 /* Setup the MII Mgmt clock speed */
118 out_be32(&regs->miimcfg, MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112);
119
120 /* Wait until the bus is free */
121 while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
122 cpu_relax();
123
124 mutex_unlock(&bus->mdio_lock);
125
126 if (timeout <= 0) {
127 printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name);
128 return -EBUSY;
129 }
130
131 return 0;
132}
133
134static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *match)
135{
136 struct device *device = &ofdev->dev;
137 struct device_node *np = ofdev->node, *tempnp = NULL;
138 struct device_node *child = NULL;
139 struct ucc_mii_mng __iomem *regs;
140 struct mii_bus *new_bus;
141 struct resource res;
142 int k, err = 0;
143
144 new_bus = mdiobus_alloc();
145 if (NULL == new_bus)
146 return -ENOMEM;
147
148 new_bus->name = "UCC Ethernet Controller MII Bus";
149 new_bus->read = &uec_mdio_read;
150 new_bus->write = &uec_mdio_write;
151 new_bus->reset = &uec_mdio_reset;
152
153 memset(&res, 0, sizeof(res));
154
155 err = of_address_to_resource(np, 0, &res);
156 if (err)
157 goto reg_map_fail;
158
159 uec_mdio_bus_name(new_bus->id, np);
160
161 new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
162
163 if (NULL == new_bus->irq) {
164 err = -ENOMEM;
165 goto reg_map_fail;
166 }
167
168 for (k = 0; k < 32; k++)
169 new_bus->irq[k] = PHY_POLL;
170
171 while ((child = of_get_next_child(np, child)) != NULL) {
172 int irq = irq_of_parse_and_map(child, 0);
173 if (irq != NO_IRQ) {
174 const u32 *id = of_get_property(child, "reg", NULL);
175 new_bus->irq[*id] = irq;
176 }
177 }
178
179 /* Set the base address */
180 regs = ioremap(res.start, sizeof(struct ucc_mii_mng));
181
182 if (NULL == regs) {
183 err = -ENOMEM;
184 goto ioremap_fail;
185 }
186
187 new_bus->priv = (void __force *)regs;
188
189 new_bus->parent = device;
190 dev_set_drvdata(device, new_bus);
191
192 /* Read MII management master from device tree */
193 while ((tempnp = of_find_compatible_node(tempnp, "network", "ucc_geth"))
194 != NULL) {
195 struct resource tempres;
196
197 err = of_address_to_resource(tempnp, 0, &tempres);
198 if (err)
199 goto bus_register_fail;
200
201 /* if our mdio regs fall within this UCC regs range */
202 if ((res.start >= tempres.start) &&
203 (res.end <= tempres.end)) {
204 /* set this UCC to be the MII master */
205 const u32 *id;
206
207 id = of_get_property(tempnp, "cell-index", NULL);
208 if (!id) {
209 id = of_get_property(tempnp, "device-id", NULL);
210 if (!id)
211 goto bus_register_fail;
212 }
213
214 ucc_set_qe_mux_mii_mng(*id - 1);
215
216 /* assign the TBI an address which won't
217 * conflict with the PHYs */
218 out_be32(&regs->utbipar, UTBIPAR_INIT_TBIPA);
219 break;
220 }
221 }
222
223 err = mdiobus_register(new_bus);
224 if (0 != err) {
225 printk(KERN_ERR "%s: Cannot register as MDIO bus\n",
226 new_bus->name);
227 goto bus_register_fail;
228 }
229
230 return 0;
231
232bus_register_fail:
233 iounmap(regs);
234ioremap_fail:
235 kfree(new_bus->irq);
236reg_map_fail:
237 mdiobus_free(new_bus);
238
239 return err;
240}
241
242static int uec_mdio_remove(struct of_device *ofdev)
243{
244 struct device *device = &ofdev->dev;
245 struct mii_bus *bus = dev_get_drvdata(device);
246
247 mdiobus_unregister(bus);
248
249 dev_set_drvdata(device, NULL);
250
251 iounmap((void __iomem *)bus->priv);
252 bus->priv = NULL;
253 mdiobus_free(bus);
254
255 return 0;
256}
257
258static struct of_device_id uec_mdio_match[] = {
259 {
260 .type = "mdio",
261 .compatible = "ucc_geth_phy",
262 },
263 {
264 .compatible = "fsl,ucc-mdio",
265 },
266 {},
267};
268
269static struct of_platform_driver uec_mdio_driver = {
270 .name = MII_DRV_NAME,
271 .probe = uec_mdio_probe,
272 .remove = uec_mdio_remove,
273 .match_table = uec_mdio_match,
274};
275
276int __init uec_mdio_init(void)
277{
278 return of_register_platform_driver(&uec_mdio_driver);
279}
280
281/* called from __init ucc_geth_init, therefore can not be __exit */
282void uec_mdio_exit(void)
283{
284 of_unregister_platform_driver(&uec_mdio_driver);
285}
286
287void uec_mdio_bus_name(char *name, struct device_node *np)
288{
289 const u32 *reg;
290
291 reg = of_get_property(np, "reg", NULL);
292
293 snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
294}
295
diff --git a/drivers/net/ucc_geth_mii.h b/drivers/net/ucc_geth_mii.h
deleted file mode 100644
index 840cf80235b..00000000000
--- a/drivers/net/ucc_geth_mii.h
+++ /dev/null
@@ -1,101 +0,0 @@
1/*
2 * drivers/net/ucc_geth_mii.h
3 *
4 * QE UCC Gigabit Ethernet Driver -- MII Management Bus Implementation
5 * Provides Bus interface for MII Management regs in the UCC register space
6 *
7 * Copyright (C) 2007 Freescale Semiconductor, Inc.
8 *
9 * Authors: Li Yang <leoli@freescale.com>
10 * Kim Phillips <kim.phillips@freescale.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18#ifndef __UEC_MII_H
19#define __UEC_MII_H
20
21/* UCC GETH MIIMCFG (MII Management Configuration Register) */
22#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset
23 management */
24#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble
25 suppress */
26#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide
27 << shift */
28#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* max clock divide */
29#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000
30#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001
31#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002
32#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003
33#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004
34#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005
35#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008
36#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006
37#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007
38#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009
39#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a
40#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b
41#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c
42#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d
43#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e
44#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f
45
46/* UCC GETH MIIMCOM (MII Management Command Register) */
47#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */
48#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */
49
50/* UCC GETH MIIMADD (MII Management Address Register) */
51#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address
52 << shift */
53#define MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register
54 << shift */
55
56/* UCC GETH MIIMCON (MII Management Control Register) */
57#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control
58 << shift */
59#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status
60 << shift */
61
62/* UCC GETH MIIMIND (MII Management Indicator Register) */
63#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */
64#define MIIMIND_SCAN 0x00000002 /* Scan in
65 progress */
66#define MIIMIND_BUSY 0x00000001
67
68/* Initial TBI Physical Address */
69#define UTBIPAR_INIT_TBIPA 0x1f
70
71struct ucc_mii_mng {
72 u32 miimcfg; /* MII management configuration reg */
73 u32 miimcom; /* MII management command reg */
74 u32 miimadd; /* MII management address reg */
75 u32 miimcon; /* MII management control reg */
76 u32 miimstat; /* MII management status reg */
77 u32 miimind; /* MII management indication reg */
78 u8 notcare[28]; /* Space holder */
79 u32 utbipar; /* TBI phy address reg */
80} __attribute__ ((packed));
81
82/* TBI / MII Set Register */
83enum enet_tbi_mii_reg {
84 ENET_TBI_MII_CR = 0x00, /* Control */
85 ENET_TBI_MII_SR = 0x01, /* Status */
86 ENET_TBI_MII_ANA = 0x04, /* AN advertisement */
87 ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability */
88 ENET_TBI_MII_ANEX = 0x06, /* AN expansion */
89 ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit */
90 ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page */
91 ENET_TBI_MII_EXST = 0x0F, /* Extended status */
92 ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics */
93 ENET_TBI_MII_TBICON = 0x11 /* TBI control */
94};
95
96int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
97int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
98int __init uec_mdio_init(void);
99void uec_mdio_exit(void);
100void uec_mdio_bus_name(char *name, struct device_node *np);
101#endif /* __UEC_MII_H */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index fe98acaead9..e6d62fe405c 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -936,8 +936,7 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
936 if (!odev->rx_buf_missing) { 936 if (!odev->rx_buf_missing) {
937 /* Packet is complete. Inject into stack. */ 937 /* Packet is complete. Inject into stack. */
938 /* We have IP packet here */ 938 /* We have IP packet here */
939 odev->skb_rx_buf->protocol = 939 odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
940 __constant_htons(ETH_P_IP);
941 /* don't check it */ 940 /* don't check it */
942 odev->skb_rx_buf->ip_summed = 941 odev->skb_rx_buf->ip_summed =
943 CHECKSUM_UNNECESSARY; 942 CHECKSUM_UNNECESSARY;
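
The hso hunk replaces __constant_htons() with cpu_to_be16() when stamping a reassembled IP packet: skb->protocol is a __be16 and the conversion folds to a constant either way, so the __constant_ form buys nothing. A compressed sketch of the hand-off into the stack (hypothetical RX path, not the full hso logic):

/* the buffer holds a bare IP datagram, there is no Ethernet header to parse */
skb->protocol = cpu_to_be16(ETH_P_IP);
skb->ip_summed = CHECKSUM_UNNECESSARY;	/* device provides no checksum info */
netif_rx(skb);
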
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index bcd858c567e..b7f763e1298 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -169,7 +169,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
169 struct rndis_keepalive_c *msg = (void *)buf; 169 struct rndis_keepalive_c *msg = (void *)buf;
170 170
171 msg->msg_type = RNDIS_MSG_KEEPALIVE_C; 171 msg->msg_type = RNDIS_MSG_KEEPALIVE_C;
172 msg->msg_len = ccpu2(sizeof *msg); 172 msg->msg_len = cpu_to_le32(sizeof *msg);
173 msg->status = RNDIS_STATUS_SUCCESS; 173 msg->status = RNDIS_STATUS_SUCCESS;
174 retval = usb_control_msg(dev->udev, 174 retval = usb_control_msg(dev->udev,
175 usb_sndctrlpipe(dev->udev, 0), 175 usb_sndctrlpipe(dev->udev, 0),
@@ -237,7 +237,7 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
237 u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len); 237 u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len);
238 u.get->oid = oid; 238 u.get->oid = oid;
239 u.get->len = cpu_to_le32(in_len); 239 u.get->len = cpu_to_le32(in_len);
240 u.get->offset = ccpu2(20); 240 u.get->offset = cpu_to_le32(20);
241 241
242 retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); 242 retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
243 if (unlikely(retval < 0)) { 243 if (unlikely(retval < 0)) {
@@ -297,9 +297,9 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
297 goto fail; 297 goto fail;
298 298
299 u.init->msg_type = RNDIS_MSG_INIT; 299 u.init->msg_type = RNDIS_MSG_INIT;
300 u.init->msg_len = ccpu2(sizeof *u.init); 300 u.init->msg_len = cpu_to_le32(sizeof *u.init);
301 u.init->major_version = ccpu2(1); 301 u.init->major_version = cpu_to_le32(1);
302 u.init->minor_version = ccpu2(0); 302 u.init->minor_version = cpu_to_le32(0);
303 303
304 /* max transfer (in spec) is 0x4000 at full speed, but for 304 /* max transfer (in spec) is 0x4000 at full speed, but for
305 * TX we'll stick to one Ethernet packet plus RNDIS framing. 305 * TX we'll stick to one Ethernet packet plus RNDIS framing.
@@ -403,10 +403,10 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
403 /* set a nonzero filter to enable data transfers */ 403 /* set a nonzero filter to enable data transfers */
404 memset(u.set, 0, sizeof *u.set); 404 memset(u.set, 0, sizeof *u.set);
405 u.set->msg_type = RNDIS_MSG_SET; 405 u.set->msg_type = RNDIS_MSG_SET;
406 u.set->msg_len = ccpu2(4 + sizeof *u.set); 406 u.set->msg_len = cpu_to_le32(4 + sizeof *u.set);
407 u.set->oid = OID_GEN_CURRENT_PACKET_FILTER; 407 u.set->oid = OID_GEN_CURRENT_PACKET_FILTER;
408 u.set->len = ccpu2(4); 408 u.set->len = cpu_to_le32(4);
409 u.set->offset = ccpu2((sizeof *u.set) - 8); 409 u.set->offset = cpu_to_le32((sizeof *u.set) - 8);
410 *(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER; 410 *(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER;
411 411
412 retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); 412 retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
@@ -423,7 +423,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
423halt_fail_and_release: 423halt_fail_and_release:
424 memset(u.halt, 0, sizeof *u.halt); 424 memset(u.halt, 0, sizeof *u.halt);
425 u.halt->msg_type = RNDIS_MSG_HALT; 425 u.halt->msg_type = RNDIS_MSG_HALT;
426 u.halt->msg_len = ccpu2(sizeof *u.halt); 426 u.halt->msg_len = cpu_to_le32(sizeof *u.halt);
427 (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE); 427 (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE);
428fail_and_release: 428fail_and_release:
429 usb_set_intfdata(info->data, NULL); 429 usb_set_intfdata(info->data, NULL);
@@ -448,7 +448,7 @@ void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
448 halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); 448 halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
449 if (halt) { 449 if (halt) {
450 halt->msg_type = RNDIS_MSG_HALT; 450 halt->msg_type = RNDIS_MSG_HALT;
451 halt->msg_len = ccpu2(sizeof *halt); 451 halt->msg_len = cpu_to_le32(sizeof *halt);
452 (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE); 452 (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE);
453 kfree(halt); 453 kfree(halt);
454 } 454 }
@@ -543,7 +543,7 @@ fill:
543 memset(hdr, 0, sizeof *hdr); 543 memset(hdr, 0, sizeof *hdr);
544 hdr->msg_type = RNDIS_MSG_PACKET; 544 hdr->msg_type = RNDIS_MSG_PACKET;
545 hdr->msg_len = cpu_to_le32(skb->len); 545 hdr->msg_len = cpu_to_le32(skb->len);
546 hdr->data_offset = ccpu2(sizeof(*hdr) - 8); 546 hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
547 hdr->data_len = cpu_to_le32(len); 547 hdr->data_len = cpu_to_le32(len);
548 548
549 /* FIXME make the last packet always be short ... */ 549 /* FIXME make the last packet always be short ... */
@@ -562,9 +562,6 @@ static const struct driver_info rndis_info = {
562 .tx_fixup = rndis_tx_fixup, 562 .tx_fixup = rndis_tx_fixup,
563}; 563};
564 564
565#undef ccpu2
566
567
568/*-------------------------------------------------------------------------*/ 565/*-------------------------------------------------------------------------*/
569 566
570static const struct usb_device_id products [] = { 567static const struct usb_device_id products [] = {
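
rndis_host had been using a driver-local ccpu2() alias for the constant little-endian conversion; the hunks above spell out cpu_to_le32() instead, and the final hunk removes the leftover #undef. Since cpu_to_le32() is already constant-safe and carries the __le32 type sparse expects, a local wrapper adds nothing; the keepalive message from the first hunk, restated as a sketch:

struct rndis_keepalive_c *msg = (void *)buf;

msg->msg_type = RNDIS_MSG_KEEPALIVE_C;
msg->msg_len  = cpu_to_le32(sizeof *msg);	/* folded to a __le32 constant */
msg->status   = RNDIS_STATUS_SUCCESS;
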
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5574abe29c7..5b0b9647382 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -55,7 +55,6 @@ struct smsc95xx_priv {
55 55
56struct usb_context { 56struct usb_context {
57 struct usb_ctrlrequest req; 57 struct usb_ctrlrequest req;
58 struct completion notify;
59 struct usbnet *dev; 58 struct usbnet *dev;
60}; 59};
61 60
@@ -307,7 +306,7 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
307 return 0; 306 return 0;
308} 307}
309 308
310static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs) 309static void smsc95xx_async_cmd_callback(struct urb *urb)
311{ 310{
312 struct usb_context *usb_context = urb->context; 311 struct usb_context *usb_context = urb->context;
313 struct usbnet *dev = usb_context->dev; 312 struct usbnet *dev = usb_context->dev;
@@ -316,8 +315,6 @@ static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs)
316 if (status < 0) 315 if (status < 0)
317 devwarn(dev, "async callback failed with %d", status); 316 devwarn(dev, "async callback failed with %d", status);
318 317
319 complete(&usb_context->notify);
320
321 kfree(usb_context); 318 kfree(usb_context);
322 usb_free_urb(urb); 319 usb_free_urb(urb);
323} 320}
@@ -348,11 +345,10 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
348 usb_context->req.wValue = 00; 345 usb_context->req.wValue = 00;
349 usb_context->req.wIndex = cpu_to_le16(index); 346 usb_context->req.wIndex = cpu_to_le16(index);
350 usb_context->req.wLength = cpu_to_le16(size); 347 usb_context->req.wLength = cpu_to_le16(size);
351 init_completion(&usb_context->notify);
352 348
353 usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), 349 usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
354 (void *)&usb_context->req, data, size, 350 (void *)&usb_context->req, data, size,
355 (usb_complete_t)smsc95xx_async_cmd_callback, 351 smsc95xx_async_cmd_callback,
356 (void *)usb_context); 352 (void *)usb_context);
357 353
358 status = usb_submit_urb(urb, GFP_ATOMIC); 354 status = usb_submit_urb(urb, GFP_ATOMIC);
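
The smsc95xx hunks adjust the async register write to the current URB completion prototype, which takes only the struct urb (the pt_regs argument went away long ago), and drop the completion that no caller ever waited on; with the correct prototype, the cast at usb_fill_control_urb() is no longer needed either. A self-contained sketch of the fire-and-forget pattern, with example_* names standing in for the driver's own:

struct example_usb_ctx {
	struct usb_ctrlrequest req;
};

static void example_async_callback(struct urb *urb)
{
	struct example_usb_ctx *ctx = urb->context;

	if (urb->status)
		dev_warn(&urb->dev->dev, "async write failed: %d\n", urb->status);

	kfree(ctx);		/* freed here, not by the submitter */
	usb_free_urb(urb);
}

static int example_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
{
	struct example_usb_ctx *ctx;
	struct urb *urb;
	int status;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* fill ctx->req with the vendor write request for 'index' here */

	usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
			     (void *)&ctx->req, data, sizeof(*data),
			     example_async_callback, ctx);	/* no cast required */

	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status < 0) {
		kfree(ctx);
		usb_free_urb(urb);
	}
	return status;
}
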
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 3b8e6325427..4671436ecf0 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -589,7 +589,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
589 work_done = rhine_rx(dev, budget); 589 work_done = rhine_rx(dev, budget);
590 590
591 if (work_done < budget) { 591 if (work_done < budget) {
592 netif_rx_complete(napi); 592 napi_complete(napi);
593 593
594 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 594 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
595 IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 595 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1319,7 +1319,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1319 IntrPCIErr | IntrStatsMax | IntrLinkChange, 1319 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1320 ioaddr + IntrEnable); 1320 ioaddr + IntrEnable);
1321 1321
1322 netif_rx_schedule(&rp->napi); 1322 napi_schedule(&rp->napi);
1323 } 1323 }
1324 1324
1325 if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1325 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 29a33090d3d..ea43e1832af 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -183,7 +183,7 @@ struct rdesc1 {
183}; 183};
184 184
185enum { 185enum {
186 RX_INTEN = __constant_cpu_to_le16(0x8000) 186 RX_INTEN = cpu_to_le16(0x8000)
187}; 187};
188 188
189struct rx_desc { 189struct rx_desc {
@@ -210,7 +210,7 @@ struct tdesc1 {
210} __attribute__ ((__packed__)); 210} __attribute__ ((__packed__));
211 211
212enum { 212enum {
213 TD_QUEUE = __constant_cpu_to_le16(0x8000) 213 TD_QUEUE = cpu_to_le16(0x8000)
214}; 214};
215 215
216struct td_buf { 216struct td_buf {
@@ -242,7 +242,7 @@ struct velocity_td_info {
242 242
243enum velocity_owner { 243enum velocity_owner {
244 OWNED_BY_HOST = 0, 244 OWNED_BY_HOST = 0,
245 OWNED_BY_NIC = __constant_cpu_to_le16(0x8000) 245 OWNED_BY_NIC = cpu_to_le16(0x8000)
246}; 246};
247 247
248 248
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c68808336c8..3d003392022 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -37,12 +37,15 @@ module_param(gso, bool, 0444);
37#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 37#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
38#define GOOD_COPY_LEN 128 38#define GOOD_COPY_LEN 128
39 39
40#define VIRTNET_SEND_COMMAND_SG_MAX 2
41
40struct virtnet_info 42struct virtnet_info
41{ 43{
42 struct virtio_device *vdev; 44 struct virtio_device *vdev;
43 struct virtqueue *rvq, *svq; 45 struct virtqueue *rvq, *svq, *cvq;
44 struct net_device *dev; 46 struct net_device *dev;
45 struct napi_struct napi; 47 struct napi_struct napi;
48 unsigned int status;
46 49
47 /* The skb we couldn't send because buffers were full. */ 50 /* The skb we couldn't send because buffers were full. */
48 struct sk_buff *last_xmit_skb; 51 struct sk_buff *last_xmit_skb;
@@ -375,9 +378,9 @@ static void skb_recv_done(struct virtqueue *rvq)
375{ 378{
376 struct virtnet_info *vi = rvq->vdev->priv; 379 struct virtnet_info *vi = rvq->vdev->priv;
377 /* Schedule NAPI, Suppress further interrupts if successful. */ 380 /* Schedule NAPI, Suppress further interrupts if successful. */
378 if (netif_rx_schedule_prep(&vi->napi)) { 381 if (napi_schedule_prep(&vi->napi)) {
379 rvq->vq_ops->disable_cb(rvq); 382 rvq->vq_ops->disable_cb(rvq);
380 __netif_rx_schedule(&vi->napi); 383 __napi_schedule(&vi->napi);
381 } 384 }
382} 385}
383 386
@@ -403,11 +406,11 @@ again:
403 406
404 /* Out of packets? */ 407 /* Out of packets? */
405 if (received < budget) { 408 if (received < budget) {
406 netif_rx_complete(napi); 409 napi_complete(napi);
407 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) 410 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
408 && napi_schedule_prep(napi)) { 411 && napi_schedule_prep(napi)) {
409 vi->rvq->vq_ops->disable_cb(vi->rvq); 412 vi->rvq->vq_ops->disable_cb(vi->rvq);
410 __netif_rx_schedule(napi); 413 __napi_schedule(napi);
411 goto again; 414 goto again;
412 } 415 }
413 } 416 }
@@ -562,6 +565,22 @@ stop_queue:
562 goto done; 565 goto done;
563} 566}
564 567
568static int virtnet_set_mac_address(struct net_device *dev, void *p)
569{
570 struct virtnet_info *vi = netdev_priv(dev);
571 struct virtio_device *vdev = vi->vdev;
572 int ret;
573
574 ret = eth_mac_addr(dev, p);
575 if (ret)
576 return ret;
577
578 vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
579 dev->dev_addr, dev->addr_len);
580
581 return 0;
582}
583
565#ifdef CONFIG_NET_POLL_CONTROLLER 584#ifdef CONFIG_NET_POLL_CONTROLLER
566static void virtnet_netpoll(struct net_device *dev) 585static void virtnet_netpoll(struct net_device *dev)
567{ 586{
@@ -581,13 +600,60 @@ static int virtnet_open(struct net_device *dev)
581 * won't get another interrupt, so process any outstanding packets 600 * won't get another interrupt, so process any outstanding packets
582 * now. virtnet_poll wants re-enable the queue, so we disable here. 601 * now. virtnet_poll wants re-enable the queue, so we disable here.
583 * We synchronize against interrupts via NAPI_STATE_SCHED */ 602 * We synchronize against interrupts via NAPI_STATE_SCHED */
584 if (netif_rx_schedule_prep(&vi->napi)) { 603 if (napi_schedule_prep(&vi->napi)) {
585 vi->rvq->vq_ops->disable_cb(vi->rvq); 604 vi->rvq->vq_ops->disable_cb(vi->rvq);
586 __netif_rx_schedule(&vi->napi); 605 __napi_schedule(&vi->napi);
587 } 606 }
588 return 0; 607 return 0;
589} 608}
590 609
610/*
611 * Send command via the control virtqueue and check status. Commands
612 * supported by the hypervisor, as indicated by feature bits, should
613 * never fail unless improperly formated.
614 */
615static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
616 struct scatterlist *data, int out, int in)
617{
618 struct scatterlist sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
619 struct virtio_net_ctrl_hdr ctrl;
620 virtio_net_ctrl_ack status = ~0;
621 unsigned int tmp;
622
623 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
624 BUG(); /* Caller should know better */
625 return false;
626 }
627
628 BUG_ON(out + in > VIRTNET_SEND_COMMAND_SG_MAX);
629
630 out++; /* Add header */
631 in++; /* Add return status */
632
633 ctrl.class = class;
634 ctrl.cmd = cmd;
635
636 sg_init_table(sg, out + in);
637
638 sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
639 memcpy(&sg[1], data, sizeof(struct scatterlist) * (out + in - 2));
640 sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
641
642 if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0)
643 BUG();
644
645 vi->cvq->vq_ops->kick(vi->cvq);
646
647 /*
648 * Spin for a response, the kick causes an ioport write, trapping
649 * into the hypervisor, so the request should be handled immediately.
650 */
651 while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
652 cpu_relax();
653
654 return status == VIRTIO_NET_OK;
655}
656
591static int virtnet_close(struct net_device *dev) 657static int virtnet_close(struct net_device *dev)
592{ 658{
593 struct virtnet_info *vi = netdev_priv(dev); 659 struct virtnet_info *vi = netdev_priv(dev);
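
virtnet_send_command(), added in the hunk above, routes every control request through the new control virtqueue, prepending the class/command header and appending a one-byte ack that it spins on. A hedged usage sketch (the promiscuous-mode request is illustrative; the RX-mode hunk further down does the real thing for both promisc and allmulti):

struct scatterlist sg;
u8 promisc = 1;		/* ask the host to enable promiscuous RX */

sg_set_buf(&sg, &promisc, sizeof(promisc));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
			  VIRTIO_NET_CTRL_RX_PROMISC, &sg, 1, 0))
	dev_warn(&vi->dev->dev, "control virtqueue rejected promisc request\n");
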
@@ -608,10 +674,104 @@ static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
608 return ethtool_op_set_tx_hw_csum(dev, data); 674 return ethtool_op_set_tx_hw_csum(dev, data);
609} 675}
610 676
677static void virtnet_set_rx_mode(struct net_device *dev)
678{
679 struct virtnet_info *vi = netdev_priv(dev);
680 struct scatterlist sg[2];
681 u8 promisc, allmulti;
682 struct virtio_net_ctrl_mac *mac_data;
683 struct dev_addr_list *addr;
684 void *buf;
685 int i;
686
687 /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
688 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
689 return;
690
691 promisc = ((dev->flags & IFF_PROMISC) != 0);
692 allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
693
694 sg_set_buf(sg, &promisc, sizeof(promisc));
695
696 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
697 VIRTIO_NET_CTRL_RX_PROMISC,
698 sg, 1, 0))
699 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
700 promisc ? "en" : "dis");
701
702 sg_set_buf(sg, &allmulti, sizeof(allmulti));
703
704 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
705 VIRTIO_NET_CTRL_RX_ALLMULTI,
706 sg, 1, 0))
707 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
708 allmulti ? "en" : "dis");
709
710 /* MAC filter - use one buffer for both lists */
711 mac_data = buf = kzalloc(((dev->uc_count + dev->mc_count) * ETH_ALEN) +
712 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
713 if (!buf) {
714 dev_warn(&dev->dev, "No memory for MAC address buffer\n");
715 return;
716 }
717
718 /* Store the unicast list and count in the front of the buffer */
719 mac_data->entries = dev->uc_count;
720 addr = dev->uc_list;
721 for (i = 0; i < dev->uc_count; i++, addr = addr->next)
722 memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
723
724 sg_set_buf(&sg[0], mac_data,
725 sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN));
726
727 /* multicast list and count fill the end */
728 mac_data = (void *)&mac_data->macs[dev->uc_count][0];
729
730 mac_data->entries = dev->mc_count;
731 addr = dev->mc_list;
732 for (i = 0; i < dev->mc_count; i++, addr = addr->next)
733 memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
734
735 sg_set_buf(&sg[1], mac_data,
736 sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
737
738 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
739 VIRTIO_NET_CTRL_MAC_TABLE_SET,
740 sg, 2, 0))
741 dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
742
743 kfree(buf);
744}
745
746static void virnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
747{
748 struct virtnet_info *vi = netdev_priv(dev);
749 struct scatterlist sg;
750
751 sg_set_buf(&sg, &vid, sizeof(vid));
752
753 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
754 VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
755 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
756}
757
758static void virnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
759{
760 struct virtnet_info *vi = netdev_priv(dev);
761 struct scatterlist sg;
762
763 sg_set_buf(&sg, &vid, sizeof(vid));
764
765 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
766 VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
767 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
768}
769
611static struct ethtool_ops virtnet_ethtool_ops = { 770static struct ethtool_ops virtnet_ethtool_ops = {
612 .set_tx_csum = virtnet_set_tx_csum, 771 .set_tx_csum = virtnet_set_tx_csum,
613 .set_sg = ethtool_op_set_sg, 772 .set_sg = ethtool_op_set_sg,
614 .set_tso = ethtool_op_set_tso, 773 .set_tso = ethtool_op_set_tso,
774 .get_link = ethtool_op_get_link,
615}; 775};
616 776
617#define MIN_MTU 68 777#define MIN_MTU 68
@@ -630,13 +790,51 @@ static const struct net_device_ops virtnet_netdev = {
630 .ndo_stop = virtnet_close, 790 .ndo_stop = virtnet_close,
631 .ndo_start_xmit = start_xmit, 791 .ndo_start_xmit = start_xmit,
632 .ndo_validate_addr = eth_validate_addr, 792 .ndo_validate_addr = eth_validate_addr,
633 .ndo_set_mac_address = eth_mac_addr, 793 .ndo_set_mac_address = virtnet_set_mac_address,
794 .ndo_set_rx_mode = virtnet_set_rx_mode,
634 .ndo_change_mtu = virtnet_change_mtu, 795 .ndo_change_mtu = virtnet_change_mtu,
796 .ndo_vlan_rx_add_vid = virnet_vlan_rx_add_vid,
797 .ndo_vlan_rx_kill_vid = virnet_vlan_rx_kill_vid,
635#ifdef CONFIG_NET_POLL_CONTROLLER 798#ifdef CONFIG_NET_POLL_CONTROLLER
636 .ndo_poll_controller = virtnet_netpoll, 799 .ndo_poll_controller = virtnet_netpoll,
637#endif 800#endif
638}; 801};
639 802
803static void virtnet_update_status(struct virtnet_info *vi)
804{
805 u16 v;
806
807 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
808 return;
809
810 vi->vdev->config->get(vi->vdev,
811 offsetof(struct virtio_net_config, status),
812 &v, sizeof(v));
813
814 /* Ignore unknown (future) status bits */
815 v &= VIRTIO_NET_S_LINK_UP;
816
817 if (vi->status == v)
818 return;
819
820 vi->status = v;
821
822 if (vi->status & VIRTIO_NET_S_LINK_UP) {
823 netif_carrier_on(vi->dev);
824 netif_wake_queue(vi->dev);
825 } else {
826 netif_carrier_off(vi->dev);
827 netif_stop_queue(vi->dev);
828 }
829}
830
831static void virtnet_config_changed(struct virtio_device *vdev)
832{
833 struct virtnet_info *vi = vdev->priv;
834
835 virtnet_update_status(vi);
836}
837
640static int virtnet_probe(struct virtio_device *vdev) 838static int virtnet_probe(struct virtio_device *vdev)
641{ 839{
642 int err; 840 int err;
@@ -678,8 +876,11 @@ static int virtnet_probe(struct virtio_device *vdev)
678 vdev->config->get(vdev, 876 vdev->config->get(vdev,
679 offsetof(struct virtio_net_config, mac), 877 offsetof(struct virtio_net_config, mac),
680 dev->dev_addr, dev->addr_len); 878 dev->dev_addr, dev->addr_len);
681 } else 879 } else {
682 random_ether_addr(dev->dev_addr); 880 random_ether_addr(dev->dev_addr);
881 vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
882 dev->dev_addr, dev->addr_len);
883 }
683 884
684 /* Set up our device-specific information */ 885 /* Set up our device-specific information */
685 vi = netdev_priv(dev); 886 vi = netdev_priv(dev);
@@ -715,6 +916,17 @@ static int virtnet_probe(struct virtio_device *vdev)
715 goto free_recv; 916 goto free_recv;
716 } 917 }
717 918
919 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
920 vi->cvq = vdev->config->find_vq(vdev, 2, NULL);
921 if (IS_ERR(vi->cvq)) {
 922 			err = PTR_ERR(vi->cvq);
923 goto free_send;
924 }
925
926 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
927 dev->features |= NETIF_F_HW_VLAN_FILTER;
928 }
929
718 /* Initialize our empty receive and send queues. */ 930 /* Initialize our empty receive and send queues. */
719 skb_queue_head_init(&vi->recv); 931 skb_queue_head_init(&vi->recv);
720 skb_queue_head_init(&vi->send); 932 skb_queue_head_init(&vi->send);
@@ -727,7 +939,7 @@ static int virtnet_probe(struct virtio_device *vdev)
727 err = register_netdev(dev); 939 err = register_netdev(dev);
728 if (err) { 940 if (err) {
729 pr_debug("virtio_net: registering device failed\n"); 941 pr_debug("virtio_net: registering device failed\n");
730 goto free_send; 942 goto free_ctrl;
731 } 943 }
732 944
733 /* Last of all, set up some receive buffers. */ 945 /* Last of all, set up some receive buffers. */
@@ -739,11 +951,17 @@ static int virtnet_probe(struct virtio_device *vdev)
739 goto unregister; 951 goto unregister;
740 } 952 }
741 953
954 vi->status = VIRTIO_NET_S_LINK_UP;
955 virtnet_update_status(vi);
956
742 pr_debug("virtnet: registered device %s\n", dev->name); 957 pr_debug("virtnet: registered device %s\n", dev->name);
743 return 0; 958 return 0;
744 959
745unregister: 960unregister:
746 unregister_netdev(dev); 961 unregister_netdev(dev);
962free_ctrl:
963 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
964 vdev->config->del_vq(vi->cvq);
747free_send: 965free_send:
748 vdev->config->del_vq(vi->svq); 966 vdev->config->del_vq(vi->svq);
749free_recv: 967free_recv:
@@ -775,6 +993,8 @@ static void virtnet_remove(struct virtio_device *vdev)
775 993
776 vdev->config->del_vq(vi->svq); 994 vdev->config->del_vq(vi->svq);
777 vdev->config->del_vq(vi->rvq); 995 vdev->config->del_vq(vi->rvq);
996 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
997 vdev->config->del_vq(vi->cvq);
778 unregister_netdev(vi->dev); 998 unregister_netdev(vi->dev);
779 999
780 while (vi->pages) 1000 while (vi->pages)
@@ -794,7 +1014,8 @@ static unsigned int features[] = {
794 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 1014 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
795 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 1015 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
796 VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */ 1016 VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
797 VIRTIO_NET_F_MRG_RXBUF, 1017 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1018 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
798 VIRTIO_F_NOTIFY_ON_EMPTY, 1019 VIRTIO_F_NOTIFY_ON_EMPTY,
799}; 1020};
800 1021
@@ -806,6 +1027,7 @@ static struct virtio_driver virtio_net = {
806 .id_table = id_table, 1027 .id_table = id_table,
807 .probe = virtnet_probe, 1028 .probe = virtnet_probe,
808 .remove = __devexit_p(virtnet_remove), 1029 .remove = __devexit_p(virtnet_remove),
1030 .config_changed = virtnet_config_changed,
809}; 1031};
810 1032
811static int __init init(void) 1033static int __init init(void)
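The two scatterlist entries built by virtnet_set_rx_mode() above describe two back-to-back MAC tables packed into one kzalloc'd buffer. A minimal sketch of the assumed layout follows; the struct name and field widths here are assumptions for illustration, only the entries/macs naming is taken from the code above.

#include <linux/types.h>
#include <linux/if_ether.h>	/* ETH_ALEN */

/* Assumed shape of one table: a count followed by that many MAC addresses. */
struct mac_table_sketch {
	u32 entries;			/* number of addresses that follow */
	u8 macs[][ETH_ALEN];		/* entries * ETH_ALEN bytes */
};

/*
 * Buffer layout built by virtnet_set_rx_mode() (unicast table first,
 * multicast table immediately after it):
 *
 *   | uc.entries | uc.macs[0..uc_count-1] | mc.entries | mc.macs[0..mc_count-1] |
 *   |<------------- sg[0] -------------->|<------------- sg[1] --------------->|
 *
 * Both sg entries are handed to the device in a single
 * VIRTIO_NET_CTRL_MAC / VIRTIO_NET_CTRL_MAC_TABLE_SET command.
 */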
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index b46897996f7..9693b0fd323 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -296,7 +296,13 @@ static void c101_destroy_card(card_t *card)
296 kfree(card); 296 kfree(card);
297} 297}
298 298
299 299static const struct net_device_ops c101_ops = {
300 .ndo_open = c101_open,
301 .ndo_stop = c101_close,
302 .ndo_change_mtu = hdlc_change_mtu,
303 .ndo_start_xmit = hdlc_start_xmit,
304 .ndo_do_ioctl = c101_ioctl,
305};
300 306
301static int __init c101_run(unsigned long irq, unsigned long winbase) 307static int __init c101_run(unsigned long irq, unsigned long winbase)
302{ 308{
@@ -367,9 +373,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
367 dev->mem_start = winbase; 373 dev->mem_start = winbase;
368 dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; 374 dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
369 dev->tx_queue_len = 50; 375 dev->tx_queue_len = 50;
370 dev->do_ioctl = c101_ioctl; 376 dev->netdev_ops = &c101_ops;
371 dev->open = c101_open;
372 dev->stop = c101_close;
373 hdlc->attach = sca_attach; 377 hdlc->attach = sca_attach;
374 hdlc->xmit = sca_xmit; 378 hdlc->xmit = sca_xmit;
375 card->settings.clock_type = CLOCK_EXT; 379 card->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index d80b72e22de..0d7ba117ef6 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -427,6 +427,15 @@ static void __exit cosa_exit(void)
427} 427}
428module_exit(cosa_exit); 428module_exit(cosa_exit);
429 429
430static const struct net_device_ops cosa_ops = {
431 .ndo_open = cosa_net_open,
432 .ndo_stop = cosa_net_close,
433 .ndo_change_mtu = hdlc_change_mtu,
434 .ndo_start_xmit = hdlc_start_xmit,
435 .ndo_do_ioctl = cosa_net_ioctl,
436 .ndo_tx_timeout = cosa_net_timeout,
437};
438
430static int cosa_probe(int base, int irq, int dma) 439static int cosa_probe(int base, int irq, int dma)
431{ 440{
432 struct cosa_data *cosa = cosa_cards+nr_cards; 441 struct cosa_data *cosa = cosa_cards+nr_cards;
@@ -575,10 +584,7 @@ static int cosa_probe(int base, int irq, int dma)
575 } 584 }
576 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; 585 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
577 dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; 586 dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
578 chan->netdev->open = cosa_net_open; 587 chan->netdev->netdev_ops = &cosa_ops;
579 chan->netdev->stop = cosa_net_close;
580 chan->netdev->do_ioctl = cosa_net_ioctl;
581 chan->netdev->tx_timeout = cosa_net_timeout;
582 chan->netdev->watchdog_timeo = TX_TIMEOUT; 588 chan->netdev->watchdog_timeo = TX_TIMEOUT;
583 chan->netdev->base_addr = chan->cosa->datareg; 589 chan->netdev->base_addr = chan->cosa->datareg;
584 chan->netdev->irq = chan->cosa->irq; 590 chan->netdev->irq = chan->cosa->irq;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 888025db2f0..8face5db8f3 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -883,6 +883,15 @@ static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
883 return ret; 883 return ret;
884} 884}
885 885
886static const struct net_device_ops dscc4_ops = {
887 .ndo_open = dscc4_open,
888 .ndo_stop = dscc4_close,
889 .ndo_change_mtu = hdlc_change_mtu,
890 .ndo_start_xmit = hdlc_start_xmit,
891 .ndo_do_ioctl = dscc4_ioctl,
892 .ndo_tx_timeout = dscc4_tx_timeout,
893};
894
886static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) 895static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
887{ 896{
888 struct dscc4_pci_priv *ppriv; 897 struct dscc4_pci_priv *ppriv;
@@ -916,13 +925,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
916 hdlc_device *hdlc = dev_to_hdlc(d); 925 hdlc_device *hdlc = dev_to_hdlc(d);
917 926
918 d->base_addr = (unsigned long)ioaddr; 927 d->base_addr = (unsigned long)ioaddr;
919 d->init = NULL;
920 d->irq = pdev->irq; 928 d->irq = pdev->irq;
921 d->open = dscc4_open; 929 d->netdev_ops = &dscc4_ops;
922 d->stop = dscc4_close;
923 d->set_multicast_list = NULL;
924 d->do_ioctl = dscc4_ioctl;
925 d->tx_timeout = dscc4_tx_timeout;
926 d->watchdog_timeo = TX_TIMEOUT; 930 d->watchdog_timeo = TX_TIMEOUT;
927 SET_NETDEV_DEV(d, &pdev->dev); 931 SET_NETDEV_DEV(d, &pdev->dev);
928 932
@@ -1048,7 +1052,7 @@ static int dscc4_open(struct net_device *dev)
1048 struct dscc4_pci_priv *ppriv; 1052 struct dscc4_pci_priv *ppriv;
1049 int ret = -EAGAIN; 1053 int ret = -EAGAIN;
1050 1054
1051 if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit) 1055 if ((dscc4_loopback_check(dpriv) < 0))
1052 goto err; 1056 goto err;
1053 1057
1054 if ((ret = hdlc_open(dev))) 1058 if ((ret = hdlc_open(dev)))
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 48a2c9d2895..00945f7c1e9 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2424,6 +2424,15 @@ fst_init_card(struct fst_card_info *card)
2424 type_strings[card->type], card->irq, card->nports); 2424 type_strings[card->type], card->irq, card->nports);
2425} 2425}
2426 2426
2427static const struct net_device_ops fst_ops = {
2428 .ndo_open = fst_open,
2429 .ndo_stop = fst_close,
2430 .ndo_change_mtu = hdlc_change_mtu,
2431 .ndo_start_xmit = hdlc_start_xmit,
2432 .ndo_do_ioctl = fst_ioctl,
2433 .ndo_tx_timeout = fst_tx_timeout,
2434};
2435
2427/* 2436/*
2428 * Initialise card when detected. 2437 * Initialise card when detected.
2429 * Returns 0 to indicate success, or errno otherwise. 2438 * Returns 0 to indicate success, or errno otherwise.
@@ -2565,12 +2574,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2565 dev->base_addr = card->pci_conf; 2574 dev->base_addr = card->pci_conf;
2566 dev->irq = card->irq; 2575 dev->irq = card->irq;
2567 2576
2568 dev->tx_queue_len = FST_TX_QUEUE_LEN; 2577 dev->netdev_ops = &fst_ops;
2569 dev->open = fst_open; 2578 dev->tx_queue_len = FST_TX_QUEUE_LEN;
2570 dev->stop = fst_close; 2579 dev->watchdog_timeo = FST_TX_TIMEOUT;
2571 dev->do_ioctl = fst_ioctl;
2572 dev->watchdog_timeo = FST_TX_TIMEOUT;
2573 dev->tx_timeout = fst_tx_timeout;
2574 hdlc->attach = fst_attach; 2580 hdlc->attach = fst_attach;
2575 hdlc->xmit = fst_start_xmit; 2581 hdlc->xmit = fst_start_xmit;
2576 } 2582 }
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 08b3536944f..497b003d723 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget)
341 received = sca_rx_done(port, budget); 341 received = sca_rx_done(port, budget);
342 342
343 if (received < budget) { 343 if (received < budget) {
344 netif_rx_complete(napi); 344 napi_complete(napi);
345 enable_intr(port); 345 enable_intr(port);
346 } 346 }
347 347
@@ -359,7 +359,7 @@ static irqreturn_t sca_intr(int irq, void *dev_id)
359 if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) { 359 if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
360 handled = 1; 360 handled = 1;
361 disable_intr(port); 361 disable_intr(port);
362 netif_rx_schedule(&port->napi); 362 napi_schedule(&port->napi);
363 } 363 }
364 } 364 }
365 365
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 1f2a140c9f7..5ce43720555 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -44,7 +44,7 @@ static const char* version = "HDLC support module revision 1.22";
44 44
45static struct hdlc_proto *first_proto; 45static struct hdlc_proto *first_proto;
46 46
47static int hdlc_change_mtu(struct net_device *dev, int new_mtu) 47int hdlc_change_mtu(struct net_device *dev, int new_mtu)
48{ 48{
49 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) 49 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
50 return -EINVAL; 50 return -EINVAL;
@@ -52,15 +52,6 @@ static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
52 return 0; 52 return 0;
53} 53}
54 54
55
56
57static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
58{
59 return &dev->stats;
60}
61
62
63
64static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, 55static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
65 struct packet_type *p, struct net_device *orig_dev) 56 struct packet_type *p, struct net_device *orig_dev)
66{ 57{
@@ -75,7 +66,15 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
75 return hdlc->proto->netif_rx(skb); 66 return hdlc->proto->netif_rx(skb);
76} 67}
77 68
69int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
70{
71 hdlc_device *hdlc = dev_to_hdlc(dev);
72
73 if (hdlc->proto->xmit)
74 return hdlc->proto->xmit(skb, dev);
78 75
76 return hdlc->xmit(skb, dev); /* call hardware driver directly */
77}
79 78
80static inline void hdlc_proto_start(struct net_device *dev) 79static inline void hdlc_proto_start(struct net_device *dev)
81{ 80{
@@ -102,11 +101,11 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
102 hdlc_device *hdlc; 101 hdlc_device *hdlc;
103 unsigned long flags; 102 unsigned long flags;
104 int on; 103 int on;
105 104
106 if (dev_net(dev) != &init_net) 105 if (dev_net(dev) != &init_net)
107 return NOTIFY_DONE; 106 return NOTIFY_DONE;
108 107
109 if (dev->get_stats != hdlc_get_stats) 108 if (!(dev->priv_flags & IFF_WAN_HDLC))
110 return NOTIFY_DONE; /* not an HDLC device */ 109 return NOTIFY_DONE; /* not an HDLC device */
111 110
112 if (event != NETDEV_CHANGE) 111 if (event != NETDEV_CHANGE)
@@ -233,15 +232,13 @@ static void hdlc_setup_dev(struct net_device *dev)
233 /* Re-init all variables changed by HDLC protocol drivers, 232 /* Re-init all variables changed by HDLC protocol drivers,
234 * including ether_setup() called from hdlc_raw_eth.c. 233 * including ether_setup() called from hdlc_raw_eth.c.
235 */ 234 */
236 dev->get_stats = hdlc_get_stats;
237 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 235 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
236 dev->priv_flags = IFF_WAN_HDLC;
238 dev->mtu = HDLC_MAX_MTU; 237 dev->mtu = HDLC_MAX_MTU;
239 dev->type = ARPHRD_RAWHDLC; 238 dev->type = ARPHRD_RAWHDLC;
240 dev->hard_header_len = 16; 239 dev->hard_header_len = 16;
241 dev->addr_len = 0; 240 dev->addr_len = 0;
242 dev->header_ops = &hdlc_null_ops; 241 dev->header_ops = &hdlc_null_ops;
243
244 dev->change_mtu = hdlc_change_mtu;
245} 242}
246 243
247static void hdlc_setup(struct net_device *dev) 244static void hdlc_setup(struct net_device *dev)
@@ -339,6 +336,8 @@ MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
339MODULE_DESCRIPTION("HDLC support module"); 336MODULE_DESCRIPTION("HDLC support module");
340MODULE_LICENSE("GPL v2"); 337MODULE_LICENSE("GPL v2");
341 338
339EXPORT_SYMBOL(hdlc_change_mtu);
340EXPORT_SYMBOL(hdlc_start_xmit);
342EXPORT_SYMBOL(hdlc_open); 341EXPORT_SYMBOL(hdlc_open);
343EXPORT_SYMBOL(hdlc_close); 342EXPORT_SYMBOL(hdlc_close);
344EXPORT_SYMBOL(hdlc_ioctl); 343EXPORT_SYMBOL(hdlc_ioctl);
@@ -350,7 +349,7 @@ EXPORT_SYMBOL(attach_hdlc_protocol);
350EXPORT_SYMBOL(detach_hdlc_protocol); 349EXPORT_SYMBOL(detach_hdlc_protocol);
351 350
352static struct packet_type hdlc_packet_type = { 351static struct packet_type hdlc_packet_type = {
353 .type = __constant_htons(ETH_P_HDLC), 352 .type = cpu_to_be16(ETH_P_HDLC),
354 .func = hdlc_rcv, 353 .func = hdlc_rcv,
355}; 354};
356 355
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 44e64b15dbd..cf5fd17ad70 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -117,7 +117,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
117 data->type = htonl(type); 117 data->type = htonl(type);
118 data->par1 = par1; 118 data->par1 = par1;
119 data->par2 = par2; 119 data->par2 = par2;
120 data->rel = __constant_htons(0xFFFF); 120 data->rel = cpu_to_be16(0xFFFF);
121 /* we will need do_div here if 1000 % HZ != 0 */ 121 /* we will need do_div here if 1000 % HZ != 0 */
122 data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ)); 122 data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
123 123
@@ -136,20 +136,20 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
136 struct hdlc_header *data = (struct hdlc_header*)skb->data; 136 struct hdlc_header *data = (struct hdlc_header*)skb->data;
137 137
138 if (skb->len < sizeof(struct hdlc_header)) 138 if (skb->len < sizeof(struct hdlc_header))
139 return __constant_htons(ETH_P_HDLC); 139 return cpu_to_be16(ETH_P_HDLC);
140 140
141 if (data->address != CISCO_MULTICAST && 141 if (data->address != CISCO_MULTICAST &&
142 data->address != CISCO_UNICAST) 142 data->address != CISCO_UNICAST)
143 return __constant_htons(ETH_P_HDLC); 143 return cpu_to_be16(ETH_P_HDLC);
144 144
145 switch(data->protocol) { 145 switch(data->protocol) {
146 case __constant_htons(ETH_P_IP): 146 case cpu_to_be16(ETH_P_IP):
147 case __constant_htons(ETH_P_IPX): 147 case cpu_to_be16(ETH_P_IPX):
148 case __constant_htons(ETH_P_IPV6): 148 case cpu_to_be16(ETH_P_IPV6):
149 skb_pull(skb, sizeof(struct hdlc_header)); 149 skb_pull(skb, sizeof(struct hdlc_header));
150 return data->protocol; 150 return data->protocol;
151 default: 151 default:
152 return __constant_htons(ETH_P_HDLC); 152 return cpu_to_be16(ETH_P_HDLC);
153 } 153 }
154} 154}
155 155
@@ -194,7 +194,7 @@ static int cisco_rx(struct sk_buff *skb)
194 case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ 194 case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
195 in_dev = dev->ip_ptr; 195 in_dev = dev->ip_ptr;
196 addr = 0; 196 addr = 0;
197 mask = __constant_htonl(~0); /* is the mask correct? */ 197 mask = ~cpu_to_be32(0); /* is the mask correct? */
198 198
199 if (in_dev != NULL) { 199 if (in_dev != NULL) {
200 struct in_ifaddr **ifap = &in_dev->ifa_list; 200 struct in_ifaddr **ifap = &in_dev->ifa_list;
@@ -382,7 +382,6 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
382 382
383 memcpy(&state(hdlc)->settings, &new_settings, size); 383 memcpy(&state(hdlc)->settings, &new_settings, size);
384 spin_lock_init(&state(hdlc)->lock); 384 spin_lock_init(&state(hdlc)->lock);
385 dev->hard_start_xmit = hdlc->xmit;
386 dev->header_ops = &cisco_header_ops; 385 dev->header_ops = &cisco_header_ops;
387 dev->type = ARPHRD_CISCO; 386 dev->type = ARPHRD_CISCO;
388 netif_dormant_on(dev); 387 netif_dormant_on(dev);
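The __constant_htons()/__constant_cpu_to_le32() replacements in this and the following hunks work because the plain conversion macros already fold to constants when given constant arguments, which is what allows them to appear in switch case labels. A minimal, hypothetical sketch of the pattern (not taken from this patch):

#include <linux/types.h>
#include <linux/if_ether.h>	/* ETH_P_IP, ETH_P_IPV6 */
#include <asm/byteorder.h>	/* cpu_to_be16() */

/* Case labels need integer constant expressions; cpu_to_be16() on a
 * constant argument is evaluated at compile time, so it can stand in
 * for the old __constant_htons() form. */
static int ip_version_sketch(__be16 proto)
{
	switch (proto) {
	case cpu_to_be16(ETH_P_IP):
		return 4;
	case cpu_to_be16(ETH_P_IPV6):
		return 6;
	default:
		return 0;
	}
}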
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index f1ddd7c3459..80053010109 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -278,31 +278,31 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
278 struct sk_buff *skb = *skb_p; 278 struct sk_buff *skb = *skb_p;
279 279
280 switch (skb->protocol) { 280 switch (skb->protocol) {
281 case __constant_htons(NLPID_CCITT_ANSI_LMI): 281 case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
282 head_len = 4; 282 head_len = 4;
283 skb_push(skb, head_len); 283 skb_push(skb, head_len);
284 skb->data[3] = NLPID_CCITT_ANSI_LMI; 284 skb->data[3] = NLPID_CCITT_ANSI_LMI;
285 break; 285 break;
286 286
287 case __constant_htons(NLPID_CISCO_LMI): 287 case cpu_to_be16(NLPID_CISCO_LMI):
288 head_len = 4; 288 head_len = 4;
289 skb_push(skb, head_len); 289 skb_push(skb, head_len);
290 skb->data[3] = NLPID_CISCO_LMI; 290 skb->data[3] = NLPID_CISCO_LMI;
291 break; 291 break;
292 292
293 case __constant_htons(ETH_P_IP): 293 case cpu_to_be16(ETH_P_IP):
294 head_len = 4; 294 head_len = 4;
295 skb_push(skb, head_len); 295 skb_push(skb, head_len);
296 skb->data[3] = NLPID_IP; 296 skb->data[3] = NLPID_IP;
297 break; 297 break;
298 298
299 case __constant_htons(ETH_P_IPV6): 299 case cpu_to_be16(ETH_P_IPV6):
300 head_len = 4; 300 head_len = 4;
301 skb_push(skb, head_len); 301 skb_push(skb, head_len);
302 skb->data[3] = NLPID_IPV6; 302 skb->data[3] = NLPID_IPV6;
303 break; 303 break;
304 304
305 case __constant_htons(ETH_P_802_3): 305 case cpu_to_be16(ETH_P_802_3):
306 head_len = 10; 306 head_len = 10;
307 if (skb_headroom(skb) < head_len) { 307 if (skb_headroom(skb) < head_len) {
308 struct sk_buff *skb2 = skb_realloc_headroom(skb, 308 struct sk_buff *skb2 = skb_realloc_headroom(skb,
@@ -426,7 +426,7 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
426 skb_put(skb, pad); 426 skb_put(skb, pad);
427 memset(skb->data + len, 0, pad); 427 memset(skb->data + len, 0, pad);
428 } 428 }
429 skb->protocol = __constant_htons(ETH_P_802_3); 429 skb->protocol = cpu_to_be16(ETH_P_802_3);
430 } 430 }
431 if (!fr_hard_header(&skb, pvc->dlci)) { 431 if (!fr_hard_header(&skb, pvc->dlci)) {
432 dev->stats.tx_bytes += skb->len; 432 dev->stats.tx_bytes += skb->len;
@@ -444,18 +444,6 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
444 return 0; 444 return 0;
445} 445}
446 446
447
448
449static int pvc_change_mtu(struct net_device *dev, int new_mtu)
450{
451 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
452 return -EINVAL;
453 dev->mtu = new_mtu;
454 return 0;
455}
456
457
458
459static inline void fr_log_dlci_active(pvc_device *pvc) 447static inline void fr_log_dlci_active(pvc_device *pvc)
460{ 448{
461 printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", 449 printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
@@ -508,10 +496,10 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
508 memset(skb->data, 0, len); 496 memset(skb->data, 0, len);
509 skb_reserve(skb, 4); 497 skb_reserve(skb, 4);
510 if (lmi == LMI_CISCO) { 498 if (lmi == LMI_CISCO) {
511 skb->protocol = __constant_htons(NLPID_CISCO_LMI); 499 skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
512 fr_hard_header(&skb, LMI_CISCO_DLCI); 500 fr_hard_header(&skb, LMI_CISCO_DLCI);
513 } else { 501 } else {
514 skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); 502 skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
515 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); 503 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
516 } 504 }
517 data = skb_tail_pointer(skb); 505 data = skb_tail_pointer(skb);
@@ -1068,6 +1056,14 @@ static void pvc_setup(struct net_device *dev)
1068 dev->addr_len = 2; 1056 dev->addr_len = 2;
1069} 1057}
1070 1058
1059static const struct net_device_ops pvc_ops = {
1060 .ndo_open = pvc_open,
1061 .ndo_stop = pvc_close,
1062 .ndo_change_mtu = hdlc_change_mtu,
1063 .ndo_start_xmit = pvc_xmit,
1064 .ndo_do_ioctl = pvc_ioctl,
1065};
1066
1071static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) 1067static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1072{ 1068{
1073 hdlc_device *hdlc = dev_to_hdlc(frad); 1069 hdlc_device *hdlc = dev_to_hdlc(frad);
@@ -1104,11 +1100,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1104 *(__be16*)dev->dev_addr = htons(dlci); 1100 *(__be16*)dev->dev_addr = htons(dlci);
1105 dlci_to_q922(dev->broadcast, dlci); 1101 dlci_to_q922(dev->broadcast, dlci);
1106 } 1102 }
1107 dev->hard_start_xmit = pvc_xmit; 1103 dev->netdev_ops = &pvc_ops;
1108 dev->open = pvc_open;
1109 dev->stop = pvc_close;
1110 dev->do_ioctl = pvc_ioctl;
1111 dev->change_mtu = pvc_change_mtu;
1112 dev->mtu = HDLC_MAX_MTU; 1104 dev->mtu = HDLC_MAX_MTU;
1113 dev->tx_queue_len = 0; 1105 dev->tx_queue_len = 0;
1114 dev->ml_priv = pvc; 1106 dev->ml_priv = pvc;
@@ -1260,8 +1252,6 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1260 state(hdlc)->dce_pvc_count = 0; 1252 state(hdlc)->dce_pvc_count = 0;
1261 } 1253 }
1262 memcpy(&state(hdlc)->settings, &new_settings, size); 1254 memcpy(&state(hdlc)->settings, &new_settings, size);
1263
1264 dev->hard_start_xmit = hdlc->xmit;
1265 dev->type = ARPHRD_FRAD; 1255 dev->type = ARPHRD_FRAD;
1266 return 0; 1256 return 0;
1267 1257
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 57fe714c1c7..72a7cdab424 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -150,11 +150,11 @@ static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
150 return htons(ETH_P_HDLC); 150 return htons(ETH_P_HDLC);
151 151
152 switch (data->protocol) { 152 switch (data->protocol) {
153 case __constant_htons(PID_IP): 153 case cpu_to_be16(PID_IP):
154 skb_pull(skb, sizeof(struct hdlc_header)); 154 skb_pull(skb, sizeof(struct hdlc_header));
155 return htons(ETH_P_IP); 155 return htons(ETH_P_IP);
156 156
157 case __constant_htons(PID_IPV6): 157 case cpu_to_be16(PID_IPV6):
158 skb_pull(skb, sizeof(struct hdlc_header)); 158 skb_pull(skb, sizeof(struct hdlc_header));
159 return htons(ETH_P_IPV6); 159 return htons(ETH_P_IPV6);
160 160
@@ -558,7 +558,6 @@ out:
558 return NET_RX_DROP; 558 return NET_RX_DROP;
559} 559}
560 560
561
562static void ppp_timer(unsigned long arg) 561static void ppp_timer(unsigned long arg)
563{ 562{
564 struct proto *proto = (struct proto *)arg; 563 struct proto *proto = (struct proto *)arg;
@@ -679,7 +678,6 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
679 ppp->keepalive_interval = 10; 678 ppp->keepalive_interval = 10;
680 ppp->keepalive_timeout = 60; 679 ppp->keepalive_timeout = 60;
681 680
682 dev->hard_start_xmit = hdlc->xmit;
683 dev->hard_header_len = sizeof(struct hdlc_header); 681 dev->hard_header_len = sizeof(struct hdlc_header);
684 dev->header_ops = &ppp_header_ops; 682 dev->header_ops = &ppp_header_ops;
685 dev->type = ARPHRD_PPP; 683 dev->type = ARPHRD_PPP;
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 8612311748f..19f51fdd552 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -27,11 +27,9 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
27 27
28static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev) 28static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
29{ 29{
30 return __constant_htons(ETH_P_IP); 30 return cpu_to_be16(ETH_P_IP);
31} 31}
32 32
33
34
35static struct hdlc_proto proto = { 33static struct hdlc_proto proto = {
36 .type_trans = raw_type_trans, 34 .type_trans = raw_type_trans,
37 .ioctl = raw_ioctl, 35 .ioctl = raw_ioctl,
@@ -86,7 +84,6 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
86 if (result) 84 if (result)
87 return result; 85 return result;
88 memcpy(hdlc->state, &new_settings, size); 86 memcpy(hdlc->state, &new_settings, size);
89 dev->hard_start_xmit = hdlc->xmit;
90 dev->type = ARPHRD_RAWHDLC; 87 dev->type = ARPHRD_RAWHDLC;
91 netif_dormant_off(dev); 88 netif_dormant_off(dev);
92 return 0; 89 return 0;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index a13fc320752..49e68f5ca5f 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -45,6 +45,7 @@ static int eth_tx(struct sk_buff *skb, struct net_device *dev)
45 45
46static struct hdlc_proto proto = { 46static struct hdlc_proto proto = {
47 .type_trans = eth_type_trans, 47 .type_trans = eth_type_trans,
48 .xmit = eth_tx,
48 .ioctl = raw_eth_ioctl, 49 .ioctl = raw_eth_ioctl,
49 .module = THIS_MODULE, 50 .module = THIS_MODULE,
50}; 51};
@@ -56,9 +57,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
56 const size_t size = sizeof(raw_hdlc_proto); 57 const size_t size = sizeof(raw_hdlc_proto);
57 raw_hdlc_proto new_settings; 58 raw_hdlc_proto new_settings;
58 hdlc_device *hdlc = dev_to_hdlc(dev); 59 hdlc_device *hdlc = dev_to_hdlc(dev);
59 int result; 60 int result, old_qlen;
60 int (*old_ch_mtu)(struct net_device *, int);
61 int old_qlen;
62 61
63 switch (ifr->ifr_settings.type) { 62 switch (ifr->ifr_settings.type) {
64 case IF_GET_PROTO: 63 case IF_GET_PROTO:
@@ -99,11 +98,8 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
99 if (result) 98 if (result)
100 return result; 99 return result;
101 memcpy(hdlc->state, &new_settings, size); 100 memcpy(hdlc->state, &new_settings, size);
102 dev->hard_start_xmit = eth_tx;
103 old_ch_mtu = dev->change_mtu;
104 old_qlen = dev->tx_queue_len; 101 old_qlen = dev->tx_queue_len;
105 ether_setup(dev); 102 ether_setup(dev);
106 dev->change_mtu = old_ch_mtu;
107 dev->tx_queue_len = old_qlen; 103 dev->tx_queue_len = old_qlen;
108 random_ether_addr(dev->dev_addr); 104 random_ether_addr(dev->dev_addr);
109 netif_dormant_off(dev); 105 netif_dormant_off(dev);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index cbcbf6f0414..b1dc29ed158 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = {
184 .close = x25_close, 184 .close = x25_close,
185 .ioctl = x25_ioctl, 185 .ioctl = x25_ioctl,
186 .netif_rx = x25_rx, 186 .netif_rx = x25_rx,
187 .xmit = x25_xmit,
187 .module = THIS_MODULE, 188 .module = THIS_MODULE,
188}; 189};
189 190
@@ -213,7 +214,6 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
213 214
214 if ((result = attach_hdlc_protocol(dev, &proto, 0))) 215 if ((result = attach_hdlc_protocol(dev, &proto, 0)))
215 return result; 216 return result;
216 dev->hard_start_xmit = x25_xmit;
217 dev->type = ARPHRD_X25; 217 dev->type = ARPHRD_X25;
218 netif_dormant_off(dev); 218 netif_dormant_off(dev);
219 return 0; 219 return 0;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index af54f0cf1b3..567d4f5062d 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -173,6 +173,14 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding,
173 * Description block for a Comtrol Hostess SV11 card 173 * Description block for a Comtrol Hostess SV11 card
174 */ 174 */
175 175
176static const struct net_device_ops hostess_ops = {
177 .ndo_open = hostess_open,
178 .ndo_stop = hostess_close,
179 .ndo_change_mtu = hdlc_change_mtu,
180 .ndo_start_xmit = hdlc_start_xmit,
181 .ndo_do_ioctl = hostess_ioctl,
182};
183
176static struct z8530_dev *sv11_init(int iobase, int irq) 184static struct z8530_dev *sv11_init(int iobase, int irq)
177{ 185{
178 struct z8530_dev *sv; 186 struct z8530_dev *sv;
@@ -267,9 +275,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
267 275
268 dev_to_hdlc(netdev)->attach = hostess_attach; 276 dev_to_hdlc(netdev)->attach = hostess_attach;
269 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; 277 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
270 netdev->open = hostess_open; 278 netdev->netdev_ops = &hostess_ops;
271 netdev->stop = hostess_close;
272 netdev->do_ioctl = hostess_ioctl;
273 netdev->base_addr = iobase; 279 netdev->base_addr = iobase;
274 netdev->irq = irq; 280 netdev->irq = irq;
275 281
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0dbd85b0162..3bf7d3f447d 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -622,7 +622,7 @@ static void hss_hdlc_rx_irq(void *pdev)
622 printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); 622 printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
623#endif 623#endif
624 qmgr_disable_irq(queue_ids[port->id].rx); 624 qmgr_disable_irq(queue_ids[port->id].rx);
625 netif_rx_schedule(&port->napi); 625 napi_schedule(&port->napi);
626} 626}
627 627
628static int hss_hdlc_poll(struct napi_struct *napi, int budget) 628static int hss_hdlc_poll(struct napi_struct *napi, int budget)
@@ -649,15 +649,15 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
649 if ((n = queue_get_desc(rxq, port, 0)) < 0) { 649 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
650#if DEBUG_RX 650#if DEBUG_RX
651 printk(KERN_DEBUG "%s: hss_hdlc_poll" 651 printk(KERN_DEBUG "%s: hss_hdlc_poll"
652 " netif_rx_complete\n", dev->name); 652 " napi_complete\n", dev->name);
653#endif 653#endif
654 netif_rx_complete(napi); 654 napi_complete(napi);
655 qmgr_enable_irq(rxq); 655 qmgr_enable_irq(rxq);
656 if (!qmgr_stat_empty(rxq) && 656 if (!qmgr_stat_empty(rxq) &&
657 netif_rx_reschedule(napi)) { 657 napi_reschedule(napi)) {
658#if DEBUG_RX 658#if DEBUG_RX
659 printk(KERN_DEBUG "%s: hss_hdlc_poll" 659 printk(KERN_DEBUG "%s: hss_hdlc_poll"
660 " netif_rx_reschedule succeeded\n", 660 " napi_reschedule succeeded\n",
661 dev->name); 661 dev->name);
662#endif 662#endif
663 qmgr_disable_irq(rxq); 663 qmgr_disable_irq(rxq);
@@ -1069,7 +1069,7 @@ static int hss_hdlc_open(struct net_device *dev)
1069 hss_start_hdlc(port); 1069 hss_start_hdlc(port);
1070 1070
1071 /* we may already have RX data, enables IRQ */ 1071 /* we may already have RX data, enables IRQ */
1072 netif_rx_schedule(&port->napi); 1072 napi_schedule(&port->napi);
1073 return 0; 1073 return 0;
1074 1074
1075err_unlock: 1075err_unlock:
@@ -1230,6 +1230,14 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1230 * initialization 1230 * initialization
1231 ****************************************************************************/ 1231 ****************************************************************************/
1232 1232
1233static const struct net_device_ops hss_hdlc_ops = {
1234 .ndo_open = hss_hdlc_open,
1235 .ndo_stop = hss_hdlc_close,
1236 .ndo_change_mtu = hdlc_change_mtu,
1237 .ndo_start_xmit = hdlc_start_xmit,
1238 .ndo_do_ioctl = hss_hdlc_ioctl,
1239};
1240
1233static int __devinit hss_init_one(struct platform_device *pdev) 1241static int __devinit hss_init_one(struct platform_device *pdev)
1234{ 1242{
1235 struct port *port; 1243 struct port *port;
@@ -1254,9 +1262,7 @@ static int __devinit hss_init_one(struct platform_device *pdev)
1254 hdlc = dev_to_hdlc(dev); 1262 hdlc = dev_to_hdlc(dev);
1255 hdlc->attach = hss_hdlc_attach; 1263 hdlc->attach = hss_hdlc_attach;
1256 hdlc->xmit = hss_hdlc_xmit; 1264 hdlc->xmit = hss_hdlc_xmit;
1257 dev->open = hss_hdlc_open; 1265 dev->netdev_ops = &hss_hdlc_ops;
1258 dev->stop = hss_hdlc_close;
1259 dev->do_ioctl = hss_hdlc_ioctl;
1260 dev->tx_queue_len = 100; 1266 dev->tx_queue_len = 100;
1261 port->clock_type = CLOCK_EXT; 1267 port->clock_type = CLOCK_EXT;
1262 port->clock_rate = 2048000; 1268 port->clock_rate = 2048000;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 5b61b3eef45..da9dcf59de2 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -422,7 +422,7 @@ static int lapbeth_device_event(struct notifier_block *this,
422/* ------------------------------------------------------------------------ */ 422/* ------------------------------------------------------------------------ */
423 423
424static struct packet_type lapbeth_packet_type = { 424static struct packet_type lapbeth_packet_type = {
425 .type = __constant_htons(ETH_P_DEC), 425 .type = cpu_to_be16(ETH_P_DEC),
426 .func = lapbeth_rcv, 426 .func = lapbeth_rcv,
427}; 427};
428 428
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index feac3b99f8f..45b1822c962 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -806,6 +806,16 @@ static int lmc_attach(struct net_device *dev, unsigned short encoding,
806 return -EINVAL; 806 return -EINVAL;
807} 807}
808 808
809static const struct net_device_ops lmc_ops = {
810 .ndo_open = lmc_open,
811 .ndo_stop = lmc_close,
812 .ndo_change_mtu = hdlc_change_mtu,
813 .ndo_start_xmit = hdlc_start_xmit,
814 .ndo_do_ioctl = lmc_ioctl,
815 .ndo_tx_timeout = lmc_driver_timeout,
816 .ndo_get_stats = lmc_get_stats,
817};
818
809static int __devinit lmc_init_one(struct pci_dev *pdev, 819static int __devinit lmc_init_one(struct pci_dev *pdev,
810 const struct pci_device_id *ent) 820 const struct pci_device_id *ent)
811{ 821{
@@ -849,11 +859,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
849 dev->type = ARPHRD_HDLC; 859 dev->type = ARPHRD_HDLC;
850 dev_to_hdlc(dev)->xmit = lmc_start_xmit; 860 dev_to_hdlc(dev)->xmit = lmc_start_xmit;
851 dev_to_hdlc(dev)->attach = lmc_attach; 861 dev_to_hdlc(dev)->attach = lmc_attach;
852 dev->open = lmc_open; 862 dev->netdev_ops = &lmc_ops;
853 dev->stop = lmc_close;
854 dev->get_stats = lmc_get_stats;
855 dev->do_ioctl = lmc_ioctl;
856 dev->tx_timeout = lmc_driver_timeout;
857 dev->watchdog_timeo = HZ; /* 1 second */ 863 dev->watchdog_timeo = HZ; /* 1 second */
858 dev->tx_queue_len = 100; 864 dev->tx_queue_len = 100;
859 sc->lmc_device = dev; 865 sc->lmc_device = dev;
@@ -1059,9 +1065,6 @@ static int lmc_open(struct net_device *dev)
1059 if ((err = lmc_proto_open(sc)) != 0) 1065 if ((err = lmc_proto_open(sc)) != 0)
1060 return err; 1066 return err;
1061 1067
1062 dev->do_ioctl = lmc_ioctl;
1063
1064
1065 netif_start_queue(dev); 1068 netif_start_queue(dev);
1066 sc->extra_stats.tx_tbusy0++; 1069 sc->extra_stats.tx_tbusy0++;
1067 1070
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 94b4c208b01..044a48175c4 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -51,30 +51,15 @@
51void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ 51void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
52{ 52{
53 lmc_trace(sc->lmc_device, "lmc_proto_attach in"); 53 lmc_trace(sc->lmc_device, "lmc_proto_attach in");
54 switch(sc->if_type){ 54 if (sc->if_type == LMC_NET) {
55 case LMC_PPP:
56 {
57 struct net_device *dev = sc->lmc_device;
58 dev->do_ioctl = lmc_ioctl;
59 }
60 break;
61 case LMC_NET:
62 {
63 struct net_device *dev = sc->lmc_device; 55 struct net_device *dev = sc->lmc_device;
64 /* 56 /*
65 * They set a few basics because they don't use HDLC 57 * They set a few basics because they don't use HDLC
66 */ 58 */
67 dev->flags |= IFF_POINTOPOINT; 59 dev->flags |= IFF_POINTOPOINT;
68
69 dev->hard_header_len = 0; 60 dev->hard_header_len = 0;
70 dev->addr_len = 0; 61 dev->addr_len = 0;
71 } 62 }
72 case LMC_RAW: /* Setup the task queue, maybe we should notify someone? */
73 {
74 }
75 default:
76 break;
77 }
78 lmc_trace(sc->lmc_device, "lmc_proto_attach out"); 63 lmc_trace(sc->lmc_device, "lmc_proto_attach out");
79} 64}
80 65
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 697715ae80f..83da596e205 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -324,7 +324,13 @@ static void n2_destroy_card(card_t *card)
324 kfree(card); 324 kfree(card);
325} 325}
326 326
327 327static const struct net_device_ops n2_ops = {
328 .ndo_open = n2_open,
329 .ndo_stop = n2_close,
330 .ndo_change_mtu = hdlc_change_mtu,
331 .ndo_start_xmit = hdlc_start_xmit,
332 .ndo_do_ioctl = n2_ioctl,
333};
328 334
329static int __init n2_run(unsigned long io, unsigned long irq, 335static int __init n2_run(unsigned long io, unsigned long irq,
330 unsigned long winbase, long valid0, long valid1) 336 unsigned long winbase, long valid0, long valid1)
@@ -460,9 +466,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
460 dev->mem_start = winbase; 466 dev->mem_start = winbase;
461 dev->mem_end = winbase + USE_WINDOWSIZE - 1; 467 dev->mem_end = winbase + USE_WINDOWSIZE - 1;
462 dev->tx_queue_len = 50; 468 dev->tx_queue_len = 50;
463 dev->do_ioctl = n2_ioctl; 469 dev->netdev_ops = &n2_ops;
464 dev->open = n2_open;
465 dev->stop = n2_close;
466 hdlc->attach = sca_attach; 470 hdlc->attach = sca_attach;
467 hdlc->xmit = sca_xmit; 471 hdlc->xmit = sca_xmit;
468 port->settings.clock_type = CLOCK_EXT; 472 port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index f247e5d9002..60ece54bdd9 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -287,7 +287,13 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
287 kfree(card); 287 kfree(card);
288} 288}
289 289
290 290static const struct net_device_ops pc300_ops = {
291 .ndo_open = pc300_open,
292 .ndo_stop = pc300_close,
293 .ndo_change_mtu = hdlc_change_mtu,
294 .ndo_start_xmit = hdlc_start_xmit,
295 .ndo_do_ioctl = pc300_ioctl,
296};
291 297
292static int __devinit pc300_pci_init_one(struct pci_dev *pdev, 298static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
293 const struct pci_device_id *ent) 299 const struct pci_device_id *ent)
@@ -448,9 +454,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
448 dev->mem_start = ramphys; 454 dev->mem_start = ramphys;
449 dev->mem_end = ramphys + ramsize - 1; 455 dev->mem_end = ramphys + ramsize - 1;
450 dev->tx_queue_len = 50; 456 dev->tx_queue_len = 50;
451 dev->do_ioctl = pc300_ioctl; 457 dev->netdev_ops = &pc300_ops;
452 dev->open = pc300_open;
453 dev->stop = pc300_close;
454 hdlc->attach = sca_attach; 458 hdlc->attach = sca_attach;
455 hdlc->xmit = sca_xmit; 459 hdlc->xmit = sca_xmit;
456 port->settings.clock_type = CLOCK_EXT; 460 port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 1104d3a692f..e035d8c57e1 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -265,7 +265,13 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
265 kfree(card); 265 kfree(card);
266} 266}
267 267
268 268static const struct net_device_ops pci200_ops = {
269 .ndo_open = pci200_open,
270 .ndo_stop = pci200_close,
271 .ndo_change_mtu = hdlc_change_mtu,
272 .ndo_start_xmit = hdlc_start_xmit,
273 .ndo_do_ioctl = pci200_ioctl,
274};
269 275
270static int __devinit pci200_pci_init_one(struct pci_dev *pdev, 276static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
271 const struct pci_device_id *ent) 277 const struct pci_device_id *ent)
@@ -395,9 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
395 dev->mem_start = ramphys; 401 dev->mem_start = ramphys;
396 dev->mem_end = ramphys + ramsize - 1; 402 dev->mem_end = ramphys + ramsize - 1;
397 dev->tx_queue_len = 50; 403 dev->tx_queue_len = 50;
398 dev->do_ioctl = pci200_ioctl; 404 dev->netdev_ops = &pci200_ops;
399 dev->open = pci200_open;
400 dev->stop = pci200_close;
401 hdlc->attach = sca_attach; 405 hdlc->attach = sca_attach;
402 hdlc->xmit = sca_xmit; 406 hdlc->xmit = sca_xmit;
403 port->settings.clock_type = CLOCK_EXT; 407 port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0941a26f6e3..23b26902745 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -169,6 +169,14 @@ static int sealevel_attach(struct net_device *dev, unsigned short encoding,
169 return -EINVAL; 169 return -EINVAL;
170} 170}
171 171
172static const struct net_device_ops sealevel_ops = {
173 .ndo_open = sealevel_open,
174 .ndo_stop = sealevel_close,
175 .ndo_change_mtu = hdlc_change_mtu,
176 .ndo_start_xmit = hdlc_start_xmit,
177 .ndo_do_ioctl = sealevel_ioctl,
178};
179
172static int slvl_setup(struct slvl_device *sv, int iobase, int irq) 180static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
173{ 181{
174 struct net_device *dev = alloc_hdlcdev(sv); 182 struct net_device *dev = alloc_hdlcdev(sv);
@@ -177,9 +185,7 @@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
177 185
178 dev_to_hdlc(dev)->attach = sealevel_attach; 186 dev_to_hdlc(dev)->attach = sealevel_attach;
179 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; 187 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
180 dev->open = sealevel_open; 188 dev->netdev_ops = &sealevel_ops;
181 dev->stop = sealevel_close;
182 dev->do_ioctl = sealevel_ioctl;
183 dev->base_addr = iobase; 189 dev->base_addr = iobase;
184 dev->irq = irq; 190 dev->irq = irq;
185 191
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 4bffb67ebca..887acb0dc80 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -547,6 +547,15 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
547 547
548#include "wanxlfw.inc" 548#include "wanxlfw.inc"
549 549
550static const struct net_device_ops wanxl_ops = {
551 .ndo_open = wanxl_open,
552 .ndo_stop = wanxl_close,
553 .ndo_change_mtu = hdlc_change_mtu,
554 .ndo_start_xmit = hdlc_start_xmit,
555 .ndo_do_ioctl = wanxl_ioctl,
556 .ndo_get_stats = wanxl_get_stats,
557};
558
550static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, 559static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
551 const struct pci_device_id *ent) 560 const struct pci_device_id *ent)
552{ 561{
@@ -777,12 +786,9 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
777 hdlc = dev_to_hdlc(dev); 786 hdlc = dev_to_hdlc(dev);
778 spin_lock_init(&port->lock); 787 spin_lock_init(&port->lock);
779 dev->tx_queue_len = 50; 788 dev->tx_queue_len = 50;
780 dev->do_ioctl = wanxl_ioctl; 789 dev->netdev_ops = &wanxl_ops;
781 dev->open = wanxl_open;
782 dev->stop = wanxl_close;
783 hdlc->attach = wanxl_attach; 790 hdlc->attach = wanxl_attach;
784 hdlc->xmit = wanxl_xmit; 791 hdlc->xmit = wanxl_xmit;
785 dev->get_stats = wanxl_get_stats;
786 port->card = card; 792 port->card = card;
787 port->node = i; 793 port->node = i;
788 get_status(port)->clocking = CLOCK_EXT; 794 get_status(port)->clocking = CLOCK_EXT;
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 1d8271f34c3..ecd0cfaefdc 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -140,10 +140,10 @@
140 140
141 141
142static const __le32 i2400m_ACK_BARKER[4] = { 142static const __le32 i2400m_ACK_BARKER[4] = {
143 __constant_cpu_to_le32(I2400M_ACK_BARKER), 143 cpu_to_le32(I2400M_ACK_BARKER),
144 __constant_cpu_to_le32(I2400M_ACK_BARKER), 144 cpu_to_le32(I2400M_ACK_BARKER),
145 __constant_cpu_to_le32(I2400M_ACK_BARKER), 145 cpu_to_le32(I2400M_ACK_BARKER),
146 __constant_cpu_to_le32(I2400M_ACK_BARKER) 146 cpu_to_le32(I2400M_ACK_BARKER)
147}; 147};
148 148
149 149
@@ -771,8 +771,8 @@ static
771int i2400m_dnload_init_nonsigned(struct i2400m *i2400m) 771int i2400m_dnload_init_nonsigned(struct i2400m *i2400m)
772{ 772{
773#define POKE(a, d) { \ 773#define POKE(a, d) { \
774 .address = __constant_cpu_to_le32(a), \ 774 .address = cpu_to_le32(a), \
775 .data = __constant_cpu_to_le32(d) \ 775 .data = cpu_to_le32(d) \
776} 776}
777 static const struct { 777 static const struct {
778 __le32 address; 778 __le32 address;
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 067c871cc22..236f19ea4c8 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -664,17 +664,17 @@ extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
664extern void i2400m_tx_msg_sent(struct i2400m *); 664extern void i2400m_tx_msg_sent(struct i2400m *);
665 665
666static const __le32 i2400m_NBOOT_BARKER[4] = { 666static const __le32 i2400m_NBOOT_BARKER[4] = {
667 __constant_cpu_to_le32(I2400M_NBOOT_BARKER), 667 cpu_to_le32(I2400M_NBOOT_BARKER),
668 __constant_cpu_to_le32(I2400M_NBOOT_BARKER), 668 cpu_to_le32(I2400M_NBOOT_BARKER),
669 __constant_cpu_to_le32(I2400M_NBOOT_BARKER), 669 cpu_to_le32(I2400M_NBOOT_BARKER),
670 __constant_cpu_to_le32(I2400M_NBOOT_BARKER) 670 cpu_to_le32(I2400M_NBOOT_BARKER)
671}; 671};
672 672
673static const __le32 i2400m_SBOOT_BARKER[4] = { 673static const __le32 i2400m_SBOOT_BARKER[4] = {
674 __constant_cpu_to_le32(I2400M_SBOOT_BARKER), 674 cpu_to_le32(I2400M_SBOOT_BARKER),
675 __constant_cpu_to_le32(I2400M_SBOOT_BARKER), 675 cpu_to_le32(I2400M_SBOOT_BARKER),
676 __constant_cpu_to_le32(I2400M_SBOOT_BARKER), 676 cpu_to_le32(I2400M_SBOOT_BARKER),
677 __constant_cpu_to_le32(I2400M_SBOOT_BARKER) 677 cpu_to_le32(I2400M_SBOOT_BARKER)
678}; 678};
679 679
680 680
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 63fe708e8a3..be8be4d0709 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -419,7 +419,7 @@ void i2400m_rx_fake_eth_header(struct net_device *net_dev,
419 419
420 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest)); 420 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
421 memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest)); 421 memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest));
422 eth_hdr->h_proto = __constant_cpu_to_be16(ETH_P_IP); 422 eth_hdr->h_proto = cpu_to_be16(ETH_P_IP);
423} 423}
424 424
425 425
@@ -493,6 +493,14 @@ error_skb_realloc:
493 i2400m, buf, buf_len); 493 i2400m, buf, buf_len);
494} 494}
495 495
496static const struct net_device_ops i2400m_netdev_ops = {
497 .ndo_open = i2400m_open,
498 .ndo_stop = i2400m_stop,
499 .ndo_start_xmit = i2400m_hard_start_xmit,
500 .ndo_tx_timeout = i2400m_tx_timeout,
501 .ndo_change_mtu = i2400m_change_mtu,
502};
503
496 504
497/** 505/**
498 * i2400m_netdev_setup - Set up @net_dev's i2400m private data 506
@@ -513,11 +521,7 @@ void i2400m_netdev_setup(struct net_device *net_dev)
513 & (~IFF_BROADCAST /* i2400m is P2P */ 521 & (~IFF_BROADCAST /* i2400m is P2P */
514 & ~IFF_MULTICAST); 522 & ~IFF_MULTICAST);
515 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; 523 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
516 net_dev->open = i2400m_open; 524 net_dev->netdev_ops = &i2400m_netdev_ops;
517 net_dev->stop = i2400m_stop;
518 net_dev->hard_start_xmit = i2400m_hard_start_xmit;
519 net_dev->change_mtu = i2400m_change_mtu;
520 net_dev->tx_timeout = i2400m_tx_timeout;
521 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); 525 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
522} 526}
523EXPORT_SYMBOL_GPL(i2400m_netdev_setup); 527EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 1bfa283bbd8..123a5f8db6a 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -255,16 +255,16 @@ int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
255 container_of(i2400m, struct i2400ms, i2400m); 255 container_of(i2400m, struct i2400ms, i2400m);
256 struct device *dev = i2400m_dev(i2400m); 256 struct device *dev = i2400m_dev(i2400m);
257 static const __le32 i2400m_WARM_BOOT_BARKER[4] = { 257 static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
258 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 258 cpu_to_le32(I2400M_WARM_RESET_BARKER),
259 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 259 cpu_to_le32(I2400M_WARM_RESET_BARKER),
260 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 260 cpu_to_le32(I2400M_WARM_RESET_BARKER),
261 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 261 cpu_to_le32(I2400M_WARM_RESET_BARKER),
262 }; 262 };
263 static const __le32 i2400m_COLD_BOOT_BARKER[4] = { 263 static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
264 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 264 cpu_to_le32(I2400M_COLD_RESET_BARKER),
265 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 265 cpu_to_le32(I2400M_COLD_RESET_BARKER),
266 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 266 cpu_to_le32(I2400M_COLD_RESET_BARKER),
267 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 267 cpu_to_le32(I2400M_COLD_RESET_BARKER),
268 }; 268 };
269 269
270 if (rt == I2400M_RT_WARM) 270 if (rt == I2400M_RT_WARM)
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index c6d93465c7e..7c28610da6f 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -211,16 +211,16 @@ int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
211 container_of(i2400m, struct i2400mu, i2400m); 211 container_of(i2400m, struct i2400mu, i2400m);
212 struct device *dev = i2400m_dev(i2400m); 212 struct device *dev = i2400m_dev(i2400m);
213 static const __le32 i2400m_WARM_BOOT_BARKER[4] = { 213 static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
214 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 214 cpu_to_le32(I2400M_WARM_RESET_BARKER),
215 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 215 cpu_to_le32(I2400M_WARM_RESET_BARKER),
216 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 216 cpu_to_le32(I2400M_WARM_RESET_BARKER),
217 __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), 217 cpu_to_le32(I2400M_WARM_RESET_BARKER),
218 }; 218 };
219 static const __le32 i2400m_COLD_BOOT_BARKER[4] = { 219 static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
220 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 220 cpu_to_le32(I2400M_COLD_RESET_BARKER),
221 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 221 cpu_to_le32(I2400M_COLD_RESET_BARKER),
222 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 222 cpu_to_le32(I2400M_COLD_RESET_BARKER),
223 __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), 223 cpu_to_le32(I2400M_COLD_RESET_BARKER),
224 }; 224 };
225 225
226 d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); 226 d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index e4f9f747de8..fe819a78571 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -151,6 +151,12 @@ config LIBERTAS_SDIO
151 ---help--- 151 ---help---
152 A driver for Marvell Libertas 8385 and 8686 SDIO devices. 152 A driver for Marvell Libertas 8385 and 8686 SDIO devices.
153 153
154config LIBERTAS_SPI
155 tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
156 depends on LIBERTAS && SPI && GENERIC_GPIO
157 ---help---
158 A driver for Marvell Libertas 8686 SPI devices.
159
154config LIBERTAS_DEBUG 160config LIBERTAS_DEBUG
155 bool "Enable full debugging output in the Libertas module." 161 bool "Enable full debugging output in the Libertas module."
156 depends on LIBERTAS 162 depends on LIBERTAS
@@ -188,127 +194,6 @@ config AIRO
188 194
189 The driver can be compiled as a module and will be named "airo". 195 The driver can be compiled as a module and will be named "airo".
190 196
191config HERMES
192 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
193 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
194 select WIRELESS_EXT
195 select FW_LOADER
196 select CRYPTO
197 select CRYPTO_MICHAEL_MIC
198 ---help---
199 A driver for 802.11b wireless cards based on the "Hermes" or
200 Intersil HFA384x (Prism 2) MAC controller. This includes the vast
201 majority of the PCMCIA 802.11b cards (which are nearly all rebadges)
202 - except for the Cisco/Aironet cards. Cards supported include the
203 Apple Airport (not a PCMCIA card), WavelanIEEE/Orinoco,
204 Cabletron/EnteraSys Roamabout, ELSA AirLancer, MELCO Buffalo, Avaya,
205 IBM High Rate Wireless, Farralon Syyline, Samsung MagicLAN, Netgear
206 MA401, LinkSys WPC-11, D-Link DWL-650, 3Com AirConnect, Intel
207 IPW2011, and Symbol Spectrum24 High Rate amongst others.
208
209 This option includes the guts of the driver, but in order to
210 actually use a card you will also need to enable support for PCMCIA
211 Hermes cards, PLX9052 based PCI adaptors or the Apple Airport below.
212
213 You will also very likely also need the Wireless Tools in order to
214 configure your card and that /etc/pcmcia/wireless.opts works :
215 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
216
217config HERMES_CACHE_FW_ON_INIT
218 bool "Cache Hermes firmware on driver initialisation"
219 depends on HERMES
220 default y
221 ---help---
222 Say Y to cache any firmware required by the Hermes drivers
223 on startup. The firmware will remain cached until the
224 driver is unloaded. The cache uses 64K of RAM.
225
226 Otherwise load the firmware from userspace as required. In
227 this case the driver should be unloaded and restarted
228 whenever the firmware is changed.
229
230 If you are not sure, say Y.
231
232config APPLE_AIRPORT
233 tristate "Apple Airport support (built-in)"
234 depends on PPC_PMAC && HERMES
235 help
236 Say Y here to support the Airport 802.11b wireless Ethernet hardware
237 built into the Macintosh iBook and other recent PowerPC-based
238 Macintosh machines. This is essentially a Lucent Orinoco card with
239 a non-standard interface.
240
241 This driver does not support the Airport Extreme (802.11b/g). Use
242 the BCM43xx driver for Airport Extreme cards.
243
244config PLX_HERMES
245 tristate "Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.)"
246 depends on PCI && HERMES
247 help
248 Enable support for PCMCIA cards supported by the "Hermes" (aka
249 orinoco) driver when used in PLX9052 based PCI adaptors. These
250 adaptors are not a full PCMCIA controller but act as a more limited
251 PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
252 802.11b PCMCIA cards can be used in desktop machines. The Netgear
253 MA301 is such an adaptor.
254
255config TMD_HERMES
256 tristate "Hermes in TMD7160 based PCI adaptor support"
257 depends on PCI && HERMES
258 help
259 Enable support for PCMCIA cards supported by the "Hermes" (aka
260 orinoco) driver when used in TMD7160 based PCI adaptors. These
261 adaptors are not a full PCMCIA controller but act as a more limited
262 PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
263 802.11b PCMCIA cards can be used in desktop machines.
264
265config NORTEL_HERMES
266 tristate "Nortel emobility PCI adaptor support"
267 depends on PCI && HERMES
268 help
269 Enable support for PCMCIA cards supported by the "Hermes" (aka
270 orinoco) driver when used in Nortel emobility PCI adaptors. These
271 adaptors are not full PCMCIA controllers, but act as a more limited
272 PCI <-> PCMCIA bridge.
273
274config PCI_HERMES
275 tristate "Prism 2.5 PCI 802.11b adaptor support"
276 depends on PCI && HERMES
277 help
278 Enable support for PCI and mini-PCI 802.11b wireless NICs based on
279 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
280 PCMCIA cards bundled with PCI<->PCMCIA adaptors which are also
281 common. Some of the built-in wireless adaptors in laptops are of
282 this variety.
283
284config PCMCIA_HERMES
285 tristate "Hermes PCMCIA card support"
286 depends on PCMCIA && HERMES
287 ---help---
288 A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
289 as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
290 EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and
291 others). It should also be usable on various Prism II based cards
292 such as the Linksys, D-Link and Farallon Skyline. It should also
293 work on Symbol cards such as the 3Com AirConnect and Ericsson WLAN.
294
295 You will very likely need the Wireless Tools in order to
296 configure your card and check that /etc/pcmcia/wireless.opts works:
297 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
298
299config PCMCIA_SPECTRUM
300 tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
301 depends on PCMCIA && HERMES
302 ---help---
303
304 This is a driver for 802.11b cards using RAM-loadable Symbol
305 firmware, such as Symbol Wireless Networker LA4100, CompactFlash
306 cards by Socket Communications and Intel PRO/Wireless 2011B.
307
308 This driver requires firmware download on startup. Utilities
309 for downloading Symbol firmware are available at
310 <http://sourceforge.net/projects/orinoco/>
311
312config ATMEL 197config ATMEL
313 tristate "Atmel at76c50x chipset 802.11b support" 198 tristate "Atmel at76c50x chipset 802.11b support"
314 depends on (PCI || PCMCIA) && WLAN_80211 199 depends on (PCI || PCMCIA) && WLAN_80211
@@ -590,5 +475,6 @@ source "drivers/net/wireless/b43/Kconfig"
590source "drivers/net/wireless/b43legacy/Kconfig" 475source "drivers/net/wireless/b43legacy/Kconfig"
591source "drivers/net/wireless/zd1211rw/Kconfig" 476source "drivers/net/wireless/zd1211rw/Kconfig"
592source "drivers/net/wireless/rt2x00/Kconfig" 477source "drivers/net/wireless/rt2x00/Kconfig"
478source "drivers/net/wireless/orinoco/Kconfig"
593 479
594endmenu 480endmenu
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index fc4322ca669..acda45838e9 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -496,39 +496,41 @@ typedef struct {
496 * so all rid access should use the read/writeXXXRid routines. 496 * so all rid access should use the read/writeXXXRid routines.
497 */ 497 */
498 498
499/* This is redundant for x86 archs, but it seems necessary for ARM */
500#pragma pack(1)
501
502/* This structure came from an email sent to me from an engineer at 499/* This structure came from an email sent to me from an engineer at
503 aironet for inclusion into this driver */ 500 aironet for inclusion into this driver */
504typedef struct { 501typedef struct WepKeyRid WepKeyRid;
502struct WepKeyRid {
505 __le16 len; 503 __le16 len;
506 __le16 kindex; 504 __le16 kindex;
507 u8 mac[ETH_ALEN]; 505 u8 mac[ETH_ALEN];
508 __le16 klen; 506 __le16 klen;
509 u8 key[16]; 507 u8 key[16];
510} WepKeyRid; 508} __attribute__ ((packed));
511 509
512/* These structures are from the Aironet's PC4500 Developers Manual */ 510/* These structures are from the Aironet's PC4500 Developers Manual */
513typedef struct { 511typedef struct Ssid Ssid;
512struct Ssid {
514 __le16 len; 513 __le16 len;
515 u8 ssid[32]; 514 u8 ssid[32];
516} Ssid; 515} __attribute__ ((packed));
517 516
518typedef struct { 517typedef struct SsidRid SsidRid;
518struct SsidRid {
519 __le16 len; 519 __le16 len;
520 Ssid ssids[3]; 520 Ssid ssids[3];
521} SsidRid; 521} __attribute__ ((packed));
522 522
523typedef struct { 523typedef struct ModulationRid ModulationRid;
524struct ModulationRid {
524 __le16 len; 525 __le16 len;
525 __le16 modulation; 526 __le16 modulation;
526#define MOD_DEFAULT cpu_to_le16(0) 527#define MOD_DEFAULT cpu_to_le16(0)
527#define MOD_CCK cpu_to_le16(1) 528#define MOD_CCK cpu_to_le16(1)
528#define MOD_MOK cpu_to_le16(2) 529#define MOD_MOK cpu_to_le16(2)
529} ModulationRid; 530} __attribute__ ((packed));
530 531
531typedef struct { 532typedef struct ConfigRid ConfigRid;
533struct ConfigRid {
532 __le16 len; /* sizeof(ConfigRid) */ 534 __le16 len; /* sizeof(ConfigRid) */
533 __le16 opmode; /* operating mode */ 535 __le16 opmode; /* operating mode */
534#define MODE_STA_IBSS cpu_to_le16(0) 536#define MODE_STA_IBSS cpu_to_le16(0)
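The hunk above drops the file-wide #pragma pack(1) in favour of per-structure __attribute__ ((packed)) annotations. A minimal user-space sketch of why the packing matters for these on-the-wire RID layouts; the structure below is invented for illustration and is not one of the driver's RIDs:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical RID-like layout: a 16-bit length, a byte, a 16-bit value. */
struct rid_unpacked {
	uint16_t len;
	uint8_t  flag;
	uint16_t value;
};

struct rid_packed {
	uint16_t len;
	uint8_t  flag;
	uint16_t value;
} __attribute__ ((packed));

int main(void)
{
	/* Without packing the compiler may pad after 'flag', so the struct
	 * no longer matches the card's wire format. */
	printf("unpacked: %zu bytes\n", sizeof(struct rid_unpacked)); /* typically 6 */
	printf("packed:   %zu bytes\n", sizeof(struct rid_packed));   /* always 5 */
	return 0;
}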
@@ -649,9 +651,10 @@ typedef struct {
649#define MAGIC_STAY_IN_CAM (1<<10) 651#define MAGIC_STAY_IN_CAM (1<<10)
650 u8 magicControl; 652 u8 magicControl;
651 __le16 autoWake; 653 __le16 autoWake;
652} ConfigRid; 654} __attribute__ ((packed));
653 655
654typedef struct { 656typedef struct StatusRid StatusRid;
657struct StatusRid {
655 __le16 len; 658 __le16 len;
656 u8 mac[ETH_ALEN]; 659 u8 mac[ETH_ALEN];
657 __le16 mode; 660 __le16 mode;
@@ -707,21 +710,23 @@ typedef struct {
707#define STAT_LEAPFAILED 91 710#define STAT_LEAPFAILED 91
708#define STAT_LEAPTIMEDOUT 92 711#define STAT_LEAPTIMEDOUT 92
709#define STAT_LEAPCOMPLETE 93 712#define STAT_LEAPCOMPLETE 93
710} StatusRid; 713} __attribute__ ((packed));
711 714
712typedef struct { 715typedef struct StatsRid StatsRid;
716struct StatsRid {
713 __le16 len; 717 __le16 len;
714 __le16 spacer; 718 __le16 spacer;
715 __le32 vals[100]; 719 __le32 vals[100];
716} StatsRid; 720} __attribute__ ((packed));
717
718 721
719typedef struct { 722typedef struct APListRid APListRid;
723struct APListRid {
720 __le16 len; 724 __le16 len;
721 u8 ap[4][ETH_ALEN]; 725 u8 ap[4][ETH_ALEN];
722} APListRid; 726} __attribute__ ((packed));
723 727
724typedef struct { 728typedef struct CapabilityRid CapabilityRid;
729struct CapabilityRid {
725 __le16 len; 730 __le16 len;
726 char oui[3]; 731 char oui[3];
727 char zero; 732 char zero;
@@ -748,17 +753,18 @@ typedef struct {
748 __le16 bootBlockVer; 753 __le16 bootBlockVer;
749 __le16 requiredHard; 754 __le16 requiredHard;
750 __le16 extSoftCap; 755 __le16 extSoftCap;
751} CapabilityRid; 756} __attribute__ ((packed));
752
753 757
754/* Only present on firmware >= 5.30.17 */ 758/* Only present on firmware >= 5.30.17 */
755typedef struct { 759typedef struct BSSListRidExtra BSSListRidExtra;
760struct BSSListRidExtra {
756 __le16 unknown[4]; 761 __le16 unknown[4];
757 u8 fixed[12]; /* WLAN management frame */ 762 u8 fixed[12]; /* WLAN management frame */
758 u8 iep[624]; 763 u8 iep[624];
759} BSSListRidExtra; 764} __attribute__ ((packed));
760 765
761typedef struct { 766typedef struct BSSListRid BSSListRid;
767struct BSSListRid {
762 __le16 len; 768 __le16 len;
763 __le16 index; /* First is 0 and 0xffff means end of list */ 769 __le16 index; /* First is 0 and 0xffff means end of list */
764#define RADIO_FH 1 /* Frequency hopping radio type */ 770#define RADIO_FH 1 /* Frequency hopping radio type */
@@ -789,33 +795,37 @@ typedef struct {
789 795
790 /* Only present on firmware >= 5.30.17 */ 796 /* Only present on firmware >= 5.30.17 */
791 BSSListRidExtra extra; 797 BSSListRidExtra extra;
792} BSSListRid; 798} __attribute__ ((packed));
793 799
794typedef struct { 800typedef struct {
795 BSSListRid bss; 801 BSSListRid bss;
796 struct list_head list; 802 struct list_head list;
797} BSSListElement; 803} BSSListElement;
798 804
799typedef struct { 805typedef struct tdsRssiEntry tdsRssiEntry;
806struct tdsRssiEntry {
800 u8 rssipct; 807 u8 rssipct;
801 u8 rssidBm; 808 u8 rssidBm;
802} tdsRssiEntry; 809} __attribute__ ((packed));
803 810
804typedef struct { 811typedef struct tdsRssiRid tdsRssiRid;
812struct tdsRssiRid {
805 u16 len; 813 u16 len;
806 tdsRssiEntry x[256]; 814 tdsRssiEntry x[256];
807} tdsRssiRid; 815} __attribute__ ((packed));
808 816
809typedef struct { 817typedef struct MICRid MICRid;
810 u16 len; 818struct MICRid {
811 u16 state; 819 __le16 len;
812 u16 multicastValid; 820 __le16 state;
821 __le16 multicastValid;
813 u8 multicast[16]; 822 u8 multicast[16];
814 u16 unicastValid; 823 __le16 unicastValid;
815 u8 unicast[16]; 824 u8 unicast[16];
816} MICRid; 825} __attribute__ ((packed));
817 826
818typedef struct { 827typedef struct MICBuffer MICBuffer;
828struct MICBuffer {
819 __be16 typelen; 829 __be16 typelen;
820 830
821 union { 831 union {
@@ -830,15 +840,13 @@ typedef struct {
830 } u; 840 } u;
831 __be32 mic; 841 __be32 mic;
832 __be32 seq; 842 __be32 seq;
833} MICBuffer; 843} __attribute__ ((packed));
834 844
835typedef struct { 845typedef struct {
836 u8 da[ETH_ALEN]; 846 u8 da[ETH_ALEN];
837 u8 sa[ETH_ALEN]; 847 u8 sa[ETH_ALEN];
838} etherHead; 848} etherHead;
839 849
840#pragma pack()
841
842#define TXCTL_TXOK (1<<1) /* report if tx is ok */ 850#define TXCTL_TXOK (1<<1) /* report if tx is ok */
843#define TXCTL_TXEX (1<<2) /* report if tx fails */ 851#define TXCTL_TXEX (1<<2) /* report if tx fails */
844#define TXCTL_802_3 (0<<3) /* 802.3 packet */ 852#define TXCTL_802_3 (0<<3) /* 802.3 packet */
@@ -981,6 +989,14 @@ typedef struct {
981 dma_addr_t host_addr; 989 dma_addr_t host_addr;
982} TxFid; 990} TxFid;
983 991
992struct rx_hdr {
993 __le16 status, len;
994 u8 rssi[2];
995 u8 rate;
996 u8 freq;
997 __le16 tmp[4];
998} __attribute__ ((packed));
999
984typedef struct { 1000typedef struct {
985 unsigned int ctl: 15; 1001 unsigned int ctl: 15;
986 unsigned int rdy: 1; 1002 unsigned int rdy: 1;
@@ -1070,10 +1086,6 @@ static WifiCtlHdr wifictlhdr8023 = {
1070 } 1086 }
1071}; 1087};
1072 1088
1073// Frequency list (map channels to frequencies)
1074static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
1075 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
1076
1077// A few details needed for WEP (Wired Equivalent Privacy) 1089// A few details needed for WEP (Wired Equivalent Privacy)
1078#define MAX_KEY_SIZE 13 // 104-bit key (sold as 128-bit WEP, counting the 24-bit IV) 1090#define MAX_KEY_SIZE 13 // 104-bit key (sold as 128-bit WEP, counting the 24-bit IV)
1079#define MIN_KEY_SIZE 5 // 40 bits RC4 - WEP 1091#define MIN_KEY_SIZE 5 // 40 bits RC4 - WEP
@@ -1082,12 +1094,6 @@ typedef struct wep_key_t {
1082 u8 key[16]; /* 40-bit and 104-bit keys */ 1094 u8 key[16]; /* 40-bit and 104-bit keys */
1083} wep_key_t; 1095} wep_key_t;
1084 1096
1085/* Backward compatibility */
1086#ifndef IW_ENCODE_NOKEY
1087#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
1088#define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN)
1089#endif /* IW_ENCODE_NOKEY */
1090
1091/* List of Wireless Handlers (new API) */ 1097/* List of Wireless Handlers (new API) */
1092static const struct iw_handler_def airo_handler_def; 1098static const struct iw_handler_def airo_handler_def;
1093 1099
@@ -1229,6 +1235,9 @@ struct airo_info {
1229#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE 1235#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
1230 char proc_name[IFNAMSIZ]; 1236 char proc_name[IFNAMSIZ];
1231 1237
1238 int wep_capable;
1239 int max_wep_idx;
1240
1232 /* WPA-related stuff */ 1241 /* WPA-related stuff */
1233 unsigned int bssListFirst; 1242 unsigned int bssListFirst;
1234 unsigned int bssListNext; 1243 unsigned int bssListNext;
@@ -1287,6 +1296,29 @@ static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
1287static void emmh32_final(emmh32_context *context, u8 digest[4]); 1296static void emmh32_final(emmh32_context *context, u8 digest[4]);
1288static int flashpchar(struct airo_info *ai,int byte,int dwelltime); 1297static int flashpchar(struct airo_info *ai,int byte,int dwelltime);
1289 1298
1299static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len,
1300 struct crypto_cipher *tfm)
1301{
1302 /* If the current MIC context is valid and its key is the same as
1303 * the MIC register, there's nothing to do.
1304 */
1305 if (cur->valid && (memcmp(cur->key, key, key_len) == 0))
1306 return;
1307
1308 /* Age current mic Context */
1309 memcpy(old, cur, sizeof(*cur));
1310
1311 /* Initialize new context */
1312 memcpy(cur->key, key, key_len);
1313 cur->window = 33; /* Window always points to the middle */
1314 cur->rx = 0; /* Rx Sequence numbers */
1315 cur->tx = 0; /* Tx sequence numbers */
1316 cur->valid = 1; /* Key is now valid */
1317
1318 /* Give key to mic seed */
1319 emmh32_setseed(&cur->seed, key, key_len, tfm);
1320}
1321
1290/* micinit - Initialize mic seed */ 1322/* micinit - Initialize mic seed */
1291 1323
1292static void micinit(struct airo_info *ai) 1324static void micinit(struct airo_info *ai)
@@ -1297,49 +1329,26 @@ static void micinit(struct airo_info *ai)
1297 PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0); 1329 PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
1298 up(&ai->sem); 1330 up(&ai->sem);
1299 1331
1300 ai->micstats.enabled = (mic_rid.state & 0x00FF) ? 1 : 0; 1332 ai->micstats.enabled = (le16_to_cpu(mic_rid.state) & 0x00FF) ? 1 : 0;
1301 1333 if (!ai->micstats.enabled) {
1302 if (ai->micstats.enabled) { 1334 /* So next time we have a valid key and mic is enabled, we will
1303 /* Key must be valid and different */ 1335 * update the sequence number if the key is the same as before.
1304 if (mic_rid.multicastValid && (!ai->mod[0].mCtx.valid || 1336 */
1305 (memcmp (ai->mod[0].mCtx.key, mic_rid.multicast,
1306 sizeof(ai->mod[0].mCtx.key)) != 0))) {
1307 /* Age current mic Context */
1308 memcpy(&ai->mod[1].mCtx,&ai->mod[0].mCtx,sizeof(miccntx));
1309 /* Initialize new context */
1310 memcpy(&ai->mod[0].mCtx.key,mic_rid.multicast,sizeof(mic_rid.multicast));
1311 ai->mod[0].mCtx.window = 33; //Window always points to the middle
1312 ai->mod[0].mCtx.rx = 0; //Rx Sequence numbers
1313 ai->mod[0].mCtx.tx = 0; //Tx sequence numbers
1314 ai->mod[0].mCtx.valid = 1; //Key is now valid
1315
1316 /* Give key to mic seed */
1317 emmh32_setseed(&ai->mod[0].mCtx.seed,mic_rid.multicast,sizeof(mic_rid.multicast), ai->tfm);
1318 }
1319
1320 /* Key must be valid and different */
1321 if (mic_rid.unicastValid && (!ai->mod[0].uCtx.valid ||
1322 (memcmp(ai->mod[0].uCtx.key, mic_rid.unicast,
1323 sizeof(ai->mod[0].uCtx.key)) != 0))) {
1324 /* Age current mic Context */
1325 memcpy(&ai->mod[1].uCtx,&ai->mod[0].uCtx,sizeof(miccntx));
1326 /* Initialize new context */
1327 memcpy(&ai->mod[0].uCtx.key,mic_rid.unicast,sizeof(mic_rid.unicast));
1328
1329 ai->mod[0].uCtx.window = 33; //Window always points to the middle
1330 ai->mod[0].uCtx.rx = 0; //Rx Sequence numbers
1331 ai->mod[0].uCtx.tx = 0; //Tx sequence numbers
1332 ai->mod[0].uCtx.valid = 1; //Key is now valid
1333
1334 //Give key to mic seed
1335 emmh32_setseed(&ai->mod[0].uCtx.seed, mic_rid.unicast, sizeof(mic_rid.unicast), ai->tfm);
1336 }
1337 } else {
1338 /* So next time we have a valid key and mic is enabled, we will update
1339 * the sequence number if the key is the same as before.
1340 */
1341 ai->mod[0].uCtx.valid = 0; 1337 ai->mod[0].uCtx.valid = 0;
1342 ai->mod[0].mCtx.valid = 0; 1338 ai->mod[0].mCtx.valid = 0;
1339 return;
1340 }
1341
1342 if (mic_rid.multicastValid) {
1343 age_mic_context(&ai->mod[0].mCtx, &ai->mod[1].mCtx,
1344 mic_rid.multicast, sizeof(mic_rid.multicast),
1345 ai->tfm);
1346 }
1347
1348 if (mic_rid.unicastValid) {
1349 age_mic_context(&ai->mod[0].uCtx, &ai->mod[1].uCtx,
1350 mic_rid.unicast, sizeof(mic_rid.unicast),
1351 ai->tfm);
1343 } 1352 }
1344} 1353}
1345 1354
@@ -2730,28 +2739,6 @@ static void airo_networks_initialize(struct airo_info *ai)
2730 &ai->network_free_list); 2739 &ai->network_free_list);
2731} 2740}
2732 2741
2733static int airo_test_wpa_capable(struct airo_info *ai)
2734{
2735 int status;
2736 CapabilityRid cap_rid;
2737
2738 status = readCapabilityRid(ai, &cap_rid, 1);
2739 if (status != SUCCESS) return 0;
2740
2741 /* Only firmware versions 5.30.17 or better can do WPA */
2742 if (le16_to_cpu(cap_rid.softVer) > 0x530
2743 || (le16_to_cpu(cap_rid.softVer) == 0x530
2744 && le16_to_cpu(cap_rid.softSubVer) >= 17)) {
2745 airo_print_info("", "WPA is supported.");
2746 return 1;
2747 }
2748
2749 /* No WPA support */
2750 airo_print_info("", "WPA unsupported (only firmware versions 5.30.17"
2751 " and greater support WPA. Detected %s)", cap_rid.prodVer);
2752 return 0;
2753}
2754
2755static struct net_device *_init_airo_card( unsigned short irq, int port, 2742static struct net_device *_init_airo_card( unsigned short irq, int port,
2756 int is_pcmcia, struct pci_dev *pci, 2743 int is_pcmcia, struct pci_dev *pci,
2757 struct device *dmdev ) 2744 struct device *dmdev )
@@ -2759,6 +2746,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2759 struct net_device *dev; 2746 struct net_device *dev;
2760 struct airo_info *ai; 2747 struct airo_info *ai;
2761 int i, rc; 2748 int i, rc;
2749 CapabilityRid cap_rid;
2762 2750
2763 /* Create the network device object. */ 2751 /* Create the network device object. */
2764 dev = alloc_netdev(sizeof(*ai), "", ether_setup); 2752 dev = alloc_netdev(sizeof(*ai), "", ether_setup);
@@ -2828,7 +2816,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2828 } 2816 }
2829 2817
2830 if (probe) { 2818 if (probe) {
2831 if ( setup_card( ai, dev->dev_addr, 1 ) != SUCCESS ) { 2819 if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
2832 airo_print_err(dev->name, "MAC could not be enabled" ); 2820 airo_print_err(dev->name, "MAC could not be enabled" );
2833 rc = -EIO; 2821 rc = -EIO;
2834 goto err_out_map; 2822 goto err_out_map;
@@ -2838,28 +2826,50 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2838 set_bit(FLAG_FLASHING, &ai->flags); 2826 set_bit(FLAG_FLASHING, &ai->flags);
2839 } 2827 }
2840 2828
2829 strcpy(dev->name, "eth%d");
2830 rc = register_netdev(dev);
2831 if (rc) {
2832 airo_print_err(dev->name, "Couldn't register_netdev");
2833 goto err_out_map;
2834 }
2835 ai->wifidev = init_wifidev(ai, dev);
2836 if (!ai->wifidev)
2837 goto err_out_reg;
2838
2839 rc = readCapabilityRid(ai, &cap_rid, 1);
2840 if (rc != SUCCESS) {
2841 rc = -EIO;
2842 goto err_out_wifi;
2843 }
2844 /* WEP capability discovery */
2845 ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
2846 ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
2847
2848 airo_print_info(dev->name, "Firmware version %x.%x.%02x",
2849 ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
2850 (le16_to_cpu(cap_rid.softVer) & 0xFF),
2851 le16_to_cpu(cap_rid.softSubVer));
2852
2841 /* Test for WPA support */ 2853 /* Test for WPA support */
2842 if (airo_test_wpa_capable(ai)) { 2854 /* Only firmware versions 5.30.17 or better can do WPA */
2855 if (le16_to_cpu(cap_rid.softVer) > 0x530
2856 || (le16_to_cpu(cap_rid.softVer) == 0x530
2857 && le16_to_cpu(cap_rid.softSubVer) >= 17)) {
2858 airo_print_info(ai->dev->name, "WPA supported.");
2859
2843 set_bit(FLAG_WPA_CAPABLE, &ai->flags); 2860 set_bit(FLAG_WPA_CAPABLE, &ai->flags);
2844 ai->bssListFirst = RID_WPA_BSSLISTFIRST; 2861 ai->bssListFirst = RID_WPA_BSSLISTFIRST;
2845 ai->bssListNext = RID_WPA_BSSLISTNEXT; 2862 ai->bssListNext = RID_WPA_BSSLISTNEXT;
2846 ai->bssListRidLen = sizeof(BSSListRid); 2863 ai->bssListRidLen = sizeof(BSSListRid);
2847 } else { 2864 } else {
2865 airo_print_info(ai->dev->name, "WPA unsupported with firmware "
2866 "versions older than 5.30.17.");
2867
2848 ai->bssListFirst = RID_BSSLISTFIRST; 2868 ai->bssListFirst = RID_BSSLISTFIRST;
2849 ai->bssListNext = RID_BSSLISTNEXT; 2869 ai->bssListNext = RID_BSSLISTNEXT;
2850 ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra); 2870 ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
2851 } 2871 }
2852 2872
2853 strcpy(dev->name, "eth%d");
2854 rc = register_netdev(dev);
2855 if (rc) {
2856 airo_print_err(dev->name, "Couldn't register_netdev");
2857 goto err_out_map;
2858 }
2859 ai->wifidev = init_wifidev(ai, dev);
2860 if (!ai->wifidev)
2861 goto err_out_reg;
2862
2863 set_bit(FLAG_REGISTERED,&ai->flags); 2873 set_bit(FLAG_REGISTERED,&ai->flags);
2864 airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr); 2874 airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
2865 2875
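With this hunk the capability RID is read once during _init_airo_card(), and the WEP capability and firmware version are decoded from it directly. A sketch of that decoding with host-order integers (the le16_to_cpu() conversions are omitted and the struct and helper names are invented for illustration):

#include <stdio.h>
#include <stdint.h>

struct caps {
	int wep_capable;
	int max_wep_idx;
};

static struct caps decode_soft_cap(uint16_t soft_cap)
{
	struct caps c;

	c.wep_capable = (soft_cap & 0x02) ? 1 : 0;	/* WEP supported at all */
	c.max_wep_idx = (soft_cap & 0x80) ? 3 : 0;	/* four key slots vs. one */
	return c;
}

static void print_fw_version(uint16_t soft_ver, uint16_t soft_sub_ver)
{
	printf("Firmware version %x.%x.%02x\n",
	       (soft_ver >> 8) & 0xF, soft_ver & 0xFF, soft_sub_ver);
}

int main(void)
{
	struct caps c = decode_soft_cap(0x82);

	print_fw_version(0x0530, 0x17);		/* prints 5.30.17 */
	printf("wep=%d max_idx=%d\n", c.wep_capable, c.max_wep_idx);
	return 0;
}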
@@ -3127,314 +3137,354 @@ static int header_len(__le16 ctl)
3127 return 24; 3137 return 24;
3128} 3138}
3129 3139
3130static irqreturn_t airo_interrupt(int irq, void *dev_id) 3140static void airo_handle_cisco_mic(struct airo_info *ai)
3131{ 3141{
3132 struct net_device *dev = dev_id; 3142 if (test_bit(FLAG_MIC_CAPABLE, &ai->flags)) {
3143 set_bit(JOB_MIC, &ai->jobs);
3144 wake_up_interruptible(&ai->thr_wait);
3145 }
3146}
3147
3148/* Airo Status codes */
3149#define STAT_NOBEACON 0x8000 /* Loss of sync - missed beacons */
3150#define STAT_MAXRETRIES 0x8001 /* Loss of sync - max retries */
3151#define STAT_MAXARL 0x8002 /* Loss of sync - average retry level exceeded*/
3152#define STAT_FORCELOSS 0x8003 /* Loss of sync - host request */
3153#define STAT_TSFSYNC 0x8004 /* Loss of sync - TSF synchronization */
3154#define STAT_DEAUTH 0x8100 /* low byte is 802.11 reason code */
3155#define STAT_DISASSOC 0x8200 /* low byte is 802.11 reason code */
3156#define STAT_ASSOC_FAIL 0x8400 /* low byte is 802.11 reason code */
3157#define STAT_AUTH_FAIL 0x0300 /* low byte is 802.11 reason code */
3158#define STAT_ASSOC 0x0400 /* Associated */
3159#define STAT_REASSOC 0x0600 /* Reassociated? Only on firmware >= 5.30.17 */
3160
3161static void airo_print_status(const char *devname, u16 status)
3162{
3163 u8 reason = status & 0xFF;
3164
3165 switch (status) {
3166 case STAT_NOBEACON:
3167 airo_print_dbg(devname, "link lost (missed beacons)");
3168 break;
3169 case STAT_MAXRETRIES:
3170 case STAT_MAXARL:
3171 airo_print_dbg(devname, "link lost (max retries)");
3172 break;
3173 case STAT_FORCELOSS:
3174 airo_print_dbg(devname, "link lost (local choice)");
3175 break;
3176 case STAT_TSFSYNC:
3177 airo_print_dbg(devname, "link lost (TSF sync lost)");
3178 break;
3179 case STAT_DEAUTH:
3180 airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
3181 break;
3182 case STAT_DISASSOC:
3183 airo_print_dbg(devname, "disassociated (reason: %d)", reason);
3184 break;
3185 case STAT_ASSOC_FAIL:
3186 airo_print_dbg(devname, "association failed (reason: %d)",
3187 reason);
3188 break;
3189 case STAT_AUTH_FAIL:
3190 airo_print_dbg(devname, "authentication failed (reason: %d)",
3191 reason);
3192 break;
3193 default:
3194 break;
3195 }
3196}
3197
3198static void airo_handle_link(struct airo_info *ai)
3199{
3200 union iwreq_data wrqu;
3201 int scan_forceloss = 0;
3133 u16 status; 3202 u16 status;
3134 u16 fid;
3135 struct airo_info *apriv = dev->ml_priv;
3136 u16 savedInterrupts = 0;
3137 int handled = 0;
3138 3203
3139 if (!netif_device_present(dev)) 3204 /* Get new status and acknowledge the link change */
3140 return IRQ_NONE; 3205 status = le16_to_cpu(IN4500(ai, LINKSTAT));
3206 OUT4500(ai, EVACK, EV_LINK);
3141 3207
3142 for (;;) { 3208 if ((status == STAT_FORCELOSS) && (ai->scan_timeout > 0))
3143 status = IN4500( apriv, EVSTAT ); 3209 scan_forceloss = 1;
3144 if ( !(status & STATUS_INTS) || status == 0xffff ) break;
3145 3210
3146 handled = 1; 3211 airo_print_status(ai->dev->name, status);
3147 3212
3148 if ( status & EV_AWAKE ) { 3213 if ((status == STAT_ASSOC) || (status == STAT_REASSOC)) {
3149 OUT4500( apriv, EVACK, EV_AWAKE ); 3214 if (auto_wep)
3150 OUT4500( apriv, EVACK, EV_AWAKE ); 3215 ai->expires = 0;
3151 } 3216 if (ai->list_bss_task)
3217 wake_up_process(ai->list_bss_task);
3218 set_bit(FLAG_UPDATE_UNI, &ai->flags);
3219 set_bit(FLAG_UPDATE_MULTI, &ai->flags);
3152 3220
3153 if (!savedInterrupts) { 3221 if (down_trylock(&ai->sem) != 0) {
3154 savedInterrupts = IN4500( apriv, EVINTEN ); 3222 set_bit(JOB_EVENT, &ai->jobs);
3155 OUT4500( apriv, EVINTEN, 0 ); 3223 wake_up_interruptible(&ai->thr_wait);
3224 } else
3225 airo_send_event(ai->dev);
3226 } else if (!scan_forceloss) {
3227 if (auto_wep && !ai->expires) {
3228 ai->expires = RUN_AT(3*HZ);
3229 wake_up_interruptible(&ai->thr_wait);
3156 } 3230 }
3157 3231
3158 if ( status & EV_MIC ) { 3232 /* Send event to user space */
3159 OUT4500( apriv, EVACK, EV_MIC ); 3233 memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
3160 if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { 3234 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
3161 set_bit(JOB_MIC, &apriv->jobs); 3235 wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
3162 wake_up_interruptible(&apriv->thr_wait); 3236 }
3163 } 3237}
3164 }
3165 if ( status & EV_LINK ) {
3166 union iwreq_data wrqu;
3167 int scan_forceloss = 0;
3168 /* The link status has changed, if you want to put a
3169 monitor hook in, do it here. (Remember that
3170 interrupts are still disabled!)
3171 */
3172 u16 newStatus = IN4500(apriv, LINKSTAT);
3173 OUT4500( apriv, EVACK, EV_LINK);
3174 /* Here is what newStatus means: */
3175#define NOBEACON 0x8000 /* Loss of sync - missed beacons */
3176#define MAXRETRIES 0x8001 /* Loss of sync - max retries */
3177#define MAXARL 0x8002 /* Loss of sync - average retry level exceeded*/
3178#define FORCELOSS 0x8003 /* Loss of sync - host request */
3179#define TSFSYNC 0x8004 /* Loss of sync - TSF synchronization */
3180#define DEAUTH 0x8100 /* Deauthentication (low byte is reason code) */
3181#define DISASS 0x8200 /* Disassociation (low byte is reason code) */
3182#define ASSFAIL 0x8400 /* Association failure (low byte is reason
3183 code) */
3184#define AUTHFAIL 0x0300 /* Authentication failure (low byte is reason
3185 code) */
3186#define ASSOCIATED 0x0400 /* Associated */
3187#define REASSOCIATED 0x0600 /* Reassociated? Only on firmware >= 5.30.17 */
3188#define RC_RESERVED 0 /* Reserved return code */
3189#define RC_NOREASON 1 /* Unspecified reason */
3190#define RC_AUTHINV 2 /* Previous authentication invalid */
3191#define RC_DEAUTH 3 /* Deauthenticated because sending station is
3192 leaving */
3193#define RC_NOACT 4 /* Disassociated due to inactivity */
3194#define RC_MAXLOAD 5 /* Disassociated because AP is unable to handle
3195 all currently associated stations */
3196#define RC_BADCLASS2 6 /* Class 2 frame received from
3197 non-Authenticated station */
3198#define RC_BADCLASS3 7 /* Class 3 frame received from
3199 non-Associated station */
3200#define RC_STATLEAVE 8 /* Disassociated because sending station is
3201 leaving BSS */
3202#define RC_NOAUTH 9 /* Station requesting (Re)Association is not
3203 Authenticated with the responding station */
3204 if (newStatus == FORCELOSS && apriv->scan_timeout > 0)
3205 scan_forceloss = 1;
3206 if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) {
3207 if (auto_wep)
3208 apriv->expires = 0;
3209 if (apriv->list_bss_task)
3210 wake_up_process(apriv->list_bss_task);
3211 set_bit(FLAG_UPDATE_UNI, &apriv->flags);
3212 set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
3213
3214 if (down_trylock(&apriv->sem) != 0) {
3215 set_bit(JOB_EVENT, &apriv->jobs);
3216 wake_up_interruptible(&apriv->thr_wait);
3217 } else
3218 airo_send_event(dev);
3219 } else if (!scan_forceloss) {
3220 if (auto_wep && !apriv->expires) {
3221 apriv->expires = RUN_AT(3*HZ);
3222 wake_up_interruptible(&apriv->thr_wait);
3223 }
3224 3238
3225 /* Send event to user space */ 3239static void airo_handle_rx(struct airo_info *ai)
3226 memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN); 3240{
3227 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 3241 struct sk_buff *skb = NULL;
3228 wireless_send_event(dev, SIOCGIWAP, &wrqu,NULL); 3242 __le16 fc, v, *buffer, tmpbuf[4];
3229 } 3243 u16 len, hdrlen = 0, gap, fid;
3230 } 3244 struct rx_hdr hdr;
3245 int success = 0;
3231 3246
3232 /* Check to see if there is something to receive */ 3247 if (test_bit(FLAG_MPI, &ai->flags)) {
3233 if ( status & EV_RX ) { 3248 if (test_bit(FLAG_802_11, &ai->flags))
3234 struct sk_buff *skb = NULL; 3249 mpi_receive_802_11(ai);
3235 __le16 fc, v; 3250 else
3236 u16 len, hdrlen = 0; 3251 mpi_receive_802_3(ai);
3237#pragma pack(1) 3252 OUT4500(ai, EVACK, EV_RX);
3238 struct { 3253 return;
3239 __le16 status, len; 3254 }
3240 u8 rssi[2];
3241 u8 rate;
3242 u8 freq;
3243 __le16 tmp[4];
3244 } hdr;
3245#pragma pack()
3246 u16 gap;
3247 __le16 tmpbuf[4];
3248 __le16 *buffer;
3249
3250 if (test_bit(FLAG_MPI,&apriv->flags)) {
3251 if (test_bit(FLAG_802_11, &apriv->flags))
3252 mpi_receive_802_11(apriv);
3253 else
3254 mpi_receive_802_3(apriv);
3255 OUT4500(apriv, EVACK, EV_RX);
3256 goto exitrx;
3257 }
3258 3255
3259 fid = IN4500( apriv, RXFID ); 3256 fid = IN4500(ai, RXFID);
3260
3261 /* Get the packet length */
3262 if (test_bit(FLAG_802_11, &apriv->flags)) {
3263 bap_setup (apriv, fid, 4, BAP0);
3264 bap_read (apriv, (__le16*)&hdr, sizeof(hdr), BAP0);
3265 /* Bad CRC. Ignore packet */
3266 if (le16_to_cpu(hdr.status) & 2)
3267 hdr.len = 0;
3268 if (apriv->wifidev == NULL)
3269 hdr.len = 0;
3270 } else {
3271 bap_setup (apriv, fid, 0x36, BAP0);
3272 bap_read (apriv, &hdr.len, 2, BAP0);
3273 }
3274 len = le16_to_cpu(hdr.len);
3275 3257
3276 if (len > AIRO_DEF_MTU) { 3258 /* Get the packet length */
3277 airo_print_err(apriv->dev->name, "Bad size %d", len); 3259 if (test_bit(FLAG_802_11, &ai->flags)) {
3278 goto badrx; 3260 bap_setup (ai, fid, 4, BAP0);
3279 } 3261 bap_read (ai, (__le16*)&hdr, sizeof(hdr), BAP0);
3280 if (len == 0) 3262 /* Bad CRC. Ignore packet */
3281 goto badrx; 3263 if (le16_to_cpu(hdr.status) & 2)
3264 hdr.len = 0;
3265 if (ai->wifidev == NULL)
3266 hdr.len = 0;
3267 } else {
3268 bap_setup(ai, fid, 0x36, BAP0);
3269 bap_read(ai, &hdr.len, 2, BAP0);
3270 }
3271 len = le16_to_cpu(hdr.len);
3282 3272
3283 if (test_bit(FLAG_802_11, &apriv->flags)) { 3273 if (len > AIRO_DEF_MTU) {
3284 bap_read (apriv, &fc, sizeof(fc), BAP0); 3274 airo_print_err(ai->dev->name, "Bad size %d", len);
3285 hdrlen = header_len(fc); 3275 goto done;
3286 } else 3276 }
3287 hdrlen = ETH_ALEN * 2; 3277 if (len == 0)
3278 goto done;
3288 3279
3289 skb = dev_alloc_skb( len + hdrlen + 2 + 2 ); 3280 if (test_bit(FLAG_802_11, &ai->flags)) {
3290 if ( !skb ) { 3281 bap_read(ai, &fc, sizeof (fc), BAP0);
3291 dev->stats.rx_dropped++; 3282 hdrlen = header_len(fc);
3292 goto badrx; 3283 } else
3293 } 3284 hdrlen = ETH_ALEN * 2;
3294 skb_reserve(skb, 2); /* This way the IP header is aligned */ 3285
3295 buffer = (__le16*)skb_put (skb, len + hdrlen); 3286 skb = dev_alloc_skb(len + hdrlen + 2 + 2);
3296 if (test_bit(FLAG_802_11, &apriv->flags)) { 3287 if (!skb) {
3297 buffer[0] = fc; 3288 ai->dev->stats.rx_dropped++;
3298 bap_read (apriv, buffer + 1, hdrlen - 2, BAP0); 3289 goto done;
3299 if (hdrlen == 24) 3290 }
3300 bap_read (apriv, tmpbuf, 6, BAP0); 3291
3301 3292 skb_reserve(skb, 2); /* This way the IP header is aligned */
3302 bap_read (apriv, &v, sizeof(v), BAP0); 3293 buffer = (__le16 *) skb_put(skb, len + hdrlen);
3303 gap = le16_to_cpu(v); 3294 if (test_bit(FLAG_802_11, &ai->flags)) {
3304 if (gap) { 3295 buffer[0] = fc;
3305 if (gap <= 8) { 3296 bap_read(ai, buffer + 1, hdrlen - 2, BAP0);
3306 bap_read (apriv, tmpbuf, gap, BAP0); 3297 if (hdrlen == 24)
3307 } else { 3298 bap_read(ai, tmpbuf, 6, BAP0);
3308 airo_print_err(apriv->dev->name, "gaplen too " 3299
3309 "big. Problems will follow..."); 3300 bap_read(ai, &v, sizeof(v), BAP0);
3310 } 3301 gap = le16_to_cpu(v);
3311 } 3302 if (gap) {
3312 bap_read (apriv, buffer + hdrlen/2, len, BAP0); 3303 if (gap <= 8) {
3304 bap_read(ai, tmpbuf, gap, BAP0);
3313 } else { 3305 } else {
3314 MICBuffer micbuf; 3306 airo_print_err(ai->dev->name, "gaplen too "
3315 bap_read (apriv, buffer, ETH_ALEN*2, BAP0); 3307 "big. Problems will follow...");
3316 if (apriv->micstats.enabled) { 3308 }
3317 bap_read (apriv,(__le16*)&micbuf,sizeof(micbuf),BAP0); 3309 }
3318 if (ntohs(micbuf.typelen) > 0x05DC) 3310 bap_read(ai, buffer + hdrlen/2, len, BAP0);
3319 bap_setup (apriv, fid, 0x44, BAP0); 3311 } else {
3320 else { 3312 MICBuffer micbuf;
3321 if (len <= sizeof(micbuf)) 3313
3322 goto badmic; 3314 bap_read(ai, buffer, ETH_ALEN * 2, BAP0);
3323 3315 if (ai->micstats.enabled) {
3324 len -= sizeof(micbuf); 3316 bap_read(ai, (__le16 *) &micbuf, sizeof (micbuf), BAP0);
3325 skb_trim (skb, len + hdrlen); 3317 if (ntohs(micbuf.typelen) > 0x05DC)
3326 } 3318 bap_setup(ai, fid, 0x44, BAP0);
3327 } 3319 else {
3328 bap_read(apriv,buffer+ETH_ALEN,len,BAP0); 3320 if (len <= sizeof (micbuf)) {
3329 if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) { 3321 dev_kfree_skb_irq(skb);
3330badmic: 3322 goto done;
3331 dev_kfree_skb_irq (skb);
3332badrx:
3333 OUT4500( apriv, EVACK, EV_RX);
3334 goto exitrx;
3335 } 3323 }
3324
3325 len -= sizeof(micbuf);
3326 skb_trim(skb, len + hdrlen);
3336 } 3327 }
3328 }
3329
3330 bap_read(ai, buffer + ETH_ALEN, len, BAP0);
3331 if (decapsulate(ai, &micbuf, (etherHead*) buffer, len))
3332 dev_kfree_skb_irq (skb);
3333 else
3334 success = 1;
3335 }
3336
3337#ifdef WIRELESS_SPY 3337#ifdef WIRELESS_SPY
3338 if (apriv->spy_data.spy_number > 0) { 3338 if (success && (ai->spy_data.spy_number > 0)) {
3339 char *sa; 3339 char *sa;
3340 struct iw_quality wstats; 3340 struct iw_quality wstats;
3341 /* Prepare spy data : addr + qual */ 3341
3342 if (!test_bit(FLAG_802_11, &apriv->flags)) { 3342 /* Prepare spy data : addr + qual */
3343 sa = (char*)buffer + 6; 3343 if (!test_bit(FLAG_802_11, &ai->flags)) {
3344 bap_setup (apriv, fid, 8, BAP0); 3344 sa = (char *) buffer + 6;
3345 bap_read (apriv, (__le16*)hdr.rssi, 2, BAP0); 3345 bap_setup(ai, fid, 8, BAP0);
3346 } else 3346 bap_read(ai, (__le16 *) hdr.rssi, 2, BAP0);
3347 sa = (char*)buffer + 10; 3347 } else
3348 wstats.qual = hdr.rssi[0]; 3348 sa = (char *) buffer + 10;
3349 if (apriv->rssi) 3349 wstats.qual = hdr.rssi[0];
3350 wstats.level = 0x100 - apriv->rssi[hdr.rssi[1]].rssidBm; 3350 if (ai->rssi)
3351 else 3351 wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm;
3352 wstats.level = (hdr.rssi[1] + 321) / 2; 3352 else
3353 wstats.noise = apriv->wstats.qual.noise; 3353 wstats.level = (hdr.rssi[1] + 321) / 2;
3354 wstats.updated = IW_QUAL_LEVEL_UPDATED 3354 wstats.noise = ai->wstats.qual.noise;
3355 | IW_QUAL_QUAL_UPDATED 3355 wstats.updated = IW_QUAL_LEVEL_UPDATED
3356 | IW_QUAL_DBM; 3356 | IW_QUAL_QUAL_UPDATED
3357 /* Update spy records */ 3357 | IW_QUAL_DBM;
3358 wireless_spy_update(dev, sa, &wstats); 3358 /* Update spy records */
3359 } 3359 wireless_spy_update(ai->dev, sa, &wstats);
3360 }
3360#endif /* WIRELESS_SPY */ 3361#endif /* WIRELESS_SPY */
3361 OUT4500( apriv, EVACK, EV_RX);
3362 3362
3363 if (test_bit(FLAG_802_11, &apriv->flags)) { 3363done:
3364 skb_reset_mac_header(skb); 3364 OUT4500(ai, EVACK, EV_RX);
3365 skb->pkt_type = PACKET_OTHERHOST; 3365
3366 skb->dev = apriv->wifidev; 3366 if (success) {
3367 skb->protocol = htons(ETH_P_802_2); 3367 if (test_bit(FLAG_802_11, &ai->flags)) {
3368 } else 3368 skb_reset_mac_header(skb);
3369 skb->protocol = eth_type_trans(skb,dev); 3369 skb->pkt_type = PACKET_OTHERHOST;
3370 skb->ip_summed = CHECKSUM_NONE; 3370 skb->dev = ai->wifidev;
3371 skb->protocol = htons(ETH_P_802_2);
3372 } else
3373 skb->protocol = eth_type_trans(skb, ai->dev);
3374 skb->ip_summed = CHECKSUM_NONE;
3375
3376 netif_rx(skb);
3377 }
3378}
3379
3380static void airo_handle_tx(struct airo_info *ai, u16 status)
3381{
3382 int i, len = 0, index = -1;
3383 u16 fid;
3371 3384
3372 netif_rx( skb ); 3385 if (test_bit(FLAG_MPI, &ai->flags)) {
3386 unsigned long flags;
3387
3388 if (status & EV_TXEXC)
3389 get_tx_error(ai, -1);
3390
3391 spin_lock_irqsave(&ai->aux_lock, flags);
3392 if (!skb_queue_empty(&ai->txq)) {
3393 spin_unlock_irqrestore(&ai->aux_lock,flags);
3394 mpi_send_packet(ai->dev);
3395 } else {
3396 clear_bit(FLAG_PENDING_XMIT, &ai->flags);
3397 spin_unlock_irqrestore(&ai->aux_lock,flags);
3398 netif_wake_queue(ai->dev);
3373 } 3399 }
3374exitrx: 3400 OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
3401 return;
3402 }
3375 3403
3376 /* Check to see if a packet has been transmitted */ 3404 fid = IN4500(ai, TXCOMPLFID);
3377 if ( status & ( EV_TX|EV_TXCPY|EV_TXEXC ) ) {
3378 int i;
3379 int len = 0;
3380 int index = -1;
3381
3382 if (test_bit(FLAG_MPI,&apriv->flags)) {
3383 unsigned long flags;
3384
3385 if (status & EV_TXEXC)
3386 get_tx_error(apriv, -1);
3387 spin_lock_irqsave(&apriv->aux_lock, flags);
3388 if (!skb_queue_empty(&apriv->txq)) {
3389 spin_unlock_irqrestore(&apriv->aux_lock,flags);
3390 mpi_send_packet (dev);
3391 } else {
3392 clear_bit(FLAG_PENDING_XMIT, &apriv->flags);
3393 spin_unlock_irqrestore(&apriv->aux_lock,flags);
3394 netif_wake_queue (dev);
3395 }
3396 OUT4500( apriv, EVACK,
3397 status & (EV_TX|EV_TXCPY|EV_TXEXC));
3398 goto exittx;
3399 }
3400 3405
3401 fid = IN4500(apriv, TXCOMPLFID); 3406 for(i = 0; i < MAX_FIDS; i++) {
3407 if ((ai->fids[i] & 0xffff) == fid) {
3408 len = ai->fids[i] >> 16;
3409 index = i;
3410 }
3411 }
3402 3412
3403 for( i = 0; i < MAX_FIDS; i++ ) { 3413 if (index != -1) {
3404 if ( ( apriv->fids[i] & 0xffff ) == fid ) { 3414 if (status & EV_TXEXC)
3405 len = apriv->fids[i] >> 16; 3415 get_tx_error(ai, index);
3406 index = i; 3416
3407 } 3417 OUT4500(ai, EVACK, status & (EV_TX | EV_TXEXC));
3408 } 3418
3409 if (index != -1) { 3419 /* Set up to be used again */
3410 if (status & EV_TXEXC) 3420 ai->fids[index] &= 0xffff;
3411 get_tx_error(apriv, index); 3421 if (index < MAX_FIDS / 2) {
3412 OUT4500( apriv, EVACK, status & (EV_TX | EV_TXEXC)); 3422 if (!test_bit(FLAG_PENDING_XMIT, &ai->flags))
3413 /* Set up to be used again */ 3423 netif_wake_queue(ai->dev);
3414 apriv->fids[index] &= 0xffff; 3424 } else {
3415 if (index < MAX_FIDS / 2) { 3425 if (!test_bit(FLAG_PENDING_XMIT11, &ai->flags))
3416 if (!test_bit(FLAG_PENDING_XMIT, &apriv->flags)) 3426 netif_wake_queue(ai->wifidev);
3417 netif_wake_queue(dev); 3427 }
3418 } else { 3428 } else {
3419 if (!test_bit(FLAG_PENDING_XMIT11, &apriv->flags)) 3429 OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
3420 netif_wake_queue(apriv->wifidev); 3430 airo_print_err(ai->dev->name, "Unallocated FID was used to xmit");
3421 } 3431 }
3422 } else { 3432}
3423 OUT4500( apriv, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC)); 3433
3424 airo_print_err(apriv->dev->name, "Unallocated FID was " 3434static irqreturn_t airo_interrupt(int irq, void *dev_id)
3425 "used to xmit" ); 3435{
3426 } 3436 struct net_device *dev = dev_id;
3437 u16 status, savedInterrupts = 0;
3438 struct airo_info *ai = dev->ml_priv;
3439 int handled = 0;
3440
3441 if (!netif_device_present(dev))
3442 return IRQ_NONE;
3443
3444 for (;;) {
3445 status = IN4500(ai, EVSTAT);
3446 if (!(status & STATUS_INTS) || (status == 0xffff))
3447 break;
3448
3449 handled = 1;
3450
3451 if (status & EV_AWAKE) {
3452 OUT4500(ai, EVACK, EV_AWAKE);
3453 OUT4500(ai, EVACK, EV_AWAKE);
3454 }
3455
3456 if (!savedInterrupts) {
3457 savedInterrupts = IN4500(ai, EVINTEN);
3458 OUT4500(ai, EVINTEN, 0);
3459 }
3460
3461 if (status & EV_MIC) {
3462 OUT4500(ai, EVACK, EV_MIC);
3463 airo_handle_cisco_mic(ai);
3427 } 3464 }
3428exittx: 3465
3429 if ( status & ~STATUS_INTS & ~IGNORE_INTS ) 3466 if (status & EV_LINK) {
3430 airo_print_warn(apriv->dev->name, "Got weird status %x", 3467 /* Link status changed */
3468 airo_handle_link(ai);
3469 }
3470
3471 /* Check to see if there is something to receive */
3472 if (status & EV_RX)
3473 airo_handle_rx(ai);
3474
3475 /* Check to see if a packet has been transmitted */
3476 if (status & (EV_TX | EV_TXCPY | EV_TXEXC))
3477 airo_handle_tx(ai, status);
3478
3479 if ( status & ~STATUS_INTS & ~IGNORE_INTS ) {
3480 airo_print_warn(ai->dev->name, "Got weird status %x",
3431 status & ~STATUS_INTS & ~IGNORE_INTS ); 3481 status & ~STATUS_INTS & ~IGNORE_INTS );
3482 }
3432 } 3483 }
3433 3484
3434 if (savedInterrupts) 3485 if (savedInterrupts)
3435 OUT4500( apriv, EVINTEN, savedInterrupts ); 3486 OUT4500(ai, EVINTEN, savedInterrupts);
3436 3487
3437 /* done.. */
3438 return IRQ_RETVAL(handled); 3488 return IRQ_RETVAL(handled);
3439} 3489}
3440 3490
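After this hunk airo_interrupt() is a thin loop that acknowledges each event and calls airo_handle_cisco_mic(), airo_handle_link(), airo_handle_rx() or airo_handle_tx(); airo_print_status() translates the LINKSTAT codes that used to be open-coded macros inside the handler. A stand-alone approximation of that translation, with printf() standing in for airo_print_dbg() and the status values copied from the hunk:

#include <stdio.h>
#include <stdint.h>

#define STAT_NOBEACON   0x8000	/* Loss of sync - missed beacons */
#define STAT_MAXRETRIES 0x8001	/* Loss of sync - max retries */
#define STAT_MAXARL     0x8002	/* Loss of sync - average retry level exceeded */
#define STAT_FORCELOSS  0x8003	/* Loss of sync - host request */
#define STAT_TSFSYNC    0x8004	/* Loss of sync - TSF synchronization */
#define STAT_DEAUTH     0x8100	/* low byte is 802.11 reason code */
#define STAT_DISASSOC   0x8200
#define STAT_ASSOC_FAIL 0x8400
#define STAT_AUTH_FAIL  0x0300

static void print_status(uint16_t status)
{
	uint8_t reason = status & 0xFF;

	switch (status) {
	case STAT_NOBEACON:   printf("link lost (missed beacons)\n"); break;
	case STAT_MAXRETRIES:
	case STAT_MAXARL:     printf("link lost (max retries)\n"); break;
	case STAT_FORCELOSS:  printf("link lost (local choice)\n"); break;
	case STAT_TSFSYNC:    printf("link lost (TSF sync lost)\n"); break;
	case STAT_DEAUTH:     printf("deauthenticated (reason: %d)\n", reason); break;
	case STAT_DISASSOC:   printf("disassociated (reason: %d)\n", reason); break;
	case STAT_ASSOC_FAIL: printf("association failed (reason: %d)\n", reason); break;
	case STAT_AUTH_FAIL:  printf("authentication failed (reason: %d)\n", reason); break;
	default: break;
	}
}

int main(void)
{
	print_status(STAT_ASSOC_FAIL);	/* reason byte is 0 here */
	return 0;
}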
@@ -3613,18 +3663,10 @@ static void mpi_receive_802_11(struct airo_info *ai)
3613 struct sk_buff *skb = NULL; 3663 struct sk_buff *skb = NULL;
3614 u16 len, hdrlen = 0; 3664 u16 len, hdrlen = 0;
3615 __le16 fc; 3665 __le16 fc;
3616#pragma pack(1) 3666 struct rx_hdr hdr;
3617 struct {
3618 __le16 status, len;
3619 u8 rssi[2];
3620 u8 rate;
3621 u8 freq;
3622 __le16 tmp[4];
3623 } hdr;
3624#pragma pack()
3625 u16 gap; 3667 u16 gap;
3626 u16 *buffer; 3668 u16 *buffer;
3627 char *ptr = ai->rxfids[0].virtual_host_addr+4; 3669 char *ptr = ai->rxfids[0].virtual_host_addr + 4;
3628 3670
3629 memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); 3671 memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
3630 memcpy ((char *)&hdr, ptr, sizeof(hdr)); 3672 memcpy ((char *)&hdr, ptr, sizeof(hdr));
@@ -3691,6 +3733,7 @@ static void mpi_receive_802_11(struct airo_info *ai)
3691 skb->protocol = htons(ETH_P_802_2); 3733 skb->protocol = htons(ETH_P_802_2);
3692 skb->ip_summed = CHECKSUM_NONE; 3734 skb->ip_summed = CHECKSUM_NONE;
3693 netif_rx( skb ); 3735 netif_rx( skb );
3736
3694badrx: 3737badrx:
3695 if (rxd.valid == 0) { 3738 if (rxd.valid == 0) {
3696 rxd.valid = 1; 3739 rxd.valid = 1;
@@ -5131,55 +5174,98 @@ static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
5131 return rc; 5174 return rc;
5132} 5175}
5133 5176
5134/* Returns the length of the key at the index. If index == 0xffff 5177/* Returns the WEP key at the specified index, or -1 if that key does
5135 * the index of the transmit key is returned. If the key doesn't exist, 5178 * not exist. The buffer is assumed to be at least 16 bytes in length.
5136 * -1 will be returned.
5137 */ 5179 */
5138static int get_wep_key(struct airo_info *ai, u16 index) { 5180static int get_wep_key(struct airo_info *ai, u16 index, char *buf, u16 buflen)
5181{
5139 WepKeyRid wkr; 5182 WepKeyRid wkr;
5140 int rc; 5183 int rc;
5141 __le16 lastindex; 5184 __le16 lastindex;
5142 5185
5143 rc = readWepKeyRid(ai, &wkr, 1, 1); 5186 rc = readWepKeyRid(ai, &wkr, 1, 1);
5144 if (rc == SUCCESS) do { 5187 if (rc != SUCCESS)
5188 return -1;
5189 do {
5145 lastindex = wkr.kindex; 5190 lastindex = wkr.kindex;
5146 if (wkr.kindex == cpu_to_le16(index)) { 5191 if (le16_to_cpu(wkr.kindex) == index) {
5147 if (index == 0xffff) { 5192 int klen = min_t(int, buflen, le16_to_cpu(wkr.klen));
5148 return wkr.mac[0]; 5193 memcpy(buf, wkr.key, klen);
5149 } 5194 return klen;
5150 return le16_to_cpu(wkr.klen);
5151 } 5195 }
5152 readWepKeyRid(ai, &wkr, 0, 1); 5196 rc = readWepKeyRid(ai, &wkr, 0, 1);
5197 if (rc != SUCCESS)
5198 return -1;
5153 } while (lastindex != wkr.kindex); 5199 } while (lastindex != wkr.kindex);
5154 return -1; 5200 return -1;
5155} 5201}
5156 5202
5157static int set_wep_key(struct airo_info *ai, u16 index, 5203static int get_wep_tx_idx(struct airo_info *ai)
5158 const char *key, u16 keylen, int perm, int lock ) 5204{
5205 WepKeyRid wkr;
5206 int rc;
5207 __le16 lastindex;
5208
5209 rc = readWepKeyRid(ai, &wkr, 1, 1);
5210 if (rc != SUCCESS)
5211 return -1;
5212 do {
5213 lastindex = wkr.kindex;
5214 if (wkr.kindex == cpu_to_le16(0xffff))
5215 return wkr.mac[0];
5216 rc = readWepKeyRid(ai, &wkr, 0, 1);
5217 if (rc != SUCCESS)
5218 return -1;
5219 } while (lastindex != wkr.kindex);
5220 return -1;
5221}
5222
5223static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
5224 u16 keylen, int perm, int lock)
5159{ 5225{
5160 static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 5226 static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
5161 WepKeyRid wkr; 5227 WepKeyRid wkr;
5228 int rc;
5162 5229
5163 memset(&wkr, 0, sizeof(wkr));
5164 if (keylen == 0) { 5230 if (keylen == 0) {
5165// We are selecting which key to use 5231 airo_print_err(ai->dev->name, "%s: key length to set was zero",
5166 wkr.len = cpu_to_le16(sizeof(wkr)); 5232 __func__);
5167 wkr.kindex = cpu_to_le16(0xffff); 5233 return -1;
5168 wkr.mac[0] = (char)index;
5169 if (perm) ai->defindex = (char)index;
5170 } else {
5171// We are actually setting the key
5172 wkr.len = cpu_to_le16(sizeof(wkr));
5173 wkr.kindex = cpu_to_le16(index);
5174 wkr.klen = cpu_to_le16(keylen);
5175 memcpy( wkr.key, key, keylen );
5176 memcpy( wkr.mac, macaddr, ETH_ALEN );
5177 } 5234 }
5178 5235
5236 memset(&wkr, 0, sizeof(wkr));
5237 wkr.len = cpu_to_le16(sizeof(wkr));
5238 wkr.kindex = cpu_to_le16(index);
5239 wkr.klen = cpu_to_le16(keylen);
5240 memcpy(wkr.key, key, keylen);
5241 memcpy(wkr.mac, macaddr, ETH_ALEN);
5242
5179 if (perm) disable_MAC(ai, lock); 5243 if (perm) disable_MAC(ai, lock);
5180 writeWepKeyRid(ai, &wkr, perm, lock); 5244 rc = writeWepKeyRid(ai, &wkr, perm, lock);
5181 if (perm) enable_MAC(ai, lock); 5245 if (perm) enable_MAC(ai, lock);
5182 return 0; 5246 return rc;
5247}
5248
5249static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
5250{
5251 WepKeyRid wkr;
5252 int rc;
5253
5254 memset(&wkr, 0, sizeof(wkr));
5255 wkr.len = cpu_to_le16(sizeof(wkr));
5256 wkr.kindex = cpu_to_le16(0xffff);
5257 wkr.mac[0] = (char)index;
5258
5259 if (perm) {
5260 ai->defindex = (char)index;
5261 disable_MAC(ai, lock);
5262 }
5263
5264 rc = writeWepKeyRid(ai, &wkr, perm, lock);
5265
5266 if (perm)
5267 enable_MAC(ai, lock);
5268 return rc;
5183} 5269}
5184 5270
5185static void proc_wepkey_on_close( struct inode *inode, struct file *file ) { 5271static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
@@ -5187,7 +5273,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5187 struct proc_dir_entry *dp = PDE(inode); 5273 struct proc_dir_entry *dp = PDE(inode);
5188 struct net_device *dev = dp->data; 5274 struct net_device *dev = dp->data;
5189 struct airo_info *ai = dev->ml_priv; 5275 struct airo_info *ai = dev->ml_priv;
5190 int i; 5276 int i, rc;
5191 char key[16]; 5277 char key[16];
5192 u16 index = 0; 5278 u16 index = 0;
5193 int j = 0; 5279 int j = 0;
@@ -5201,7 +5287,12 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5201 (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) { 5287 (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
5202 index = data->wbuffer[0] - '0'; 5288 index = data->wbuffer[0] - '0';
5203 if (data->wbuffer[1] == '\n') { 5289 if (data->wbuffer[1] == '\n') {
5204 set_wep_key(ai, index, NULL, 0, 1, 1); 5290 rc = set_wep_tx_idx(ai, index, 1, 1);
5291 if (rc < 0) {
5292 airo_print_err(ai->dev->name, "failed to set "
5293 "WEP transmit index to %d: %d.",
5294 index, rc);
5295 }
5205 return; 5296 return;
5206 } 5297 }
5207 j = 2; 5298 j = 2;
@@ -5220,7 +5311,12 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5220 break; 5311 break;
5221 } 5312 }
5222 } 5313 }
5223 set_wep_key(ai, index, key, i/3, 1, 1); 5314
5315 rc = set_wep_key(ai, index, key, i/3, 1, 1);
5316 if (rc < 0) {
5317 airo_print_err(ai->dev->name, "failed to set WEP key at index "
5318 "%d: %d.", index, rc);
5319 }
5224} 5320}
5225 5321
5226static int proc_wepkey_open( struct inode *inode, struct file *file ) 5322static int proc_wepkey_open( struct inode *inode, struct file *file )
@@ -5451,13 +5547,13 @@ static void timer_func( struct net_device *dev ) {
5451 break; 5547 break;
5452 case AUTH_SHAREDKEY: 5548 case AUTH_SHAREDKEY:
5453 if (apriv->keyindex < auto_wep) { 5549 if (apriv->keyindex < auto_wep) {
5454 set_wep_key(apriv, apriv->keyindex, NULL, 0, 0, 0); 5550 set_wep_tx_idx(apriv, apriv->keyindex, 0, 0);
5455 apriv->config.authType = AUTH_SHAREDKEY; 5551 apriv->config.authType = AUTH_SHAREDKEY;
5456 apriv->keyindex++; 5552 apriv->keyindex++;
5457 } else { 5553 } else {
5458 /* Drop to ENCRYPT */ 5554 /* Drop to ENCRYPT */
5459 apriv->keyindex = 0; 5555 apriv->keyindex = 0;
5460 set_wep_key(apriv, apriv->defindex, NULL, 0, 0, 0); 5556 set_wep_tx_idx(apriv, apriv->defindex, 0, 0);
5461 apriv->config.authType = AUTH_ENCRYPT; 5557 apriv->config.authType = AUTH_ENCRYPT;
5462 } 5558 }
5463 break; 5559 break;
@@ -5725,16 +5821,12 @@ static int airo_set_freq(struct net_device *dev,
5725 int rc = -EINPROGRESS; /* Call commit handler */ 5821 int rc = -EINPROGRESS; /* Call commit handler */
5726 5822
5727 /* If setting by frequency, convert to a channel */ 5823 /* If setting by frequency, convert to a channel */
5728 if((fwrq->e == 1) && 5824 if(fwrq->e == 1) {
5729 (fwrq->m >= (int) 2.412e8) &&
5730 (fwrq->m <= (int) 2.487e8)) {
5731 int f = fwrq->m / 100000; 5825 int f = fwrq->m / 100000;
5732 int c = 0; 5826
5733 while((c < 14) && (f != frequency_list[c]))
5734 c++;
5735 /* Hack to fall through... */ 5827 /* Hack to fall through... */
5736 fwrq->e = 0; 5828 fwrq->e = 0;
5737 fwrq->m = c + 1; 5829 fwrq->m = ieee80211_freq_to_dsss_chan(f);
5738 } 5830 }
5739 /* Setting by channel number */ 5831 /* Setting by channel number */
5740 if((fwrq->m > 1000) || (fwrq->e > 0)) 5832 if((fwrq->m > 1000) || (fwrq->e > 0))
@@ -5778,7 +5870,7 @@ static int airo_get_freq(struct net_device *dev,
5778 5870
5779 ch = le16_to_cpu(status_rid.channel); 5871 ch = le16_to_cpu(status_rid.channel);
5780 if((ch > 0) && (ch < 15)) { 5872 if((ch > 0) && (ch < 15)) {
5781 fwrq->m = frequency_list[ch - 1] * 100000; 5873 fwrq->m = ieee80211_dsss_chan_to_freq(ch) * 100000;
5782 fwrq->e = 1; 5874 fwrq->e = 1;
5783 } else { 5875 } else {
5784 fwrq->m = ch; 5876 fwrq->m = ch;
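The private frequency_list[] table is gone; airo_set_freq() and airo_get_freq() now rely on ieee80211_freq_to_dsss_chan() and ieee80211_dsss_chan_to_freq(). A stand-alone pair of helpers implementing the same 2.4 GHz DSSS mapping the removed table encoded (channels 1-13 at 2412-2472 MHz in 5 MHz steps, channel 14 at 2484 MHz); the function names are local to this sketch, not the kernel's:

#include <stdio.h>

static int dsss_chan_to_freq(int chan)
{
	if (chan >= 1 && chan <= 13)
		return 2407 + chan * 5;
	if (chan == 14)
		return 2484;
	return -1;
}

static int dsss_freq_to_chan(int freq)
{
	if (freq == 2484)
		return 14;
	if (freq >= 2412 && freq <= 2472 && (freq - 2407) % 5 == 0)
		return (freq - 2407) / 5;
	return -1;
}

int main(void)
{
	printf("channel 1 -> %d MHz\n", dsss_chan_to_freq(1));		/* 2412 */
	printf("2484 MHz  -> channel %d\n", dsss_freq_to_chan(2484));	/* 14 */
	return 0;
}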
@@ -6234,11 +6326,9 @@ static int airo_get_mode(struct net_device *dev,
6234 return 0; 6326 return 0;
6235} 6327}
6236 6328
6237static inline int valid_index(CapabilityRid *p, int index) 6329static inline int valid_index(struct airo_info *ai, int index)
6238{ 6330{
6239 if (index < 0) 6331 return (index >= 0) && (index <= ai->max_wep_idx);
6240 return 0;
6241 return index < (p->softCap & cpu_to_le16(0x80) ? 4 : 1);
6242} 6332}
6243 6333
6244/*------------------------------------------------------------------*/ 6334/*------------------------------------------------------------------*/
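valid_index() no longer re-reads a CapabilityRid on every call; it checks against the max_wep_idx cached at init. A small sketch of that check together with the 1-based IW_ENCODE_INDEX convention the callers below convert from (names and sample values are illustrative):

#include <stdio.h>

/* Same range check as the new valid_index(): 0-based index against the
 * cached maximum (0 or 3, from the capability bits decoded at init). */
static int valid_index(int max_wep_idx, int index)
{
	return index >= 0 && index <= max_wep_idx;
}

int main(void)
{
	int max_wep_idx = 3;	/* card with four key slots */
	int iw_index = 4;	/* user-supplied, 1-based */
	int idx = iw_index - 1;	/* driver converts to 0-based first */

	printf("index %d %s\n", iw_index,
	       valid_index(max_wep_idx, idx) ? "accepted" : "rejected");
	return 0;
}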
@@ -6251,16 +6341,13 @@ static int airo_set_encode(struct net_device *dev,
6251 char *extra) 6341 char *extra)
6252{ 6342{
6253 struct airo_info *local = dev->ml_priv; 6343 struct airo_info *local = dev->ml_priv;
6254 CapabilityRid cap_rid; /* Card capability info */ 6344 int perm = (dwrq->flags & IW_ENCODE_TEMP ? 0 : 1);
6255 int perm = ( dwrq->flags & IW_ENCODE_TEMP ? 0 : 1 );
6256 __le16 currentAuthType = local->config.authType; 6345 __le16 currentAuthType = local->config.authType;
6346 int rc = 0;
6257 6347
6258 /* Is WEP supported ? */ 6348 if (!local->wep_capable)
6259 readCapabilityRid(local, &cap_rid, 1);
6260 /* Older firmware doesn't support this...
6261 if(!(cap_rid.softCap & cpu_to_le16(2))) {
6262 return -EOPNOTSUPP; 6349 return -EOPNOTSUPP;
6263 } */ 6350
6264 readConfigRid(local, 1); 6351 readConfigRid(local, 1);
6265 6352
6266 /* Basic checking: do we have a key to set ? 6353 /* Basic checking: do we have a key to set ?
@@ -6272,14 +6359,21 @@ static int airo_set_encode(struct net_device *dev,
6272 if (dwrq->length > 0) { 6359 if (dwrq->length > 0) {
6273 wep_key_t key; 6360 wep_key_t key;
6274 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 6361 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
6275 int current_index = get_wep_key(local, 0xffff); 6362 int current_index;
6363
6276 /* Check the size of the key */ 6364 /* Check the size of the key */
6277 if (dwrq->length > MAX_KEY_SIZE) { 6365 if (dwrq->length > MAX_KEY_SIZE) {
6278 return -EINVAL; 6366 return -EINVAL;
6279 } 6367 }
6368
6369 current_index = get_wep_tx_idx(local);
6370 if (current_index < 0)
6371 current_index = 0;
6372
6280 /* Check the index (none -> use current) */ 6373 /* Check the index (none -> use current) */
6281 if (!valid_index(&cap_rid, index)) 6374 if (!valid_index(local, index))
6282 index = current_index; 6375 index = current_index;
6376
6283 /* Set the length */ 6377 /* Set the length */
6284 if (dwrq->length > MIN_KEY_SIZE) 6378 if (dwrq->length > MIN_KEY_SIZE)
6285 key.len = MAX_KEY_SIZE; 6379 key.len = MAX_KEY_SIZE;
@@ -6296,7 +6390,13 @@ static int airo_set_encode(struct net_device *dev,
6296 /* Copy the key in the driver */ 6390 /* Copy the key in the driver */
6297 memcpy(key.key, extra, dwrq->length); 6391 memcpy(key.key, extra, dwrq->length);
6298 /* Send the key to the card */ 6392 /* Send the key to the card */
6299 set_wep_key(local, index, key.key, key.len, perm, 1); 6393 rc = set_wep_key(local, index, key.key, key.len, perm, 1);
6394 if (rc < 0) {
6395 airo_print_err(local->dev->name, "failed to set"
6396 " WEP key at index %d: %d.",
6397 index, rc);
6398 return rc;
6399 }
6300 } 6400 }
6301 /* WE specify that if a valid key is set, encryption 6401 /* WE specify that if a valid key is set, encryption
6302 * should be enabled (user may turn it off later) 6402 * should be enabled (user may turn it off later)
@@ -6308,12 +6408,19 @@ static int airo_set_encode(struct net_device *dev,
6308 } else { 6408 } else {
6309 /* Do we want to just set the transmit key index ? */ 6409 /* Do we want to just set the transmit key index ? */
6310 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 6410 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
6311 if (valid_index(&cap_rid, index)) { 6411 if (valid_index(local, index)) {
6312 set_wep_key(local, index, NULL, 0, perm, 1); 6412 rc = set_wep_tx_idx(local, index, perm, 1);
6313 } else 6413 if (rc < 0) {
6414 airo_print_err(local->dev->name, "failed to set"
6415 " WEP transmit index to %d: %d.",
6416 index, rc);
6417 return rc;
6418 }
6419 } else {
6314 /* Don't complain if only change the mode */ 6420 /* Don't complain if only change the mode */
6315 if (!(dwrq->flags & IW_ENCODE_MODE)) 6421 if (!(dwrq->flags & IW_ENCODE_MODE))
6316 return -EINVAL; 6422 return -EINVAL;
6423 }
6317 } 6424 }
6318 /* Read the flags */ 6425 /* Read the flags */
6319 if(dwrq->flags & IW_ENCODE_DISABLED) 6426 if(dwrq->flags & IW_ENCODE_DISABLED)
@@ -6339,14 +6446,13 @@ static int airo_get_encode(struct net_device *dev,
6339{ 6446{
6340 struct airo_info *local = dev->ml_priv; 6447 struct airo_info *local = dev->ml_priv;
6341 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 6448 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
6342 CapabilityRid cap_rid; /* Card capability info */ 6449 u8 buf[16];
6343 6450
6344 /* Is it supported ? */ 6451 if (!local->wep_capable)
6345 readCapabilityRid(local, &cap_rid, 1);
6346 if(!(cap_rid.softCap & cpu_to_le16(2))) {
6347 return -EOPNOTSUPP; 6452 return -EOPNOTSUPP;
6348 } 6453
6349 readConfigRid(local, 1); 6454 readConfigRid(local, 1);
6455
6350 /* Check encryption mode */ 6456 /* Check encryption mode */
6351 switch(local->config.authType) { 6457 switch(local->config.authType) {
6352 case AUTH_ENCRYPT: 6458 case AUTH_ENCRYPT:
@@ -6365,14 +6471,17 @@ static int airo_get_encode(struct net_device *dev,
6365 memset(extra, 0, 16); 6471 memset(extra, 0, 16);
6366 6472
6367 /* Which key do we want ? -1 -> tx index */ 6473 /* Which key do we want ? -1 -> tx index */
6368 if (!valid_index(&cap_rid, index)) 6474 if (!valid_index(local, index)) {
6369 index = get_wep_key(local, 0xffff); 6475 index = get_wep_tx_idx(local);
6476 if (index < 0)
6477 index = 0;
6478 }
6370 dwrq->flags |= index + 1; 6479 dwrq->flags |= index + 1;
6480
6371 /* Copy the key to the user buffer */ 6481 /* Copy the key to the user buffer */
6372 dwrq->length = get_wep_key(local, index); 6482 dwrq->length = get_wep_key(local, index, &buf[0], sizeof(buf));
6373 if (dwrq->length > 16) { 6483 memcpy(extra, buf, dwrq->length);
6374 dwrq->length=0; 6484
6375 }
6376 return 0; 6485 return 0;
6377} 6486}
6378 6487
@@ -6388,28 +6497,27 @@ static int airo_set_encodeext(struct net_device *dev,
6388 struct airo_info *local = dev->ml_priv; 6497 struct airo_info *local = dev->ml_priv;
6389 struct iw_point *encoding = &wrqu->encoding; 6498 struct iw_point *encoding = &wrqu->encoding;
6390 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6499 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6391 CapabilityRid cap_rid; /* Card capability info */
6392 int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 ); 6500 int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 );
6393 __le16 currentAuthType = local->config.authType; 6501 __le16 currentAuthType = local->config.authType;
6394 int idx, key_len, alg = ext->alg, set_key = 1; 6502 int idx, key_len, alg = ext->alg, set_key = 1, rc;
6395 wep_key_t key; 6503 wep_key_t key;
6396 6504
6397 /* Is WEP supported ? */ 6505 if (!local->wep_capable)
6398 readCapabilityRid(local, &cap_rid, 1);
6399 /* Older firmware doesn't support this...
6400 if(!(cap_rid.softCap & cpu_to_le16(2))) {
6401 return -EOPNOTSUPP; 6506 return -EOPNOTSUPP;
6402 } */ 6507
6403 readConfigRid(local, 1); 6508 readConfigRid(local, 1);
6404 6509
6405 /* Determine and validate the key index */ 6510 /* Determine and validate the key index */
6406 idx = encoding->flags & IW_ENCODE_INDEX; 6511 idx = encoding->flags & IW_ENCODE_INDEX;
6407 if (idx) { 6512 if (idx) {
6408 if (!valid_index(&cap_rid, idx - 1)) 6513 if (!valid_index(local, idx - 1))
6409 return -EINVAL; 6514 return -EINVAL;
6410 idx--; 6515 idx--;
6411 } else 6516 } else {
6412 idx = get_wep_key(local, 0xffff); 6517 idx = get_wep_tx_idx(local);
6518 if (idx < 0)
6519 idx = 0;
6520 }
6413 6521
6414 if (encoding->flags & IW_ENCODE_DISABLED) 6522 if (encoding->flags & IW_ENCODE_DISABLED)
6415 alg = IW_ENCODE_ALG_NONE; 6523 alg = IW_ENCODE_ALG_NONE;
@@ -6418,7 +6526,13 @@ static int airo_set_encodeext(struct net_device *dev,
6418 /* Only set transmit key index here, actual 6526 /* Only set transmit key index here, actual
6419 * key is set below if needed. 6527 * key is set below if needed.
6420 */ 6528 */
6421 set_wep_key(local, idx, NULL, 0, perm, 1); 6529 rc = set_wep_tx_idx(local, idx, perm, 1);
6530 if (rc < 0) {
6531 airo_print_err(local->dev->name, "failed to set "
6532 "WEP transmit index to %d: %d.",
6533 idx, rc);
6534 return rc;
6535 }
6422 set_key = ext->key_len > 0 ? 1 : 0; 6536 set_key = ext->key_len > 0 ? 1 : 0;
6423 } 6537 }
6424 6538
@@ -6444,7 +6558,12 @@ static int airo_set_encodeext(struct net_device *dev,
6444 return -EINVAL; 6558 return -EINVAL;
6445 } 6559 }
6446 /* Send the key to the card */ 6560 /* Send the key to the card */
6447 set_wep_key(local, idx, key.key, key.len, perm, 1); 6561 rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
6562 if (rc < 0) {
6563 airo_print_err(local->dev->name, "failed to set WEP key"
6564 " at index %d: %d.", idx, rc);
6565 return rc;
6566 }
6448 } 6567 }
6449 6568
6450 /* Read the flags */ 6569 /* Read the flags */
@@ -6474,14 +6593,12 @@ static int airo_get_encodeext(struct net_device *dev,
6474 struct airo_info *local = dev->ml_priv; 6593 struct airo_info *local = dev->ml_priv;
6475 struct iw_point *encoding = &wrqu->encoding; 6594 struct iw_point *encoding = &wrqu->encoding;
6476 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6595 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6477 CapabilityRid cap_rid; /* Card capability info */
6478 int idx, max_key_len; 6596 int idx, max_key_len;
6597 u8 buf[16];
6479 6598
6480 /* Is it supported ? */ 6599 if (!local->wep_capable)
6481 readCapabilityRid(local, &cap_rid, 1);
6482 if(!(cap_rid.softCap & cpu_to_le16(2))) {
6483 return -EOPNOTSUPP; 6600 return -EOPNOTSUPP;
6484 } 6601
6485 readConfigRid(local, 1); 6602 readConfigRid(local, 1);
6486 6603
6487 max_key_len = encoding->length - sizeof(*ext); 6604 max_key_len = encoding->length - sizeof(*ext);
@@ -6490,11 +6607,14 @@ static int airo_get_encodeext(struct net_device *dev,
6490 6607
6491 idx = encoding->flags & IW_ENCODE_INDEX; 6608 idx = encoding->flags & IW_ENCODE_INDEX;
6492 if (idx) { 6609 if (idx) {
6493 if (!valid_index(&cap_rid, idx - 1)) 6610 if (!valid_index(local, idx - 1))
6494 return -EINVAL; 6611 return -EINVAL;
6495 idx--; 6612 idx--;
6496 } else 6613 } else {
6497 idx = get_wep_key(local, 0xffff); 6614 idx = get_wep_tx_idx(local);
6615 if (idx < 0)
6616 idx = 0;
6617 }
6498 6618
6499 encoding->flags = idx + 1; 6619 encoding->flags = idx + 1;
6500 memset(ext, 0, sizeof(*ext)); 6620 memset(ext, 0, sizeof(*ext));
@@ -6517,10 +6637,8 @@ static int airo_get_encodeext(struct net_device *dev,
6517 memset(extra, 0, 16); 6637 memset(extra, 0, 16);
6518 6638
6519 /* Copy the key to the user buffer */ 6639 /* Copy the key to the user buffer */
6520 ext->key_len = get_wep_key(local, idx); 6640 ext->key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
6521 if (ext->key_len > 16) { 6641 memcpy(extra, buf, ext->key_len);
6522 ext->key_len=0;
6523 }
6524 6642
6525 return 0; 6643 return 0;
6526} 6644}
@@ -6795,8 +6913,8 @@ static int airo_get_range(struct net_device *dev,
6795 k = 0; 6913 k = 0;
6796 for(i = 0; i < 14; i++) { 6914 for(i = 0; i < 14; i++) {
6797 range->freq[k].i = i + 1; /* List index */ 6915 range->freq[k].i = i + 1; /* List index */
6798 range->freq[k].m = frequency_list[i] * 100000; 6916 range->freq[k].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000;
6799 range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */ 6917 range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
6800 } 6918 }
6801 range->num_frequency = k; 6919 range->num_frequency = k;
6802 6920
@@ -7189,10 +7307,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
7189 /* Add frequency */ 7307 /* Add frequency */
7190 iwe.cmd = SIOCGIWFREQ; 7308 iwe.cmd = SIOCGIWFREQ;
7191 iwe.u.freq.m = le16_to_cpu(bss->dsChannel); 7309 iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
7192 /* iwe.u.freq.m containt the channel (starting 1), our 7310 iwe.u.freq.m = ieee80211_dsss_chan_to_freq(iwe.u.freq.m) * 100000;
7193 * frequency_list array start at index 0...
7194 */
7195 iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
7196 iwe.u.freq.e = 1; 7311 iwe.u.freq.e = 1;
7197 current_ev = iwe_stream_add_event(info, current_ev, end_buf, 7312 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
7198 &iwe, IW_EV_FREQ_LEN); 7313 &iwe, IW_EV_FREQ_LEN);
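The airo.c hunks above drop the driver's private frequency_list[] lookups in favour of ieee80211_dsss_chan_to_freq(). A minimal sketch of the 2.4 GHz DSSS channel-to-MHz mapping that helper is assumed to perform, and of the m * 10^e scaling used by the Wireless Extensions freq events, is shown below (illustrative only, not part of the patch):

/* Illustrative sketch, not part of the patch: assumed DSSS channel -> MHz
 * mapping behind ieee80211_dsss_chan_to_freq(). */
static int dsss_chan_to_freq_sketch(int chan)
{
	if (chan < 1 || chan > 14)
		return -1;		/* not a 2.4 GHz DSSS channel */
	if (chan == 14)
		return 2484;		/* Japanese channel 14 special case */
	return 2407 + chan * 5;		/* 2412 MHz for channel 1, 5 MHz spacing */
}

/* With iwe.u.freq.e = 1 the reported frequency is m * 10^1, so storing
 * MHz * 100000 yields the value in Hz (e.g. 2412 * 100000 * 10). */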
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 183ffc8e62c..b9af2b84c05 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -165,9 +165,6 @@
165#define AR5K_INI_VAL_XR 0 165#define AR5K_INI_VAL_XR 0
166#define AR5K_INI_VAL_MAX 5 166#define AR5K_INI_VAL_MAX 5
167 167
168#define AR5K_RF5111_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
169#define AR5K_RF5112_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
170
171/* Used for BSSID etc manipulation */ 168/* Used for BSSID etc manipulation */
172#define AR5K_LOW_ID(_a)( \ 169#define AR5K_LOW_ID(_a)( \
173(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \ 170(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \
@@ -225,6 +222,7 @@
225#endif 222#endif
226 223
227/* Initial values */ 224/* Initial values */
225#define AR5K_INIT_CYCRSSI_THR1 2
228#define AR5K_INIT_TX_LATENCY 502 226#define AR5K_INIT_TX_LATENCY 502
229#define AR5K_INIT_USEC 39 227#define AR5K_INIT_USEC 39
230#define AR5K_INIT_USEC_TURBO 79 228#define AR5K_INIT_USEC_TURBO 79
@@ -316,7 +314,7 @@ struct ath5k_srev_name {
316#define AR5K_SREV_AR5424 0x90 /* Condor */ 314#define AR5K_SREV_AR5424 0x90 /* Condor */
317#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */ 315#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
318#define AR5K_SREV_AR5414 0xa0 /* Eagle */ 316#define AR5K_SREV_AR5414 0xa0 /* Eagle */
319#define AR5K_SREV_AR2415 0xb0 /* Cobra */ 317#define AR5K_SREV_AR2415 0xb0 /* Talon */
320#define AR5K_SREV_AR5416 0xc0 /* PCI-E */ 318#define AR5K_SREV_AR5416 0xc0 /* PCI-E */
321#define AR5K_SREV_AR5418 0xca /* PCI-E */ 319#define AR5K_SREV_AR5418 0xca /* PCI-E */
322#define AR5K_SREV_AR2425 0xe0 /* Swan */ 320#define AR5K_SREV_AR2425 0xe0 /* Swan */
@@ -334,7 +332,7 @@ struct ath5k_srev_name {
334#define AR5K_SREV_RAD_2112B 0x46 332#define AR5K_SREV_RAD_2112B 0x46
335#define AR5K_SREV_RAD_2413 0x50 333#define AR5K_SREV_RAD_2413 0x50
336#define AR5K_SREV_RAD_5413 0x60 334#define AR5K_SREV_RAD_5413 0x60
337#define AR5K_SREV_RAD_2316 0x70 335#define AR5K_SREV_RAD_2316 0x70 /* Cobra SoC */
338#define AR5K_SREV_RAD_2317 0x80 336#define AR5K_SREV_RAD_2317 0x80
339#define AR5K_SREV_RAD_5424 0xa0 /* Mostly same as 5413 */ 337#define AR5K_SREV_RAD_5424 0xa0 /* Mostly same as 5413 */
340#define AR5K_SREV_RAD_2425 0xa2 338#define AR5K_SREV_RAD_2425 0xa2
@@ -342,7 +340,8 @@ struct ath5k_srev_name {
342 340
343#define AR5K_SREV_PHY_5211 0x30 341#define AR5K_SREV_PHY_5211 0x30
344#define AR5K_SREV_PHY_5212 0x41 342#define AR5K_SREV_PHY_5212 0x41
345#define AR5K_SREV_PHY_2112B 0x43 343#define AR5K_SREV_PHY_5212A 0x42
344#define AR5K_SREV_PHY_5212B 0x43
346#define AR5K_SREV_PHY_2413 0x45 345#define AR5K_SREV_PHY_2413 0x45
347#define AR5K_SREV_PHY_5413 0x61 346#define AR5K_SREV_PHY_5413 0x61
348#define AR5K_SREV_PHY_2425 0x70 347#define AR5K_SREV_PHY_2425 0x70
@@ -649,49 +648,21 @@ struct ath5k_beacon_state {
649 648
650enum ath5k_rfgain { 649enum ath5k_rfgain {
651 AR5K_RFGAIN_INACTIVE = 0, 650 AR5K_RFGAIN_INACTIVE = 0,
651 AR5K_RFGAIN_ACTIVE,
652 AR5K_RFGAIN_READ_REQUESTED, 652 AR5K_RFGAIN_READ_REQUESTED,
653 AR5K_RFGAIN_NEED_CHANGE, 653 AR5K_RFGAIN_NEED_CHANGE,
654}; 654};
655 655
656#define AR5K_GAIN_CRN_FIX_BITS_5111 4
657#define AR5K_GAIN_CRN_FIX_BITS_5112 7
658#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
659#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
660#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
661#define AR5K_GAIN_CCK_PROBE_CORR 5
662#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
663#define AR5K_GAIN_STEP_COUNT 10
664#define AR5K_GAIN_PARAM_TX_CLIP 0
665#define AR5K_GAIN_PARAM_PD_90 1
666#define AR5K_GAIN_PARAM_PD_84 2
667#define AR5K_GAIN_PARAM_GAIN_SEL 3
668#define AR5K_GAIN_PARAM_MIX_ORN 0
669#define AR5K_GAIN_PARAM_PD_138 1
670#define AR5K_GAIN_PARAM_PD_137 2
671#define AR5K_GAIN_PARAM_PD_136 3
672#define AR5K_GAIN_PARAM_PD_132 4
673#define AR5K_GAIN_PARAM_PD_131 5
674#define AR5K_GAIN_PARAM_PD_130 6
675#define AR5K_GAIN_CHECK_ADJUST(_g) \
676 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
677
678struct ath5k_gain_opt_step {
679 s16 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
680 s32 gos_gain;
681};
682
683struct ath5k_gain { 656struct ath5k_gain {
684 u32 g_step_idx; 657 u8 g_step_idx;
685 u32 g_current; 658 u8 g_current;
686 u32 g_target; 659 u8 g_target;
687 u32 g_low; 660 u8 g_low;
688 u32 g_high; 661 u8 g_high;
689 u32 g_f_corr; 662 u8 g_f_corr;
690 u32 g_active; 663 u8 g_state;
691 const struct ath5k_gain_opt_step *g_step;
692}; 664};
693 665
694
695/********************\ 666/********************\
696 COMMON DEFINITIONS 667 COMMON DEFINITIONS
697\********************/ 668\********************/
@@ -1053,7 +1024,6 @@ struct ath5k_hw {
1053 bool ah_running; 1024 bool ah_running;
1054 bool ah_single_chip; 1025 bool ah_single_chip;
1055 bool ah_combined_mic; 1026 bool ah_combined_mic;
1056 enum ath5k_rfgain ah_rf_gain;
1057 1027
1058 u32 ah_mac_srev; 1028 u32 ah_mac_srev;
1059 u16 ah_mac_version; 1029 u16 ah_mac_version;
@@ -1061,7 +1031,6 @@ struct ath5k_hw {
1061 u16 ah_phy_revision; 1031 u16 ah_phy_revision;
1062 u16 ah_radio_5ghz_revision; 1032 u16 ah_radio_5ghz_revision;
1063 u16 ah_radio_2ghz_revision; 1033 u16 ah_radio_2ghz_revision;
1064 u32 ah_phy_spending;
1065 1034
1066 enum ath5k_version ah_version; 1035 enum ath5k_version ah_version;
1067 enum ath5k_radio ah_radio; 1036 enum ath5k_radio ah_radio;
@@ -1112,8 +1081,9 @@ struct ath5k_hw {
1112 u32 ah_txq_isr; 1081 u32 ah_txq_isr;
1113 u32 *ah_rf_banks; 1082 u32 *ah_rf_banks;
1114 size_t ah_rf_banks_size; 1083 size_t ah_rf_banks_size;
1084 size_t ah_rf_regs_count;
1115 struct ath5k_gain ah_gain; 1085 struct ath5k_gain ah_gain;
1116 u32 ah_offset[AR5K_MAX_RF_BANKS]; 1086 u8 ah_offset[AR5K_MAX_RF_BANKS];
1117 1087
1118 struct { 1088 struct {
1119 u16 txp_pcdac[AR5K_EEPROM_POWER_TABLE_SIZE]; 1089 u16 txp_pcdac[AR5K_EEPROM_POWER_TABLE_SIZE];
@@ -1186,6 +1156,7 @@ extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_l
1186/* EEPROM access functions */ 1156/* EEPROM access functions */
1187extern int ath5k_eeprom_init(struct ath5k_hw *ah); 1157extern int ath5k_eeprom_init(struct ath5k_hw *ah);
1188extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); 1158extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1159extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1189 1160
1190/* Protocol Control Unit Functions */ 1161/* Protocol Control Unit Functions */
1191extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1162extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
@@ -1206,6 +1177,7 @@ extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1206/* Beacon control functions */ 1177/* Beacon control functions */
1207extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah); 1178extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
1208extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); 1179extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1180extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
1209extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1181extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
1210extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval); 1182extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
1211#if 0 1183#if 0
@@ -1260,10 +1232,12 @@ extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
1260extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); 1232extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
1261 1233
1262/* Initialize RF */ 1234/* Initialize RF */
1263extern int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int mode); 1235extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
1264extern int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq); 1236 struct ieee80211_channel *channel,
1265extern enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah); 1237 unsigned int mode);
1266extern int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah); 1238extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
1239extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
1240extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
1267/* PHY/RF channel functions */ 1241/* PHY/RF channel functions */
1268extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1242extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1269extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1243extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
@@ -1285,6 +1259,7 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power);
1285 1259
1286/* 1260/*
1287 * Translate usec to hw clock units 1261 * Translate usec to hw clock units
1262 * TODO: Half/quarter rate
1288 */ 1263 */
1289static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo) 1264static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
1290{ 1265{
@@ -1293,6 +1268,7 @@ static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
1293 1268
1294/* 1269/*
1295 * Translate hw clock units to usec 1270 * Translate hw clock units to usec
1271 * TODO: Half/quarter rate
1296 */ 1272 */
1297static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo) 1273static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1298{ 1274{
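The two inline helpers at the end of the ath5k.h hunk convert between microseconds and MAC clock ticks; the new TODOs note that half/quarter-rate channels are not yet handled. A rough sketch of the conversion, assuming a 40 MHz core clock that doubles in turbo mode (an assumption, the hunk does not show the bodies), is:

/* Sketch only; assumes a 40 MHz core clock, 80 MHz in turbo mode. */
static inline unsigned int htoclock_sketch(unsigned int usec, int turbo)
{
	return turbo ? usec * 80 : usec * 40;	/* usec -> clock ticks */
}

static inline unsigned int clocktoh_sketch(unsigned int clock, int turbo)
{
	return turbo ? clock / 80 : clock / 40;	/* clock ticks -> usec */
}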
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
index dea378f7673..05bc5cb44e8 100644
--- a/drivers/net/wireless/ath5k/attach.c
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -169,7 +169,6 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
169 ah->ah_single_chip = false; 169 ah->ah_single_chip = false;
170 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, 170 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
171 CHANNEL_2GHZ); 171 CHANNEL_2GHZ);
172 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
173 break; 172 break;
174 case AR5K_SREV_RAD_5112: 173 case AR5K_SREV_RAD_5112:
175 case AR5K_SREV_RAD_2112: 174 case AR5K_SREV_RAD_2112:
@@ -177,38 +176,31 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
177 ah->ah_single_chip = false; 176 ah->ah_single_chip = false;
178 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, 177 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
179 CHANNEL_2GHZ); 178 CHANNEL_2GHZ);
180 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
181 break; 179 break;
182 case AR5K_SREV_RAD_2413: 180 case AR5K_SREV_RAD_2413:
183 ah->ah_radio = AR5K_RF2413; 181 ah->ah_radio = AR5K_RF2413;
184 ah->ah_single_chip = true; 182 ah->ah_single_chip = true;
185 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
186 break; 183 break;
187 case AR5K_SREV_RAD_5413: 184 case AR5K_SREV_RAD_5413:
188 ah->ah_radio = AR5K_RF5413; 185 ah->ah_radio = AR5K_RF5413;
189 ah->ah_single_chip = true; 186 ah->ah_single_chip = true;
190 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
191 break; 187 break;
192 case AR5K_SREV_RAD_2316: 188 case AR5K_SREV_RAD_2316:
193 ah->ah_radio = AR5K_RF2316; 189 ah->ah_radio = AR5K_RF2316;
194 ah->ah_single_chip = true; 190 ah->ah_single_chip = true;
195 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2316;
196 break; 191 break;
197 case AR5K_SREV_RAD_2317: 192 case AR5K_SREV_RAD_2317:
198 ah->ah_radio = AR5K_RF2317; 193 ah->ah_radio = AR5K_RF2317;
199 ah->ah_single_chip = true; 194 ah->ah_single_chip = true;
200 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2317;
201 break; 195 break;
202 case AR5K_SREV_RAD_5424: 196 case AR5K_SREV_RAD_5424:
203 if (ah->ah_mac_version == AR5K_SREV_AR2425 || 197 if (ah->ah_mac_version == AR5K_SREV_AR2425 ||
204 ah->ah_mac_version == AR5K_SREV_AR2417){ 198 ah->ah_mac_version == AR5K_SREV_AR2417){
205 ah->ah_radio = AR5K_RF2425; 199 ah->ah_radio = AR5K_RF2425;
206 ah->ah_single_chip = true; 200 ah->ah_single_chip = true;
207 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
208 } else { 201 } else {
209 ah->ah_radio = AR5K_RF5413; 202 ah->ah_radio = AR5K_RF5413;
210 ah->ah_single_chip = true; 203 ah->ah_single_chip = true;
211 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
212 } 204 }
213 break; 205 break;
214 default: 206 default:
@@ -227,29 +219,25 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
227 ah->ah_radio = AR5K_RF2425; 219 ah->ah_radio = AR5K_RF2425;
228 ah->ah_single_chip = true; 220 ah->ah_single_chip = true;
229 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425; 221 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425;
230 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
231 } else if (srev == AR5K_SREV_AR5213A && 222 } else if (srev == AR5K_SREV_AR5213A &&
232 ah->ah_phy_revision == AR5K_SREV_PHY_2112B) { 223 ah->ah_phy_revision == AR5K_SREV_PHY_5212B) {
233 ah->ah_radio = AR5K_RF5112; 224 ah->ah_radio = AR5K_RF5112;
234 ah->ah_single_chip = false; 225 ah->ah_single_chip = false;
235 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2112B; 226 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
236 } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) { 227 } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) {
237 ah->ah_radio = AR5K_RF2316; 228 ah->ah_radio = AR5K_RF2316;
238 ah->ah_single_chip = true; 229 ah->ah_single_chip = true;
239 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316; 230 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
240 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2316;
241 } else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) || 231 } else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) ||
242 ah->ah_phy_revision == AR5K_SREV_PHY_5413) { 232 ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
243 ah->ah_radio = AR5K_RF5413; 233 ah->ah_radio = AR5K_RF5413;
244 ah->ah_single_chip = true; 234 ah->ah_single_chip = true;
245 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413; 235 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413;
246 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
247 } else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) || 236 } else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) ||
248 ah->ah_phy_revision == AR5K_SREV_PHY_2413) { 237 ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
249 ah->ah_radio = AR5K_RF2413; 238 ah->ah_radio = AR5K_RF2413;
250 ah->ah_single_chip = true; 239 ah->ah_single_chip = true;
251 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413; 240 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
252 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
253 } else { 241 } else {
254 ATH5K_ERR(sc, "Couldn't identify radio revision.\n"); 242 ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
255 ret = -ENODEV; 243 ret = -ENODEV;
@@ -331,7 +319,7 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
331 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 319 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
332 ath5k_hw_set_opmode(ah); 320 ath5k_hw_set_opmode(ah);
333 321
334 ath5k_hw_set_rfgain_opt(ah); 322 ath5k_hw_rfgain_opt_init(ah);
335 323
336 return ah; 324 return ah;
337err_free: 325err_free:
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 1d77ee9d6e9..6837ca9f383 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -232,13 +232,14 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
232 int mc_count, struct dev_mc_list *mclist); 232 int mc_count, struct dev_mc_list *mclist);
233static int ath5k_set_key(struct ieee80211_hw *hw, 233static int ath5k_set_key(struct ieee80211_hw *hw,
234 enum set_key_cmd cmd, 234 enum set_key_cmd cmd,
235 const u8 *local_addr, const u8 *addr, 235 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
236 struct ieee80211_key_conf *key); 236 struct ieee80211_key_conf *key);
237static int ath5k_get_stats(struct ieee80211_hw *hw, 237static int ath5k_get_stats(struct ieee80211_hw *hw,
238 struct ieee80211_low_level_stats *stats); 238 struct ieee80211_low_level_stats *stats);
239static int ath5k_get_tx_stats(struct ieee80211_hw *hw, 239static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
240 struct ieee80211_tx_queue_stats *stats); 240 struct ieee80211_tx_queue_stats *stats);
241static u64 ath5k_get_tsf(struct ieee80211_hw *hw); 241static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
242static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
242static void ath5k_reset_tsf(struct ieee80211_hw *hw); 243static void ath5k_reset_tsf(struct ieee80211_hw *hw);
243static int ath5k_beacon_update(struct ath5k_softc *sc, 244static int ath5k_beacon_update(struct ath5k_softc *sc,
244 struct sk_buff *skb); 245 struct sk_buff *skb);
@@ -261,6 +262,7 @@ static struct ieee80211_ops ath5k_hw_ops = {
261 .conf_tx = NULL, 262 .conf_tx = NULL,
262 .get_tx_stats = ath5k_get_tx_stats, 263 .get_tx_stats = ath5k_get_tx_stats,
263 .get_tsf = ath5k_get_tsf, 264 .get_tsf = ath5k_get_tsf,
265 .set_tsf = ath5k_set_tsf,
264 .reset_tsf = ath5k_reset_tsf, 266 .reset_tsf = ath5k_reset_tsf,
265 .bss_info_changed = ath5k_bss_info_changed, 267 .bss_info_changed = ath5k_bss_info_changed,
266}; 268};
@@ -308,6 +310,19 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
308 bf->skb = NULL; 310 bf->skb = NULL;
309} 311}
310 312
313static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
314 struct ath5k_buf *bf)
315{
316 BUG_ON(!bf);
317 if (!bf->skb)
318 return;
319 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
320 PCI_DMA_FROMDEVICE);
321 dev_kfree_skb_any(bf->skb);
322 bf->skb = NULL;
323}
324
325
311/* Queues setup */ 326/* Queues setup */
312static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc, 327static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,
313 int qtype, int subtype); 328 int qtype, int subtype);
@@ -347,9 +362,9 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
347} 362}
348 363
349/* Interrupt handling */ 364/* Interrupt handling */
350static int ath5k_init(struct ath5k_softc *sc, bool is_resume); 365static int ath5k_init(struct ath5k_softc *sc);
351static int ath5k_stop_locked(struct ath5k_softc *sc); 366static int ath5k_stop_locked(struct ath5k_softc *sc);
352static int ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend); 367static int ath5k_stop_hw(struct ath5k_softc *sc);
353static irqreturn_t ath5k_intr(int irq, void *dev_id); 368static irqreturn_t ath5k_intr(int irq, void *dev_id);
354static void ath5k_tasklet_reset(unsigned long data); 369static void ath5k_tasklet_reset(unsigned long data);
355 370
@@ -653,8 +668,6 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
653 668
654 ath5k_led_off(sc); 669 ath5k_led_off(sc);
655 670
656 ath5k_stop_hw(sc, true);
657
658 free_irq(pdev->irq, sc); 671 free_irq(pdev->irq, sc);
659 pci_save_state(pdev); 672 pci_save_state(pdev);
660 pci_disable_device(pdev); 673 pci_disable_device(pdev);
@@ -689,14 +702,9 @@ ath5k_pci_resume(struct pci_dev *pdev)
689 goto err_no_irq; 702 goto err_no_irq;
690 } 703 }
691 704
692 err = ath5k_init(sc, true);
693 if (err)
694 goto err_irq;
695 ath5k_led_enable(sc); 705 ath5k_led_enable(sc);
696
697 return 0; 706 return 0;
698err_irq: 707
699 free_irq(pdev->irq, sc);
700err_no_irq: 708err_no_irq:
701 pci_disable_device(pdev); 709 pci_disable_device(pdev);
702 return err; 710 return err;
@@ -1188,6 +1196,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1188 struct ieee80211_rate *rate; 1196 struct ieee80211_rate *rate;
1189 unsigned int mrr_rate[3], mrr_tries[3]; 1197 unsigned int mrr_rate[3], mrr_tries[3];
1190 int i, ret; 1198 int i, ret;
1199 u16 hw_rate;
1200 u16 cts_rate = 0;
1201 u16 duration = 0;
1202 u8 rc_flags;
1191 1203
1192 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; 1204 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
1193 1205
@@ -1195,11 +1207,30 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1195 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 1207 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
1196 PCI_DMA_TODEVICE); 1208 PCI_DMA_TODEVICE);
1197 1209
1210 rate = ieee80211_get_tx_rate(sc->hw, info);
1211
1198 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 1212 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1199 flags |= AR5K_TXDESC_NOACK; 1213 flags |= AR5K_TXDESC_NOACK;
1200 1214
1215 rc_flags = info->control.rates[0].flags;
1216 hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
1217 rate->hw_value_short : rate->hw_value;
1218
1201 pktlen = skb->len; 1219 pktlen = skb->len;
1202 1220
1221 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1222 flags |= AR5K_TXDESC_RTSENA;
1223 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
1224 duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
1225 sc->vif, pktlen, info));
1226 }
1227 if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1228 flags |= AR5K_TXDESC_CTSENA;
1229 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
1230 duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
1231 sc->vif, pktlen, info));
1232 }
1233
1203 if (info->control.hw_key) { 1234 if (info->control.hw_key) {
1204 keyidx = info->control.hw_key->hw_key_idx; 1235 keyidx = info->control.hw_key->hw_key_idx;
1205 pktlen += info->control.hw_key->icv_len; 1236 pktlen += info->control.hw_key->icv_len;
@@ -1207,8 +1238,9 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1207 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 1238 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1208 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, 1239 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
1209 (sc->power_level * 2), 1240 (sc->power_level * 2),
1210 ieee80211_get_tx_rate(sc->hw, info)->hw_value, 1241 hw_rate,
1211 info->control.rates[0].count, keyidx, 0, flags, 0, 0); 1242 info->control.rates[0].count, keyidx, 0, flags,
1243 cts_rate, duration);
1212 if (ret) 1244 if (ret)
1213 goto err_unmap; 1245 goto err_unmap;
1214 1246
@@ -1324,7 +1356,7 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
1324 list_for_each_entry(bf, &sc->txbuf, list) 1356 list_for_each_entry(bf, &sc->txbuf, list)
1325 ath5k_txbuf_free(sc, bf); 1357 ath5k_txbuf_free(sc, bf);
1326 list_for_each_entry(bf, &sc->rxbuf, list) 1358 list_for_each_entry(bf, &sc->rxbuf, list)
1327 ath5k_txbuf_free(sc, bf); 1359 ath5k_rxbuf_free(sc, bf);
1328 1360
1329 /* Free memory associated with all descriptors */ 1361 /* Free memory associated with all descriptors */
1330 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); 1362 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
@@ -2177,10 +2209,6 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2177 * 2209 *
2178 * @sc: struct ath5k_softc pointer we are operating on 2210 * @sc: struct ath5k_softc pointer we are operating on
2179 * 2211 *
2180 * When operating in station mode we want to receive a BMISS interrupt when we
2181 * stop seeing beacons from the AP we've associated with so we can look for
2182 * another AP to associate with.
2183 *
2184 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA 2212 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2185 * interrupts to detect TSF updates only. 2213 * interrupts to detect TSF updates only.
2186 */ 2214 */
@@ -2193,9 +2221,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2193 sc->bmisscount = 0; 2221 sc->bmisscount = 0;
2194 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); 2222 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2195 2223
2196 if (sc->opmode == NL80211_IFTYPE_STATION) { 2224 if (sc->opmode == NL80211_IFTYPE_ADHOC ||
2197 sc->imask |= AR5K_INT_BMISS;
2198 } else if (sc->opmode == NL80211_IFTYPE_ADHOC ||
2199 sc->opmode == NL80211_IFTYPE_MESH_POINT || 2225 sc->opmode == NL80211_IFTYPE_MESH_POINT ||
2200 sc->opmode == NL80211_IFTYPE_AP) { 2226 sc->opmode == NL80211_IFTYPE_AP) {
2201 /* 2227 /*
@@ -2228,18 +2254,13 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2228\********************/ 2254\********************/
2229 2255
2230static int 2256static int
2231ath5k_init(struct ath5k_softc *sc, bool is_resume) 2257ath5k_init(struct ath5k_softc *sc)
2232{ 2258{
2233 struct ath5k_hw *ah = sc->ah; 2259 struct ath5k_hw *ah = sc->ah;
2234 int ret, i; 2260 int ret, i;
2235 2261
2236 mutex_lock(&sc->lock); 2262 mutex_lock(&sc->lock);
2237 2263
2238 if (is_resume && !test_bit(ATH_STAT_STARTED, sc->status))
2239 goto out_ok;
2240
2241 __clear_bit(ATH_STAT_STARTED, sc->status);
2242
2243 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); 2264 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2244 2265
2245 /* 2266 /*
@@ -2271,15 +2292,12 @@ ath5k_init(struct ath5k_softc *sc, bool is_resume)
2271 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++) 2292 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2272 ath5k_hw_reset_key(ah, i); 2293 ath5k_hw_reset_key(ah, i);
2273 2294
2274 __set_bit(ATH_STAT_STARTED, sc->status);
2275
2276 /* Set ack to be sent at low bit-rates */ 2295 /* Set ack to be sent at low bit-rates */
2277 ath5k_hw_set_ack_bitrate_high(ah, false); 2296 ath5k_hw_set_ack_bitrate_high(ah, false);
2278 2297
2279 mod_timer(&sc->calib_tim, round_jiffies(jiffies + 2298 mod_timer(&sc->calib_tim, round_jiffies(jiffies +
2280 msecs_to_jiffies(ath5k_calinterval * 1000))); 2299 msecs_to_jiffies(ath5k_calinterval * 1000)));
2281 2300
2282out_ok:
2283 ret = 0; 2301 ret = 0;
2284done: 2302done:
2285 mmiowb(); 2303 mmiowb();
@@ -2334,7 +2352,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2334 * stop is preempted). 2352 * stop is preempted).
2335 */ 2353 */
2336static int 2354static int
2337ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend) 2355ath5k_stop_hw(struct ath5k_softc *sc)
2338{ 2356{
2339 int ret; 2357 int ret;
2340 2358
@@ -2365,8 +2383,6 @@ ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend)
2365 } 2383 }
2366 } 2384 }
2367 ath5k_txbuf_free(sc, sc->bbuf); 2385 ath5k_txbuf_free(sc, sc->bbuf);
2368 if (!is_suspend)
2369 __clear_bit(ATH_STAT_STARTED, sc->status);
2370 2386
2371 mmiowb(); 2387 mmiowb();
2372 mutex_unlock(&sc->lock); 2388 mutex_unlock(&sc->lock);
@@ -2457,6 +2473,7 @@ ath5k_intr(int irq, void *dev_id)
2457 | AR5K_INT_TXERR | AR5K_INT_TXEOL)) 2473 | AR5K_INT_TXERR | AR5K_INT_TXEOL))
2458 tasklet_schedule(&sc->txtq); 2474 tasklet_schedule(&sc->txtq);
2459 if (status & AR5K_INT_BMISS) { 2475 if (status & AR5K_INT_BMISS) {
2476 /* TODO */
2460 } 2477 }
2461 if (status & AR5K_INT_MIB) { 2478 if (status & AR5K_INT_MIB) {
2462 /* 2479 /*
@@ -2496,7 +2513,7 @@ ath5k_calibrate(unsigned long data)
2496 ieee80211_frequency_to_channel(sc->curchan->center_freq), 2513 ieee80211_frequency_to_channel(sc->curchan->center_freq),
2497 sc->curchan->hw_value); 2514 sc->curchan->hw_value);
2498 2515
2499 if (ath5k_hw_get_rf_gain(ah) == AR5K_RFGAIN_NEED_CHANGE) { 2516 if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
2500 /* 2517 /*
2501 * Rfgain is out of bounds, reset the chip 2518 * Rfgain is out of bounds, reset the chip
2502 * to load new gain values. 2519 * to load new gain values.
@@ -2619,6 +2636,17 @@ ath5k_init_leds(struct ath5k_softc *sc)
2619 sc->led_pin = 1; 2636 sc->led_pin = 1;
2620 sc->led_on = 1; /* active high */ 2637 sc->led_on = 1; /* active high */
2621 } 2638 }
2639 /*
2640 * Pin 3 on Foxconn chips used in Acer Aspire One (0x105b:e008) and
2641 * in emachines notebooks with AMBIT subsystem.
2642 */
2643 if (pdev->subsystem_vendor == PCI_VENDOR_ID_FOXCONN ||
2644 pdev->subsystem_vendor == PCI_VENDOR_ID_AMBIT) {
2645 __set_bit(ATH_STAT_LEDSOFT, sc->status);
2646 sc->led_pin = 3;
2647 sc->led_on = 0; /* active low */
2648 }
2649
2622 if (!test_bit(ATH_STAT_LEDSOFT, sc->status)) 2650 if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
2623 goto out; 2651 goto out;
2624 2652
@@ -2766,12 +2794,12 @@ ath5k_reset_wake(struct ath5k_softc *sc)
2766 2794
2767static int ath5k_start(struct ieee80211_hw *hw) 2795static int ath5k_start(struct ieee80211_hw *hw)
2768{ 2796{
2769 return ath5k_init(hw->priv, false); 2797 return ath5k_init(hw->priv);
2770} 2798}
2771 2799
2772static void ath5k_stop(struct ieee80211_hw *hw) 2800static void ath5k_stop(struct ieee80211_hw *hw)
2773{ 2801{
2774 ath5k_stop_hw(hw->priv, false); 2802 ath5k_stop_hw(hw->priv);
2775} 2803}
2776 2804
2777static int ath5k_add_interface(struct ieee80211_hw *hw, 2805static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -2856,7 +2884,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2856{ 2884{
2857 struct ath5k_softc *sc = hw->priv; 2885 struct ath5k_softc *sc = hw->priv;
2858 struct ath5k_hw *ah = sc->ah; 2886 struct ath5k_hw *ah = sc->ah;
2859 int ret; 2887 int ret = 0;
2860 2888
2861 mutex_lock(&sc->lock); 2889 mutex_lock(&sc->lock);
2862 if (sc->vif != vif) { 2890 if (sc->vif != vif) {
@@ -2882,9 +2910,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2882 } 2910 }
2883 ath5k_beacon_update(sc, beacon); 2911 ath5k_beacon_update(sc, beacon);
2884 } 2912 }
2885 mutex_unlock(&sc->lock);
2886 2913
2887 return ath5k_reset_wake(sc);
2888unlock: 2914unlock:
2889 mutex_unlock(&sc->lock); 2915 mutex_unlock(&sc->lock);
2890 return ret; 2916 return ret;
@@ -3020,8 +3046,8 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
3020 3046
3021static int 3047static int
3022ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3048ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3023 const u8 *local_addr, const u8 *addr, 3049 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3024 struct ieee80211_key_conf *key) 3050 struct ieee80211_key_conf *key)
3025{ 3051{
3026 struct ath5k_softc *sc = hw->priv; 3052 struct ath5k_softc *sc = hw->priv;
3027 int ret = 0; 3053 int ret = 0;
@@ -3044,7 +3070,8 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3044 3070
3045 switch (cmd) { 3071 switch (cmd) {
3046 case SET_KEY: 3072 case SET_KEY:
3047 ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, addr); 3073 ret = ath5k_hw_set_key(sc->ah, key->keyidx, key,
3074 sta ? sta->addr : NULL);
3048 if (ret) { 3075 if (ret) {
3049 ATH5K_ERR(sc, "can't set the key\n"); 3076 ATH5K_ERR(sc, "can't set the key\n");
3050 goto unlock; 3077 goto unlock;
@@ -3104,6 +3131,14 @@ ath5k_get_tsf(struct ieee80211_hw *hw)
3104} 3131}
3105 3132
3106static void 3133static void
3134ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
3135{
3136 struct ath5k_softc *sc = hw->priv;
3137
3138 ath5k_hw_set_tsf64(sc->ah, tsf);
3139}
3140
3141static void
3107ath5k_reset_tsf(struct ieee80211_hw *hw) 3142ath5k_reset_tsf(struct ieee80211_hw *hw)
3108{ 3143{
3109 struct ath5k_softc *sc = hw->priv; 3144 struct ath5k_softc *sc = hw->priv;
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index facc60ddada..c0fb8b5c42f 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -148,8 +148,7 @@ struct ath5k_softc {
148 u8 bssidmask[ETH_ALEN]; 148 u8 bssidmask[ETH_ALEN];
149 149
150 unsigned int led_pin, /* GPIO pin for driving LED */ 150 unsigned int led_pin, /* GPIO pin for driving LED */
151 led_on, /* pin setting for LED on */ 151 led_on; /* pin setting for LED on */
152 led_off; /* off time for current blink */
153 152
154 struct tasklet_struct restq; /* reset tasklet */ 153 struct tasklet_struct restq; /* reset tasklet */
155 154
diff --git a/drivers/net/wireless/ath5k/caps.c b/drivers/net/wireless/ath5k/caps.c
index 150f5ed204a..367a6c7d3cc 100644
--- a/drivers/net/wireless/ath5k/caps.c
+++ b/drivers/net/wireless/ath5k/caps.c
@@ -85,7 +85,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
85 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is 85 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
86 * connected */ 86 * connected */
87 if (AR5K_EEPROM_HDR_11B(ee_header) || 87 if (AR5K_EEPROM_HDR_11B(ee_header) ||
88 AR5K_EEPROM_HDR_11G(ee_header)) { 88 (AR5K_EEPROM_HDR_11G(ee_header) &&
89 ah->ah_version != AR5K_AR5211)) {
89 /* 2312 */ 90 /* 2312 */
90 ah->ah_capabilities.cap_range.range_2ghz_min = 2412; 91 ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
91 ah->ah_capabilities.cap_range.range_2ghz_max = 2732; 92 ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
@@ -94,7 +95,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
94 __set_bit(AR5K_MODE_11B, 95 __set_bit(AR5K_MODE_11B,
95 ah->ah_capabilities.cap_mode); 96 ah->ah_capabilities.cap_mode);
96 97
97 if (AR5K_EEPROM_HDR_11G(ee_header)) 98 if (AR5K_EEPROM_HDR_11G(ee_header) &&
99 ah->ah_version != AR5K_AR5211)
98 __set_bit(AR5K_MODE_11G, 100 __set_bit(AR5K_MODE_11G,
99 ah->ah_capabilities.cap_mode); 101 ah->ah_capabilities.cap_mode);
100 } 102 }
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index ccaeb5c219d..413ed689cd5 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -165,7 +165,7 @@ static int reg_show(struct seq_file *seq, void *p)
165 return 0; 165 return 0;
166} 166}
167 167
168static struct seq_operations register_seq_ops = { 168static const struct seq_operations register_seq_ops = {
169 .start = reg_start, 169 .start = reg_start,
170 .next = reg_next, 170 .next = reg_next,
171 .stop = reg_stop, 171 .stop = reg_stop,
@@ -193,43 +193,6 @@ static const struct file_operations fops_registers = {
193}; 193};
194 194
195 195
196/* debugfs: TSF */
197
198static ssize_t read_file_tsf(struct file *file, char __user *user_buf,
199 size_t count, loff_t *ppos)
200{
201 struct ath5k_softc *sc = file->private_data;
202 char buf[100];
203 snprintf(buf, sizeof(buf), "0x%016llx\n",
204 (unsigned long long)ath5k_hw_get_tsf64(sc->ah));
205 return simple_read_from_buffer(user_buf, count, ppos, buf, 19);
206}
207
208static ssize_t write_file_tsf(struct file *file,
209 const char __user *userbuf,
210 size_t count, loff_t *ppos)
211{
212 struct ath5k_softc *sc = file->private_data;
213 char buf[20];
214
215 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
216 return -EFAULT;
217
218 if (strncmp(buf, "reset", 5) == 0) {
219 ath5k_hw_reset_tsf(sc->ah);
220 printk(KERN_INFO "debugfs reset TSF\n");
221 }
222 return count;
223}
224
225static const struct file_operations fops_tsf = {
226 .read = read_file_tsf,
227 .write = write_file_tsf,
228 .open = ath5k_debugfs_open,
229 .owner = THIS_MODULE,
230};
231
232
233/* debugfs: beacons */ 196/* debugfs: beacons */
234 197
235static ssize_t read_file_beacon(struct file *file, char __user *user_buf, 198static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
@@ -423,9 +386,6 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
423 sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUGO, 386 sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUGO,
424 sc->debug.debugfs_phydir, sc, &fops_registers); 387 sc->debug.debugfs_phydir, sc, &fops_registers);
425 388
426 sc->debug.debugfs_tsf = debugfs_create_file("tsf", S_IWUSR | S_IRUGO,
427 sc->debug.debugfs_phydir, sc, &fops_tsf);
428
429 sc->debug.debugfs_beacon = debugfs_create_file("beacon", S_IWUSR | S_IRUGO, 389 sc->debug.debugfs_beacon = debugfs_create_file("beacon", S_IWUSR | S_IRUGO,
430 sc->debug.debugfs_phydir, sc, &fops_beacon); 390 sc->debug.debugfs_phydir, sc, &fops_beacon);
431 391
@@ -444,7 +404,6 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
444{ 404{
445 debugfs_remove(sc->debug.debugfs_debug); 405 debugfs_remove(sc->debug.debugfs_debug);
446 debugfs_remove(sc->debug.debugfs_registers); 406 debugfs_remove(sc->debug.debugfs_registers);
447 debugfs_remove(sc->debug.debugfs_tsf);
448 debugfs_remove(sc->debug.debugfs_beacon); 407 debugfs_remove(sc->debug.debugfs_beacon);
449 debugfs_remove(sc->debug.debugfs_reset); 408 debugfs_remove(sc->debug.debugfs_reset);
450 debugfs_remove(sc->debug.debugfs_phydir); 409 debugfs_remove(sc->debug.debugfs_phydir);
diff --git a/drivers/net/wireless/ath5k/debug.h b/drivers/net/wireless/ath5k/debug.h
index ffc52939330..66f69f04e55 100644
--- a/drivers/net/wireless/ath5k/debug.h
+++ b/drivers/net/wireless/ath5k/debug.h
@@ -72,7 +72,6 @@ struct ath5k_dbg_info {
72 struct dentry *debugfs_phydir; 72 struct dentry *debugfs_phydir;
73 struct dentry *debugfs_debug; 73 struct dentry *debugfs_debug;
74 struct dentry *debugfs_registers; 74 struct dentry *debugfs_registers;
75 struct dentry *debugfs_tsf;
76 struct dentry *debugfs_beacon; 75 struct dentry *debugfs_beacon;
77 struct dentry *debugfs_reset; 76 struct dentry *debugfs_reset;
78}; 77};
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
index 1cb7edfae62..a54ee7e4967 100644
--- a/drivers/net/wireless/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -137,6 +137,18 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
137 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) { 137 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
138 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0); 138 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
139 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1); 139 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
140
141 /* XXX: Don't know which versions include these two */
142 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC2, ee_misc2);
143
144 if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3)
145 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC3, ee_misc3);
146
147 if (ee->ee_version >= AR5K_EEPROM_VERSION_5_0) {
148 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC4, ee_misc4);
149 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC5, ee_misc5);
150 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC6, ee_misc6);
151 }
140 } 152 }
141 153
142 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) { 154 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
@@ -192,7 +204,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
192 204
193 /* Get antenna modes */ 205 /* Get antenna modes */
194 ah->ah_antenna[mode][0] = 206 ah->ah_antenna[mode][0] =
195 (ee->ee_ant_control[mode][0] << 4) | 0x1; 207 (ee->ee_ant_control[mode][0] << 4);
196 ah->ah_antenna[mode][AR5K_ANT_FIXED_A] = 208 ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
197 ee->ee_ant_control[mode][1] | 209 ee->ee_ant_control[mode][1] |
198 (ee->ee_ant_control[mode][2] << 6) | 210 (ee->ee_ant_control[mode][2] << 6) |
@@ -213,7 +225,8 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
213} 225}
214 226
215/* 227/*
216 * Read supported modes from eeprom 228 * Read supported modes and some mode-specific calibration data
229 * from eeprom
217 */ 230 */
218static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset, 231static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
219 unsigned int mode) 232 unsigned int mode)
@@ -315,6 +328,9 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
315 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0) 328 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0)
316 goto done; 329 goto done;
317 330
331 /* Note: >= v5 have bg freq piers at another location
332 * so these freq piers are ignored for >= v5 (should be 0xff
333 * anyway) */
318 switch(mode) { 334 switch(mode) {
319 case AR5K_EEPROM_MODE_11A: 335 case AR5K_EEPROM_MODE_11A:
320 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1) 336 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1)
@@ -442,7 +458,7 @@ ath5k_eeprom_read_turbo_modes(struct ath5k_hw *ah,
442 return 0; 458 return 0;
443} 459}
444 460
445 461/* Read mode-specific data (except power calibration data) */
446static int 462static int
447ath5k_eeprom_init_modes(struct ath5k_hw *ah) 463ath5k_eeprom_init_modes(struct ath5k_hw *ah)
448{ 464{
@@ -488,12 +504,22 @@ ath5k_eeprom_init_modes(struct ath5k_hw *ah)
488 return 0; 504 return 0;
489} 505}
490 506
507/* Used to match PCDAC steps with power values on RF5111 chips
508 * (eeprom versions < 4). For RF5111 we have 10 pre-defined PCDAC
509 * steps that match with the power values we read from eeprom. On
510 * older eeprom versions (< 3.2) these steps are equally spaced at
511 * 10% of the pcdac curve -until the curve reaches its maximum-
512 * (10 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
513 * these 10 steps are spaced in a different way. This function returns
514 * the pcdac steps based on eeprom version and curve min/max so that we
515 * can have pcdac/pwr points.
516 */
491static inline void 517static inline void
492ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp) 518ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
493{ 519{
494 const static u16 intercepts3[] = 520 static const u16 intercepts3[] =
495 { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 }; 521 { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
496 const static u16 intercepts3_2[] = 522 static const u16 intercepts3_2[] =
497 { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 }; 523 { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
498 const u16 *ip; 524 const u16 *ip;
499 int i; 525 int i;
@@ -507,37 +533,48 @@ ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
507 *vp++ = (ip[i] * max + (100 - ip[i]) * min) / 100; 533 *vp++ = (ip[i] * max + (100 - ip[i]) * min) / 100;
508} 534}
509 535
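/* Worked example (editor's illustration, not part of the patch): with the
 * >= 3.2 intercept table above and hypothetical pcdac_min = 1, pcdac_max = 63,
 * the interpolation produces 11 PCDAC steps spread between min and max:
 * { 1, 7, 13, 19, 25, 32, 38, 44, 50, 56, 63 }. */
static void pcdac_intercepts_example(unsigned char vp[11])
{
	static const unsigned int pct[] =
		{ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
	int i;

	for (i = 0; i < 11; i++)
		vp[i] = (pct[i] * 63 + (100 - pct[i]) * 1) / 100;
}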
536/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff
537 * frequency mask) */
510static inline int 538static inline int
511ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max, 539ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
512 struct ath5k_chan_pcal_info *pc, u8 *count) 540 struct ath5k_chan_pcal_info *pc, unsigned int mode)
513{ 541{
542 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
514 int o = *offset; 543 int o = *offset;
515 int i = 0; 544 int i = 0;
516 u8 f1, f2; 545 u8 freq1, freq2;
517 int ret; 546 int ret;
518 u16 val; 547 u16 val;
519 548
520 while(i < max) { 549 while(i < max) {
521 AR5K_EEPROM_READ(o++, val); 550 AR5K_EEPROM_READ(o++, val);
522 551
523 f1 = (val >> 8) & 0xff; 552 freq1 = (val >> 8) & 0xff;
524 f2 = val & 0xff; 553 freq2 = val & 0xff;
525 554
526 if (f1) 555 if (freq1) {
527 pc[i++].freq = f1; 556 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
557 freq1, mode);
558 ee->ee_n_piers[mode]++;
559 }
528 560
529 if (f2) 561 if (freq2) {
530 pc[i++].freq = f2; 562 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
563 freq2, mode);
564 ee->ee_n_piers[mode]++;
565 }
531 566
532 if (!f1 || !f2) 567 if (!freq1 || !freq2)
533 break; 568 break;
534 } 569 }
570
571 /* return new offset */
535 *offset = o; 572 *offset = o;
536 *count = i;
537 573
538 return 0; 574 return 0;
539} 575}
540 576
577/* Read frequency piers for 802.11a */
541static int 578static int
542ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset) 579ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
543{ 580{
@@ -550,7 +587,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
550 if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) { 587 if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
551 ath5k_eeprom_read_freq_list(ah, &offset, 588 ath5k_eeprom_read_freq_list(ah, &offset,
552 AR5K_EEPROM_N_5GHZ_CHAN, pcal, 589 AR5K_EEPROM_N_5GHZ_CHAN, pcal,
553 &ee->ee_n_piers[AR5K_EEPROM_MODE_11A]); 590 AR5K_EEPROM_MODE_11A);
554 } else { 591 } else {
555 mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version); 592 mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version);
556 593
@@ -577,23 +614,25 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
577 614
578 AR5K_EEPROM_READ(offset++, val); 615 AR5K_EEPROM_READ(offset++, val);
579 pcal[9].freq |= (val >> 10) & 0x3f; 616 pcal[9].freq |= (val >> 10) & 0x3f;
617
618 /* Fixed number of piers */
580 ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10; 619 ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10;
581 }
582 620
583 for(i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i += 1) { 621 for (i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i++) {
584 pcal[i].freq = ath5k_eeprom_bin2freq(ee, 622 pcal[i].freq = ath5k_eeprom_bin2freq(ee,
585 pcal[i].freq, AR5K_EEPROM_MODE_11A); 623 pcal[i].freq, AR5K_EEPROM_MODE_11A);
624 }
586 } 625 }
587 626
588 return 0; 627 return 0;
589} 628}
590 629
630/* Read frequency piers for 802.11bg on eeprom versions >= 5 and eemap >= 2 */
591static inline int 631static inline int
592ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset) 632ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
593{ 633{
594 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 634 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
595 struct ath5k_chan_pcal_info *pcal; 635 struct ath5k_chan_pcal_info *pcal;
596 int i;
597 636
598 switch(mode) { 637 switch(mode) {
599 case AR5K_EEPROM_MODE_11B: 638 case AR5K_EEPROM_MODE_11B:
@@ -608,23 +647,25 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
608 647
609 ath5k_eeprom_read_freq_list(ah, &offset, 648 ath5k_eeprom_read_freq_list(ah, &offset,
610 AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal, 649 AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal,
611 &ee->ee_n_piers[mode]); 650 mode);
612 for(i = 0; i < AR5K_EEPROM_N_2GHZ_CHAN_2413; i += 1) {
613 pcal[i].freq = ath5k_eeprom_bin2freq(ee,
614 pcal[i].freq, mode);
615 }
616 651
617 return 0; 652 return 0;
618} 653}
619 654
620 655/* Read power calibration for RF5111 chips
656 * For RF5111 we have an XPD -eXternal Power Detector- curve
657 * for each calibrated channel. Each curve has PCDAC steps on
658 * x axis and power on y axis and looks like a logarithmic
659 * function. To recreate the curve and pass the power values
660 * on to the pcdac table, we read 10 points here and interpolate later.
661 */
621static int 662static int
622ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode) 663ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
623{ 664{
624 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 665 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
625 struct ath5k_chan_pcal_info *pcal; 666 struct ath5k_chan_pcal_info *pcal;
626 int offset, ret; 667 int offset, ret;
627 int i, j; 668 int i;
628 u16 val; 669 u16 val;
629 670
630 offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); 671 offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
@@ -704,16 +745,22 @@ ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
704 745
705 ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min, 746 ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min,
706 cdata->pcdac_max, cdata->pcdac); 747 cdata->pcdac_max, cdata->pcdac);
707
708 for (j = 0; j < AR5K_EEPROM_N_PCDAC; j++) {
709 cdata->pwr[j] = (u16)
710 (AR5K_EEPROM_POWER_STEP * cdata->pwr[j]);
711 }
712 } 748 }
713 749
714 return 0; 750 return 0;
715} 751}
716 752
753/* Read power calibration for RF5112 chips
754 * For RF5112 we have 4 XPD -eXternal Power Detector- curves
755 * for each calibrated channel at 0, -6, -12 and -18dBm but we only
756 * use the highest (3) and the lowest (0) curves. Each curve has PCDAC
757 * steps on x axis and power on y axis and looks like a linear
758 * function. To recreate the curve and pass the power values
759 * on the pcdac table, we read 4 points for xpd 0 and 3 points
760 * for xpd 3 here and interpolate later.
761 *
762 * Note: Many vendors just use xpd 0 so xpd 3 is zeroed.
763 */
717static int 764static int
718ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) 765ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
719{ 766{
@@ -790,7 +837,7 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
790 837
791 /* PCDAC steps 838 /* PCDAC steps
792 * corresponding to the above power 839 * corresponding to the above power
793 * measurements (static) */ 840 * measurements (fixed) */
794 chan_pcal_info->pcdac_x3[0] = 20; 841 chan_pcal_info->pcdac_x3[0] = 20;
795 chan_pcal_info->pcdac_x3[1] = 35; 842 chan_pcal_info->pcdac_x3[1] = 35;
796 chan_pcal_info->pcdac_x3[2] = 63; 843 chan_pcal_info->pcdac_x3[2] = 63;
@@ -814,6 +861,13 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
814 return 0; 861 return 0;
815} 862}
816 863
864/* For RF2413 power calibration data doesn't start at a fixed location and
865 * if a mode is not supported, its section is missing -not zeroed-.
866 * So we need to calculate the starting offset for each section by using
867 * these two functions */
868
869/* Return the size of each section based on the mode and the number of pd
870 * gains available (maximum 4). */
817static inline unsigned int 871static inline unsigned int
818ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode) 872ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode)
819{ 873{
@@ -826,6 +880,8 @@ ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode)
826 return sz; 880 return sz;
827} 881}
828 882
883/* Return the starting offset for a section based on the modes supported
884 * and each section's size. */
829static unsigned int 885static unsigned int
830ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode) 886ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
831{ 887{
@@ -834,11 +890,13 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
834 switch(mode) { 890 switch(mode) {
835 case AR5K_EEPROM_MODE_11G: 891 case AR5K_EEPROM_MODE_11G:
836 if (AR5K_EEPROM_HDR_11B(ee->ee_header)) 892 if (AR5K_EEPROM_HDR_11B(ee->ee_header))
837 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) + 2; 893 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) +
894 AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
838 /* fall through */ 895 /* fall through */
839 case AR5K_EEPROM_MODE_11B: 896 case AR5K_EEPROM_MODE_11B:
840 if (AR5K_EEPROM_HDR_11A(ee->ee_header)) 897 if (AR5K_EEPROM_HDR_11A(ee->ee_header))
841 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) + 5; 898 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) +
899 AR5K_EEPROM_N_5GHZ_CHAN / 2;
842 /* fall through */ 900 /* fall through */
843 case AR5K_EEPROM_MODE_11A: 901 case AR5K_EEPROM_MODE_11A:
844 break; 902 break;
@@ -849,6 +907,17 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
849 return offset; 907 return offset;
850} 908}
851 909
910/* Read power calibration for RF2413 chips
911 * For RF2413 we have a PDDAC table (Power Detector) instead
912 * of a PCDAC and 4 pd gain curves for each calibrated channel.
913 * Each curve has PDDAC steps on x axis and power on y axis and
914 * looks like an exponential function. To recreate the curves
915 * we read here the points and interpolate later. Note that
916 * in most cases only higher and lower curves are used (like
917 * RF5112) but vendors have the opportunity to include all 4
918 * curves on eeprom. The final curve (higher power) has an extra
919 * point for better accuracy like RF5112.
920 */
852static int 921static int
853ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) 922ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
854{ 923{
@@ -868,6 +937,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
868 ee->ee_pd_gains[mode] = pd_gains; 937 ee->ee_pd_gains[mode] = pd_gains;
869 938
870 offset = ath5k_cal_data_offset_2413(ee, mode); 939 offset = ath5k_cal_data_offset_2413(ee, mode);
940 ee->ee_n_piers[mode] = 0;
871 switch (mode) { 941 switch (mode) {
872 case AR5K_EEPROM_MODE_11A: 942 case AR5K_EEPROM_MODE_11A:
873 if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) 943 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
@@ -1163,6 +1233,20 @@ static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned
1163 return 0; 1233 return 0;
1164} 1234}
1165 1235
1236/*
1237 * Read per channel calibration info from EEPROM
1238 *
1239 * This info is used to calibrate the baseband power table. Imagine
1240 * that for each channel there is a power curve that's hw specific
1241 * (depends on amplifier etc) and we try to "correct" this curve using
1242 * offests we pass on to phy chip (baseband -> before amplifier) so that
1243 * it can use accurate power values when setting tx power (takes amplifier's
1244 * performance on each channel into account).
1245 *
1246 * EEPROM provides us with the offsets for some pre-calibrated channels
1247 * and we have to interpolate to create the full table for these channels and
1248 * also the table for any channel.
1249 */
1166static int 1250static int
1167ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah) 1251ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1168{ 1252{
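
/* A hedged illustration of the interpolation step mentioned in the comment
 * above (not the driver's routine): a power offset for an arbitrary channel
 * can be derived from the two nearest pre-calibrated piers.  Frequencies in
 * MHz and offsets in 0.25 dB units are assumptions, as are the names. */
static short chan_offset_sketch(unsigned short freq,
				unsigned short f_left, short off_left,
				unsigned short f_right, short off_right)
{
	if (f_right == f_left || freq <= f_left)
		return off_left;
	if (freq >= f_right)
		return off_right;

	/* linear interpolation between the two calibrated piers */
	return off_left + (short)((freq - f_left) *
			(off_right - off_left) / (f_right - f_left));
}
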
@@ -1193,7 +1277,7 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1193 return 0; 1277 return 0;
1194} 1278}
1195 1279
1196/* Read conformance test limits */ 1280/* Read conformance test limits used for regulatory control */
1197static int 1281static int
1198ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) 1282ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
1199{ 1283{
@@ -1328,6 +1412,7 @@ ath5k_eeprom_init(struct ath5k_hw *ah)
1328 1412
1329 return 0; 1413 return 0;
1330} 1414}
1415
1331/* 1416/*
1332 * Read the MAC address from eeprom 1417 * Read the MAC address from eeprom
1333 */ 1418 */
@@ -1364,3 +1449,14 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
1364 return 0; 1449 return 0;
1365} 1450}
1366 1451
1452bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah)
1453{
1454 u16 data;
1455
1456 ath5k_hw_eeprom_read(ah, AR5K_EEPROM_IS_HB63, &data);
1457
1458 if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && data)
1459 return true;
1460 else
1461 return false;
1462}
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
index 09eb7d0176a..1deebc0257d 100644
--- a/drivers/net/wireless/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -25,6 +25,7 @@
25#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */ 25#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
26#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */ 26#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
27 27
28#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
28#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */ 29#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
29#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */ 30#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
30#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */ 31#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
diff --git a/drivers/net/wireless/ath5k/gpio.c b/drivers/net/wireless/ath5k/gpio.c
index b77205adc18..64a27e73d02 100644
--- a/drivers/net/wireless/ath5k/gpio.c
+++ b/drivers/net/wireless/ath5k/gpio.c
@@ -83,7 +83,7 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
83int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) 83int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
84{ 84{
85 ATH5K_TRACE(ah->ah_sc); 85 ATH5K_TRACE(ah->ah_sc);
86 if (gpio > AR5K_NUM_GPIO) 86 if (gpio >= AR5K_NUM_GPIO)
87 return -EINVAL; 87 return -EINVAL;
88 88
89 ath5k_hw_reg_write(ah, 89 ath5k_hw_reg_write(ah,
@@ -99,7 +99,7 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
99int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) 99int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
100{ 100{
101 ATH5K_TRACE(ah->ah_sc); 101 ATH5K_TRACE(ah->ah_sc);
102 if (gpio > AR5K_NUM_GPIO) 102 if (gpio >= AR5K_NUM_GPIO)
103 return -EINVAL; 103 return -EINVAL;
104 104
105 ath5k_hw_reg_write(ah, 105 ath5k_hw_reg_write(ah,
@@ -115,7 +115,7 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
115u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) 115u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
116{ 116{
117 ATH5K_TRACE(ah->ah_sc); 117 ATH5K_TRACE(ah->ah_sc);
118 if (gpio > AR5K_NUM_GPIO) 118 if (gpio >= AR5K_NUM_GPIO)
119 return 0xffffffff; 119 return 0xffffffff;
120 120
121 /* GPIO input magic */ 121 /* GPIO input magic */
@@ -131,7 +131,7 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
131 u32 data; 131 u32 data;
132 ATH5K_TRACE(ah->ah_sc); 132 ATH5K_TRACE(ah->ah_sc);
133 133
134 if (gpio > AR5K_NUM_GPIO) 134 if (gpio >= AR5K_NUM_GPIO)
135 return -EINVAL; 135 return -EINVAL;
136 136
137 /* GPIO output magic */ 137 /* GPIO output magic */
@@ -154,7 +154,7 @@ void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
154 u32 data; 154 u32 data;
155 155
156 ATH5K_TRACE(ah->ah_sc); 156 ATH5K_TRACE(ah->ah_sc);
157 if (gpio > AR5K_NUM_GPIO) 157 if (gpio >= AR5K_NUM_GPIO)
158 return; 158 return;
159 159
160 /* 160 /*
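
/* A hedged sketch of the off-by-one fixed in the gpio.c hunks above: with
 * AR5K_NUM_GPIO pins the valid indices run from 0 to AR5K_NUM_GPIO - 1, so
 * an index equal to the pin count must also be rejected.  The constant and
 * helper below are illustrative only. */
#define NUM_PINS	6

static int pin_valid_sketch(unsigned int pin)
{
	/* equivalent to rejecting pin >= NUM_PINS, as the new checks do */
	return pin < NUM_PINS;
}
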
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 450bd6e945f..44886434187 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -2,7 +2,7 @@
2 * Initial register settings functions 2 * Initial register settings functions
3 * 3 *
4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * 7 *
8 * Permission to use, copy, modify, and distribute this software for any 8 * Permission to use, copy, modify, and distribute this software for any
@@ -340,7 +340,7 @@ static const struct ath5k_ini ar5211_ini[] = {
340 * common on all cards/modes. 340 * common on all cards/modes.
341 * Note: Table is rewritten during 341 * Note: Table is rewritten during
342 * txpower setup later using calibration 342 * txpower setup later using calibration
343 * data etc. so next write is non-common 343 * data etc. so next write is non-common */
344 { AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff }, 344 { AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff },
345 { AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff }, 345 { AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff },
346 { AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff }, 346 { AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff },
@@ -371,7 +371,7 @@ static const struct ath5k_ini ar5211_ini[] = {
371 { AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff }, 371 { AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff },
372 { AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff }, 372 { AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff },
373 { AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff }, 373 { AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff },
374 { AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff },*/ 374 { AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff },
375 { AR5K_PHY_CCKTXCTL, 0x00000000 }, 375 { AR5K_PHY_CCKTXCTL, 0x00000000 },
376 { AR5K_PHY(642), 0x503e4646 }, 376 { AR5K_PHY(642), 0x503e4646 },
377 { AR5K_PHY_GAIN_2GHZ, 0x6480416c }, 377 { AR5K_PHY_GAIN_2GHZ, 0x6480416c },
@@ -386,85 +386,85 @@ static const struct ath5k_ini ar5211_ini[] = {
386}; 386};
387 387
388/* Initial mode-specific settings for AR5211 388/* Initial mode-specific settings for AR5211
389 * XXX: how about g / gTurbo ? RF5111 supports it, how about AR5211 ? 389 * 5211 supports OFDM-only g (draft g) but we
390 * Maybe 5211 supports OFDM-only g but we need to test it ! 390 * need to test it !
391 */ 391 */
392static const struct ath5k_ini_mode ar5211_ini_mode[] = { 392static const struct ath5k_ini_mode ar5211_ini_mode[] = {
393 { AR5K_TXCFG, 393 { AR5K_TXCFG,
394 /* a aTurbo b */ 394 /* a aTurbo b g (OFDM) */
395 { 0x00000015, 0x00000015, 0x0000001d } }, 395 { 0x00000015, 0x00000015, 0x0000001d, 0x00000015 } },
396 { AR5K_QUEUE_DFS_LOCAL_IFS(0), 396 { AR5K_QUEUE_DFS_LOCAL_IFS(0),
397 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 397 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
398 { AR5K_QUEUE_DFS_LOCAL_IFS(1), 398 { AR5K_QUEUE_DFS_LOCAL_IFS(1),
399 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 399 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
400 { AR5K_QUEUE_DFS_LOCAL_IFS(2), 400 { AR5K_QUEUE_DFS_LOCAL_IFS(2),
401 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 401 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
402 { AR5K_QUEUE_DFS_LOCAL_IFS(3), 402 { AR5K_QUEUE_DFS_LOCAL_IFS(3),
403 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 403 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
404 { AR5K_QUEUE_DFS_LOCAL_IFS(4), 404 { AR5K_QUEUE_DFS_LOCAL_IFS(4),
405 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 405 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
406 { AR5K_QUEUE_DFS_LOCAL_IFS(5), 406 { AR5K_QUEUE_DFS_LOCAL_IFS(5),
407 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 407 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
408 { AR5K_QUEUE_DFS_LOCAL_IFS(6), 408 { AR5K_QUEUE_DFS_LOCAL_IFS(6),
409 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 409 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
410 { AR5K_QUEUE_DFS_LOCAL_IFS(7), 410 { AR5K_QUEUE_DFS_LOCAL_IFS(7),
411 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 411 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
412 { AR5K_QUEUE_DFS_LOCAL_IFS(8), 412 { AR5K_QUEUE_DFS_LOCAL_IFS(8),
413 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 413 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
414 { AR5K_QUEUE_DFS_LOCAL_IFS(9), 414 { AR5K_QUEUE_DFS_LOCAL_IFS(9),
415 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f } }, 415 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
416 { AR5K_DCU_GBL_IFS_SLOT, 416 { AR5K_DCU_GBL_IFS_SLOT,
417 { 0x00000168, 0x000001e0, 0x000001b8 } }, 417 { 0x00000168, 0x000001e0, 0x000001b8, 0x00000168 } },
418 { AR5K_DCU_GBL_IFS_SIFS, 418 { AR5K_DCU_GBL_IFS_SIFS,
419 { 0x00000230, 0x000001e0, 0x000000b0 } }, 419 { 0x00000230, 0x000001e0, 0x000000b0, 0x00000230 } },
420 { AR5K_DCU_GBL_IFS_EIFS, 420 { AR5K_DCU_GBL_IFS_EIFS,
421 { 0x00000d98, 0x00001180, 0x00001f48 } }, 421 { 0x00000d98, 0x00001180, 0x00001f48, 0x00000d98 } },
422 { AR5K_DCU_GBL_IFS_MISC, 422 { AR5K_DCU_GBL_IFS_MISC,
423 { 0x0000a0e0, 0x00014068, 0x00005880 } }, 423 { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000a0e0 } },
424 { AR5K_TIME_OUT, 424 { AR5K_TIME_OUT,
425 { 0x04000400, 0x08000800, 0x20003000 } }, 425 { 0x04000400, 0x08000800, 0x20003000, 0x04000400 } },
426 { AR5K_USEC_5211, 426 { AR5K_USEC_5211,
427 { 0x0e8d8fa7, 0x0e8d8fcf, 0x01608f95 } }, 427 { 0x0e8d8fa7, 0x0e8d8fcf, 0x01608f95, 0x0e8d8fa7 } },
428 { AR5K_PHY_TURBO, 428 { AR5K_PHY_TURBO,
429 { 0x00000000, 0x00000003, 0x00000000 } }, 429 { 0x00000000, 0x00000003, 0x00000000, 0x00000000 } },
430 { AR5K_PHY(8), 430 { AR5K_PHY(8),
431 { 0x02020200, 0x02020200, 0x02010200 } }, 431 { 0x02020200, 0x02020200, 0x02010200, 0x02020200 } },
432 { AR5K_PHY(9), 432 { AR5K_PHY(9),
433 { 0x00000e0e, 0x00000e0e, 0x00000707 } }, 433 { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e } },
434 { AR5K_PHY(10), 434 { AR5K_PHY(10),
435 { 0x0a020001, 0x0a020001, 0x05010000 } }, 435 { 0x0a020001, 0x0a020001, 0x05010000, 0x0a020001 } },
436 { AR5K_PHY(13), 436 { AR5K_PHY(13),
437 { 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 437 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
438 { AR5K_PHY(14), 438 { AR5K_PHY(14),
439 { 0x00000007, 0x00000007, 0x0000000b } }, 439 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b } },
440 { AR5K_PHY(17), 440 { AR5K_PHY(17),
441 { 0x1372169c, 0x137216a5, 0x137216a8 } }, 441 { 0x1372169c, 0x137216a5, 0x137216a8, 0x1372169c } },
442 { AR5K_PHY(18), 442 { AR5K_PHY(18),
443 { 0x0018ba67, 0x0018ba67, 0x0018ba69 } }, 443 { 0x0018ba67, 0x0018ba67, 0x0018ba69, 0x0018ba69 } },
444 { AR5K_PHY(20), 444 { AR5K_PHY(20),
445 { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } }, 445 { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } },
446 { AR5K_PHY_SIG, 446 { AR5K_PHY_SIG,
447 { 0x7e800d2e, 0x7e800d2e, 0x7ec00d2e } }, 447 { 0x7e800d2e, 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } },
448 { AR5K_PHY_AGCCOARSE, 448 { AR5K_PHY_AGCCOARSE,
449 { 0x31375d5e, 0x31375d5e, 0x313a5d5e } }, 449 { 0x31375d5e, 0x31375d5e, 0x313a5d5e, 0x31375d5e } },
450 { AR5K_PHY_AGCCTL, 450 { AR5K_PHY_AGCCTL,
451 { 0x0000bd10, 0x0000bd10, 0x0000bd38 } }, 451 { 0x0000bd10, 0x0000bd10, 0x0000bd38, 0x0000bd10 } },
452 { AR5K_PHY_NF, 452 { AR5K_PHY_NF,
453 { 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, 453 { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
454 { AR5K_PHY_RX_DELAY, 454 { AR5K_PHY_RX_DELAY,
455 { 0x00002710, 0x00002710, 0x0000157c } }, 455 { 0x00002710, 0x00002710, 0x0000157c, 0x00002710 } },
456 { AR5K_PHY(70), 456 { AR5K_PHY(70),
457 { 0x00000190, 0x00000190, 0x00000084 } }, 457 { 0x00000190, 0x00000190, 0x00000084, 0x00000190 } },
458 { AR5K_PHY_FRAME_CTL_5211, 458 { AR5K_PHY_FRAME_CTL_5211,
459 { 0x6fe01020, 0x6fe01020, 0x6fe00920 } }, 459 { 0x6fe01020, 0x6fe01020, 0x6fe00920, 0x6fe01020 } },
460 { AR5K_PHY_PCDAC_TXPOWER_BASE_5211, 460 { AR5K_PHY_PCDAC_TXPOWER_BASE,
461 { 0x05ff14ff, 0x05ff14ff, 0x05ff14ff } }, 461 { 0x05ff14ff, 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } },
462 { AR5K_RF_BUFFER_CONTROL_4, 462 { AR5K_RF_BUFFER_CONTROL_4,
463 { 0x00000010, 0x00000014, 0x00000010 } }, 463 { 0x00000010, 0x00000014, 0x00000010, 0x00000010 } },
464}; 464};
465 465
466/* Initial register settings for AR5212 */ 466/* Initial register settings for AR5212 */
467static const struct ath5k_ini ar5212_ini[] = { 467static const struct ath5k_ini ar5212_ini_common_start[] = {
468 { AR5K_RXDP, 0x00000000 }, 468 { AR5K_RXDP, 0x00000000 },
469 { AR5K_RXCFG, 0x00000005 }, 469 { AR5K_RXCFG, 0x00000005 },
470 { AR5K_MIBC, 0x00000000 }, 470 { AR5K_MIBC, 0x00000000 },
@@ -485,91 +485,83 @@ static const struct ath5k_ini ar5212_ini[] = {
485 { AR5K_QUEUE_TXDP(9), 0x00000000 }, 485 { AR5K_QUEUE_TXDP(9), 0x00000000 },
486 { AR5K_DCU_FP, 0x00000000 }, 486 { AR5K_DCU_FP, 0x00000000 },
487 { AR5K_DCU_TXP, 0x00000000 }, 487 { AR5K_DCU_TXP, 0x00000000 },
488 { AR5K_DCU_TX_FILTER_0_BASE, 0x00000000 }, 488 /* Tx filter table 0 (32 entries) */
489 /* Unknown table */ 489 { AR5K_DCU_TX_FILTER_0(0), 0x00000000 }, /* DCU 0 */
490 { 0x1078, 0x00000000 }, 490 { AR5K_DCU_TX_FILTER_0(1), 0x00000000 },
491 { 0x10b8, 0x00000000 }, 491 { AR5K_DCU_TX_FILTER_0(2), 0x00000000 },
492 { 0x10f8, 0x00000000 }, 492 { AR5K_DCU_TX_FILTER_0(3), 0x00000000 },
493 { 0x1138, 0x00000000 }, 493 { AR5K_DCU_TX_FILTER_0(4), 0x00000000 }, /* DCU 1 */
494 { 0x1178, 0x00000000 }, 494 { AR5K_DCU_TX_FILTER_0(5), 0x00000000 },
495 { 0x11b8, 0x00000000 }, 495 { AR5K_DCU_TX_FILTER_0(6), 0x00000000 },
496 { 0x11f8, 0x00000000 }, 496 { AR5K_DCU_TX_FILTER_0(7), 0x00000000 },
497 { 0x1238, 0x00000000 }, 497 { AR5K_DCU_TX_FILTER_0(8), 0x00000000 }, /* DCU 2 */
498 { 0x1278, 0x00000000 }, 498 { AR5K_DCU_TX_FILTER_0(9), 0x00000000 },
499 { 0x12b8, 0x00000000 }, 499 { AR5K_DCU_TX_FILTER_0(10), 0x00000000 },
500 { 0x12f8, 0x00000000 }, 500 { AR5K_DCU_TX_FILTER_0(11), 0x00000000 },
501 { 0x1338, 0x00000000 }, 501 { AR5K_DCU_TX_FILTER_0(12), 0x00000000 }, /* DCU 3 */
502 { 0x1378, 0x00000000 }, 502 { AR5K_DCU_TX_FILTER_0(13), 0x00000000 },
503 { 0x13b8, 0x00000000 }, 503 { AR5K_DCU_TX_FILTER_0(14), 0x00000000 },
504 { 0x13f8, 0x00000000 }, 504 { AR5K_DCU_TX_FILTER_0(15), 0x00000000 },
505 { 0x1438, 0x00000000 }, 505 { AR5K_DCU_TX_FILTER_0(16), 0x00000000 }, /* DCU 4 */
506 { 0x1478, 0x00000000 }, 506 { AR5K_DCU_TX_FILTER_0(17), 0x00000000 },
507 { 0x14b8, 0x00000000 }, 507 { AR5K_DCU_TX_FILTER_0(18), 0x00000000 },
508 { 0x14f8, 0x00000000 }, 508 { AR5K_DCU_TX_FILTER_0(19), 0x00000000 },
509 { 0x1538, 0x00000000 }, 509 { AR5K_DCU_TX_FILTER_0(20), 0x00000000 }, /* DCU 5 */
510 { 0x1578, 0x00000000 }, 510 { AR5K_DCU_TX_FILTER_0(21), 0x00000000 },
511 { 0x15b8, 0x00000000 }, 511 { AR5K_DCU_TX_FILTER_0(22), 0x00000000 },
512 { 0x15f8, 0x00000000 }, 512 { AR5K_DCU_TX_FILTER_0(23), 0x00000000 },
513 { 0x1638, 0x00000000 }, 513 { AR5K_DCU_TX_FILTER_0(24), 0x00000000 }, /* DCU 6 */
514 { 0x1678, 0x00000000 }, 514 { AR5K_DCU_TX_FILTER_0(25), 0x00000000 },
515 { 0x16b8, 0x00000000 }, 515 { AR5K_DCU_TX_FILTER_0(26), 0x00000000 },
516 { 0x16f8, 0x00000000 }, 516 { AR5K_DCU_TX_FILTER_0(27), 0x00000000 },
517 { 0x1738, 0x00000000 }, 517 { AR5K_DCU_TX_FILTER_0(28), 0x00000000 }, /* DCU 7 */
518 { 0x1778, 0x00000000 }, 518 { AR5K_DCU_TX_FILTER_0(29), 0x00000000 },
519 { 0x17b8, 0x00000000 }, 519 { AR5K_DCU_TX_FILTER_0(30), 0x00000000 },
520 { 0x17f8, 0x00000000 }, 520 { AR5K_DCU_TX_FILTER_0(31), 0x00000000 },
521 { 0x103c, 0x00000000 }, 521 /* Tx filter table 1 (16 entries) */
522 { 0x107c, 0x00000000 }, 522 { AR5K_DCU_TX_FILTER_1(0), 0x00000000 },
523 { 0x10bc, 0x00000000 }, 523 { AR5K_DCU_TX_FILTER_1(1), 0x00000000 },
524 { 0x10fc, 0x00000000 }, 524 { AR5K_DCU_TX_FILTER_1(2), 0x00000000 },
525 { 0x113c, 0x00000000 }, 525 { AR5K_DCU_TX_FILTER_1(3), 0x00000000 },
526 { 0x117c, 0x00000000 }, 526 { AR5K_DCU_TX_FILTER_1(4), 0x00000000 },
527 { 0x11bc, 0x00000000 }, 527 { AR5K_DCU_TX_FILTER_1(5), 0x00000000 },
528 { 0x11fc, 0x00000000 }, 528 { AR5K_DCU_TX_FILTER_1(6), 0x00000000 },
529 { 0x123c, 0x00000000 }, 529 { AR5K_DCU_TX_FILTER_1(7), 0x00000000 },
530 { 0x127c, 0x00000000 }, 530 { AR5K_DCU_TX_FILTER_1(8), 0x00000000 },
531 { 0x12bc, 0x00000000 }, 531 { AR5K_DCU_TX_FILTER_1(9), 0x00000000 },
532 { 0x12fc, 0x00000000 }, 532 { AR5K_DCU_TX_FILTER_1(10), 0x00000000 },
533 { 0x133c, 0x00000000 }, 533 { AR5K_DCU_TX_FILTER_1(11), 0x00000000 },
534 { 0x137c, 0x00000000 }, 534 { AR5K_DCU_TX_FILTER_1(12), 0x00000000 },
535 { 0x13bc, 0x00000000 }, 535 { AR5K_DCU_TX_FILTER_1(13), 0x00000000 },
536 { 0x13fc, 0x00000000 }, 536 { AR5K_DCU_TX_FILTER_1(14), 0x00000000 },
537 { 0x143c, 0x00000000 }, 537 { AR5K_DCU_TX_FILTER_1(15), 0x00000000 },
538 { 0x147c, 0x00000000 }, 538 { AR5K_DCU_TX_FILTER_CLR, 0x00000000 },
539 { AR5K_DCU_TX_FILTER_SET, 0x00000000 },
539 { AR5K_DCU_TX_FILTER_CLR, 0x00000000 }, 540 { AR5K_DCU_TX_FILTER_CLR, 0x00000000 },
540 { AR5K_DCU_TX_FILTER_SET, 0x00000000 }, 541 { AR5K_DCU_TX_FILTER_SET, 0x00000000 },
541 { AR5K_STA_ID1, 0x00000000 }, 542 { AR5K_STA_ID1, 0x00000000 },
542 { AR5K_BSS_ID0, 0x00000000 }, 543 { AR5K_BSS_ID0, 0x00000000 },
543 { AR5K_BSS_ID1, 0x00000000 }, 544 { AR5K_BSS_ID1, 0x00000000 },
544 /*{ AR5K_RSSI_THR, 0x00000000 },*/ /* Found on SuperAG cards */ 545 { AR5K_BEACON_5211, 0x00000000 },
545 { AR5K_BEACON_5211, 0x00000000 }, /* Found on SuperAG cards */ 546 { AR5K_CFP_PERIOD_5211, 0x00000000 },
546 { AR5K_CFP_PERIOD_5211, 0x00000000 }, /* Found on SuperAG cards */ 547 { AR5K_TIMER0_5211, 0x00000030 },
547 { AR5K_TIMER0_5211, 0x00000030 }, /* Found on SuperAG cards */ 548 { AR5K_TIMER1_5211, 0x0007ffff },
548 { AR5K_TIMER1_5211, 0x0007ffff }, /* Found on SuperAG cards */ 549 { AR5K_TIMER2_5211, 0x01ffffff },
549 { AR5K_TIMER2_5211, 0x01ffffff }, /* Found on SuperAG cards */ 550 { AR5K_TIMER3_5211, 0x00000031 },
550 { AR5K_TIMER3_5211, 0x00000031 }, /* Found on SuperAG cards */ 551 { AR5K_CFP_DUR_5211, 0x00000000 },
551 { AR5K_CFP_DUR_5211, 0x00000000 }, /* Found on SuperAG cards */
552 { AR5K_RX_FILTER_5211, 0x00000000 }, 552 { AR5K_RX_FILTER_5211, 0x00000000 },
553 { AR5K_DIAG_SW_5211, 0x00000000 }, 553 { AR5K_DIAG_SW_5211, 0x00000000 },
554 { AR5K_ADDAC_TEST, 0x00000000 }, 554 { AR5K_ADDAC_TEST, 0x00000000 },
555 { AR5K_DEFAULT_ANTENNA, 0x00000000 }, 555 { AR5K_DEFAULT_ANTENNA, 0x00000000 },
556 { 0x8080, 0x00000000 }, 556 { AR5K_FRAME_CTL_QOSM, 0x000fc78f },
557 /*{ 0x805c, 0xffffc7ff },*/ /* Old value */
558 { 0x805c, 0x000fc78f },
559 { AR5K_NAV_5211, 0x00000000 }, /* Not found on recent */
560 { AR5K_RTS_OK_5211, 0x00000000 }, /* dumps but it makes */
561 { AR5K_RTS_FAIL_5211, 0x00000000 }, /* sense to reset counters */
562 { AR5K_ACK_FAIL_5211, 0x00000000 }, /* since pcu registers */
 563 { AR5K_FCS_FAIL_5211, 0x00000000 }, /* are skipped during chan*/
564 { AR5K_BEACON_CNT_5211, 0x00000000 }, /* change */
565 { AR5K_XRMODE, 0x2a82301a }, 557 { AR5K_XRMODE, 0x2a82301a },
566 { AR5K_XRDELAY, 0x05dc01e0 }, 558 { AR5K_XRDELAY, 0x05dc01e0 },
567 { AR5K_XRTIMEOUT, 0x1f402710 }, 559 { AR5K_XRTIMEOUT, 0x1f402710 },
568 { AR5K_XRCHIRP, 0x01f40000 }, 560 { AR5K_XRCHIRP, 0x01f40000 },
569 { AR5K_XRSTOMP, 0x00001e1c }, 561 { AR5K_XRSTOMP, 0x00001e1c },
570 { AR5K_SLEEP0, 0x0002aaaa }, /* Found on SuperAG cards */ 562 { AR5K_SLEEP0, 0x0002aaaa },
571 { AR5K_SLEEP1, 0x02005555 }, /* Found on SuperAG cards */ 563 { AR5K_SLEEP1, 0x02005555 },
572 { AR5K_SLEEP2, 0x00000000 }, /* Found on SuperAG cards */ 564 { AR5K_SLEEP2, 0x00000000 },
573 { AR5K_BSS_IDM0, 0xffffffff }, 565 { AR5K_BSS_IDM0, 0xffffffff },
574 { AR5K_BSS_IDM1, 0x0000ffff }, 566 { AR5K_BSS_IDM1, 0x0000ffff },
575 { AR5K_TXPC, 0x00000000 }, 567 { AR5K_TXPC, 0x00000000 },
@@ -577,7 +569,8 @@ static const struct ath5k_ini ar5212_ini[] = {
577 { AR5K_PROFCNT_RX, 0x00000000 }, 569 { AR5K_PROFCNT_RX, 0x00000000 },
578 { AR5K_PROFCNT_RXCLR, 0x00000000 }, 570 { AR5K_PROFCNT_RXCLR, 0x00000000 },
579 { AR5K_PROFCNT_CYCLE, 0x00000000 }, 571 { AR5K_PROFCNT_CYCLE, 0x00000000 },
580 { 0x80fc, 0x00000088 }, 572 { AR5K_QUIET_CTL1, 0x00000088 },
 573 /* Initial rate duration table (32 entries) */
581 { AR5K_RATE_DUR(0), 0x00000000 }, 574 { AR5K_RATE_DUR(0), 0x00000000 },
582 { AR5K_RATE_DUR(1), 0x0000008c }, 575 { AR5K_RATE_DUR(1), 0x0000008c },
583 { AR5K_RATE_DUR(2), 0x000000e4 }, 576 { AR5K_RATE_DUR(2), 0x000000e4 },
@@ -610,881 +603,625 @@ static const struct ath5k_ini ar5212_ini[] = {
610 { AR5K_RATE_DUR(29), 0x0000007f }, 603 { AR5K_RATE_DUR(29), 0x0000007f },
611 { AR5K_RATE_DUR(30), 0x000000a2 }, 604 { AR5K_RATE_DUR(30), 0x000000a2 },
612 { AR5K_RATE_DUR(31), 0x00000000 }, 605 { AR5K_RATE_DUR(31), 0x00000000 },
613 { 0x8100, 0x00010002}, 606 { AR5K_QUIET_CTL2, 0x00010002 },
614 { AR5K_TSF_PARM, 0x00000001 }, 607 { AR5K_TSF_PARM, 0x00000001 },
615 { 0x8108, 0x000000c0 }, 608 { AR5K_QOS_NOACK, 0x000000c0 },
616 { AR5K_PHY_ERR_FIL, 0x00000000 }, 609 { AR5K_PHY_ERR_FIL, 0x00000000 },
617 { 0x8110, 0x00000168 }, 610 { AR5K_XRLAT_TX, 0x00000168 },
618 { 0x8114, 0x00000000 }, 611 { AR5K_ACKSIFS, 0x00000000 },
619 /* Some kind of table 612 /* Rate -> db table
620 * also notice ...03<-02<-01<-00) */ 613 * notice ...03<-02<-01<-00 ! */
621 { 0x87c0, 0x03020100 }, 614 { AR5K_RATE2DB(0), 0x03020100 },
622 { 0x87c4, 0x07060504 }, 615 { AR5K_RATE2DB(1), 0x07060504 },
623 { 0x87c8, 0x0b0a0908 }, 616 { AR5K_RATE2DB(2), 0x0b0a0908 },
624 { 0x87cc, 0x0f0e0d0c }, 617 { AR5K_RATE2DB(3), 0x0f0e0d0c },
625 { 0x87d0, 0x13121110 }, 618 { AR5K_RATE2DB(4), 0x13121110 },
626 { 0x87d4, 0x17161514 }, 619 { AR5K_RATE2DB(5), 0x17161514 },
627 { 0x87d8, 0x1b1a1918 }, 620 { AR5K_RATE2DB(6), 0x1b1a1918 },
628 { 0x87dc, 0x1f1e1d1c }, 621 { AR5K_RATE2DB(7), 0x1f1e1d1c },
629 /* loop ? */ 622 /* Db -> Rate table */
630 { 0x87e0, 0x03020100 }, 623 { AR5K_DB2RATE(0), 0x03020100 },
631 { 0x87e4, 0x07060504 }, 624 { AR5K_DB2RATE(1), 0x07060504 },
632 { 0x87e8, 0x0b0a0908 }, 625 { AR5K_DB2RATE(2), 0x0b0a0908 },
633 { 0x87ec, 0x0f0e0d0c }, 626 { AR5K_DB2RATE(3), 0x0f0e0d0c },
634 { 0x87f0, 0x13121110 }, 627 { AR5K_DB2RATE(4), 0x13121110 },
635 { 0x87f4, 0x17161514 }, 628 { AR5K_DB2RATE(5), 0x17161514 },
636 { 0x87f8, 0x1b1a1918 }, 629 { AR5K_DB2RATE(6), 0x1b1a1918 },
637 { 0x87fc, 0x1f1e1d1c }, 630 { AR5K_DB2RATE(7), 0x1f1e1d1c },
638 /* PHY registers */ 631 /* PHY registers (Common settings
639 /*{ AR5K_PHY_AGC, 0x00000000 },*/ 632 * for all chips/modes) */
640 { AR5K_PHY(3), 0xad848e19 }, 633 { AR5K_PHY(3), 0xad848e19 },
641 { AR5K_PHY(4), 0x7d28e000 }, 634 { AR5K_PHY(4), 0x7d28e000 },
642 { AR5K_PHY_TIMING_3, 0x9c0a9f6b }, 635 { AR5K_PHY_TIMING_3, 0x9c0a9f6b },
643 { AR5K_PHY_ACT, 0x00000000 }, 636 { AR5K_PHY_ACT, 0x00000000 },
644 /*{ AR5K_PHY(11), 0x00022ffe },*/ 637 { AR5K_PHY(16), 0x206a017a },
645 /*{ AR5K_PHY(15), 0x00020100 },*/ 638 { AR5K_PHY(21), 0x00000859 },
646 { AR5K_PHY(16), 0x206a017a }, 639 { AR5K_PHY_BIN_MASK_1, 0x00000000 },
647 /*{ AR5K_PHY(19), 0x1284613c },*/ 640 { AR5K_PHY_BIN_MASK_2, 0x00000000 },
648 { AR5K_PHY(21), 0x00000859 }, 641 { AR5K_PHY_BIN_MASK_3, 0x00000000 },
649 { AR5K_PHY(64), 0x00000000 }, 642 { AR5K_PHY_BIN_MASK_CTL, 0x00800000 },
650 { AR5K_PHY(65), 0x00000000 }, 643 { AR5K_PHY_ANT_CTL, 0x00000001 },
651 { AR5K_PHY(66), 0x00000000 },
652 { AR5K_PHY(67), 0x00800000 },
653 { AR5K_PHY(68), 0x00000001 },
654 /*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */ 644 /*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */
655 { AR5K_PHY(71), 0x00000c80 }, 645 { AR5K_PHY_MAX_RX_LEN, 0x00000c80 },
656 { AR5K_PHY_IQ, 0x05100000 }, 646 { AR5K_PHY_IQ, 0x05100000 },
657 { AR5K_PHY(74), 0x00000001 }, 647 { AR5K_PHY_WARM_RESET, 0x00000001 },
658 { AR5K_PHY(75), 0x00000004 }, 648 { AR5K_PHY_CTL, 0x00000004 },
659 { AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 }, 649 { AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 },
660 { AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d }, 650 { AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d },
661 { AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f }, 651 { AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f },
662 /*{ AR5K_PHY(80), 0x00000004 },*/ 652 { AR5K_PHY(82), 0x9280b212 },
663 { AR5K_PHY(82), 0x9280b212 }, 653 { AR5K_PHY_RADAR, 0x5d50e188 },
664 { AR5K_PHY_RADAR, 0x5d50e188 },
665 /*{ AR5K_PHY(86), 0x000000ff },*/ 654 /*{ AR5K_PHY(86), 0x000000ff },*/
666 { AR5K_PHY(87), 0x004b6a8e }, 655 { AR5K_PHY(87), 0x004b6a8e },
667 { AR5K_PHY(90), 0x000003ce }, 656 { AR5K_PHY_NFTHRES, 0x000003ce },
668 { AR5K_PHY(92), 0x192fb515 }, 657 { AR5K_PHY_RESTART, 0x192fb515 },
669 /*{ AR5K_PHY(93), 0x00000000 },*/ 658 { AR5K_PHY(94), 0x00000001 },
670 { AR5K_PHY(94), 0x00000001 }, 659 { AR5K_PHY_RFBUS_REQ, 0x00000000 },
671 { AR5K_PHY(95), 0x00000000 },
672 /*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */ 660 /*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */
673 /*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */ 661 /*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */
674 { AR5K_PHY(644), 0x00806333 }, 662 { AR5K_PHY(644), 0x00806333 },
675 { AR5K_PHY(645), 0x00106c10 }, 663 { AR5K_PHY(645), 0x00106c10 },
676 { AR5K_PHY(646), 0x009c4060 }, 664 { AR5K_PHY(646), 0x009c4060 },
677 { AR5K_PHY(647), 0x1483800a }, 665 /* { AR5K_PHY(647), 0x1483800a }, */
678 /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413/2425 */ 666 /* { AR5K_PHY(648), 0x01831061 }, */ /* Old value */
679 { AR5K_PHY(648), 0x01831061 }, 667 { AR5K_PHY(648), 0x018830c6 },
680 { AR5K_PHY(649), 0x00000400 }, 668 { AR5K_PHY(649), 0x00000400 },
681 /*{ AR5K_PHY(650), 0x000001b5 },*/ 669 /*{ AR5K_PHY(650), 0x000001b5 },*/
682 { AR5K_PHY(651), 0x00000000 }, 670 { AR5K_PHY(651), 0x00000000 },
683 { AR5K_PHY_TXPOWER_RATE3, 0x20202020 }, 671 { AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
684 { AR5K_PHY_TXPOWER_RATE2, 0x20202020 }, 672 { AR5K_PHY_TXPOWER_RATE2, 0x20202020 },
685 /*{ AR5K_PHY(655), 0x13c889af },*/ 673 /*{ AR5K_PHY(655), 0x13c889af },*/
686 { AR5K_PHY(656), 0x38490a20 }, 674 { AR5K_PHY(656), 0x38490a20 },
687 { AR5K_PHY(657), 0x00007bb6 }, 675 { AR5K_PHY(657), 0x00007bb6 },
688 { AR5K_PHY(658), 0x0fff3ffc }, 676 { AR5K_PHY(658), 0x0fff3ffc },
689 /*{ AR5K_PHY_CCKTXCTL, 0x00000000 },*/
690}; 677};
691 678
692/* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */ 679/* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */
693static const struct ath5k_ini_mode ar5212_ini_mode_start[] = { 680static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
694 { AR5K_PHY(640),
695 /* a/XR aTurbo b g (DYN) gTurbo */
696 { 0x00000008, 0x00000008, 0x0000000b, 0x0000000e, 0x0000000e } },
697 { AR5K_PHY(0),
698 { 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 } },
699 { AR5K_QUEUE_DFS_LOCAL_IFS(0), 681 { AR5K_QUEUE_DFS_LOCAL_IFS(0),
700 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 682 /* a/XR aTurbo b g (DYN) gTurbo */
683 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
701 { AR5K_QUEUE_DFS_LOCAL_IFS(1), 684 { AR5K_QUEUE_DFS_LOCAL_IFS(1),
702 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 685 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
703 { AR5K_QUEUE_DFS_LOCAL_IFS(2), 686 { AR5K_QUEUE_DFS_LOCAL_IFS(2),
704 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 687 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
705 { AR5K_QUEUE_DFS_LOCAL_IFS(3), 688 { AR5K_QUEUE_DFS_LOCAL_IFS(3),
706 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 689 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
707 { AR5K_QUEUE_DFS_LOCAL_IFS(4), 690 { AR5K_QUEUE_DFS_LOCAL_IFS(4),
708 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 691 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
709 { AR5K_QUEUE_DFS_LOCAL_IFS(5), 692 { AR5K_QUEUE_DFS_LOCAL_IFS(5),
710 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 693 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
711 { AR5K_QUEUE_DFS_LOCAL_IFS(6), 694 { AR5K_QUEUE_DFS_LOCAL_IFS(6),
712 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 695 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
713 { AR5K_QUEUE_DFS_LOCAL_IFS(7), 696 { AR5K_QUEUE_DFS_LOCAL_IFS(7),
714 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 697 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
715 { AR5K_QUEUE_DFS_LOCAL_IFS(8), 698 { AR5K_QUEUE_DFS_LOCAL_IFS(8),
716 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 699 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
717 { AR5K_QUEUE_DFS_LOCAL_IFS(9), 700 { AR5K_QUEUE_DFS_LOCAL_IFS(9),
718 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 701 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
719 { AR5K_DCU_GBL_IFS_SIFS, 702 { AR5K_DCU_GBL_IFS_SIFS,
720 { 0x00000230, 0x000001e0, 0x000000b0, 0x00000160, 0x000001e0 } }, 703 { 0x00000230, 0x000001e0, 0x000000b0, 0x00000160, 0x000001e0 } },
721 { AR5K_DCU_GBL_IFS_SLOT, 704 { AR5K_DCU_GBL_IFS_SLOT,
722 { 0x00000168, 0x000001e0, 0x000001b8, 0x0000018c, 0x000001e0 } }, 705 { 0x00000168, 0x000001e0, 0x000001b8, 0x0000018c, 0x000001e0 } },
723 { AR5K_DCU_GBL_IFS_EIFS, 706 { AR5K_DCU_GBL_IFS_EIFS,
724 { 0x00000e60, 0x00001180, 0x00001f1c, 0x00003e38, 0x00001180 } }, 707 { 0x00000e60, 0x00001180, 0x00001f1c, 0x00003e38, 0x00001180 } },
725 { AR5K_DCU_GBL_IFS_MISC, 708 { AR5K_DCU_GBL_IFS_MISC,
726 { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000b0e0, 0x00014068 } }, 709 { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000b0e0, 0x00014068 } },
727 { AR5K_TIME_OUT, 710 { AR5K_TIME_OUT,
728 { 0x03e803e8, 0x06e006e0, 0x04200420, 0x08400840, 0x06e006e0 } }, 711 { 0x03e803e8, 0x06e006e0, 0x04200420, 0x08400840, 0x06e006e0 } },
729 { AR5K_PHY_TURBO, 712 { AR5K_PHY_TURBO,
730 { 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000003 } }, 713 { 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000003 } },
731 { AR5K_PHY(8), 714 { AR5K_PHY(8),
732 { 0x02020200, 0x02020200, 0x02010200, 0x02020200, 0x02020200 } }, 715 { 0x02020200, 0x02020200, 0x02010200, 0x02020200, 0x02020200 } },
733 { AR5K_PHY(9), 716 { AR5K_PHY_RF_CTL2,
734 { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e, 0x00000e0e } }, 717 { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e, 0x00000e0e } },
735 { AR5K_PHY(17), 718 { AR5K_PHY_SETTLING,
736 { 0x1372161c, 0x13721c25, 0x13721722, 0x137216a2, 0x13721c25 } }, 719 { 0x1372161c, 0x13721c25, 0x13721722, 0x137216a2, 0x13721c25 } },
737 { AR5K_PHY_AGCCTL, 720 { AR5K_PHY_AGCCTL,
738 { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d18 } }, 721 { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d18 } },
739 { AR5K_PHY_NF, 722 { AR5K_PHY_NF,
740 { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, 723 { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
741 { AR5K_PHY(26), 724 { AR5K_PHY_WEAK_OFDM_HIGH_THR,
742 { 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 } }, 725 { 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 } },
743 { AR5K_PHY(70), 726 { AR5K_PHY(70),
744 { 0x000001b8, 0x000001b8, 0x00000084, 0x00000108, 0x000001b8 } }, 727 { 0x000001b8, 0x000001b8, 0x00000084, 0x00000108, 0x000001b8 } },
745 { AR5K_PHY(73), 728 { AR5K_PHY_OFDM_SELFCORR,
746 { 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05 } }, 729 { 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05 } },
747 { 0xa230, 730 { 0xa230,
748 { 0x00000000, 0x00000000, 0x00000000, 0x00000108, 0x00000000 } }, 731 { 0x00000000, 0x00000000, 0x00000000, 0x00000108, 0x00000000 } },
749}; 732};
750 733
751/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */ 734/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
752/* New dump pending */ 735static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
753static const struct ath5k_ini_mode ar5212_rf5111_ini_mode_end[] = {
754 { AR5K_PHY(640), /* This one differs from ar5212_ini_mode_start ! */
755 /* a/XR aTurbo b g (DYN) gTurbo */
756 { 0x00000000, 0x00000000, 0x00000003, 0x00000006, 0x00000006 } },
757 { AR5K_TXCFG, 736 { AR5K_TXCFG,
758 { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, 737 /* a/XR aTurbo b g (DYN) gTurbo */
738 { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } },
759 { AR5K_USEC_5211, 739 { AR5K_USEC_5211,
760 { 0x128d8fa7, 0x09880fcf, 0x04e00f95, 0x12e00fab, 0x09880fcf } }, 740 { 0x128d8fa7, 0x09880fcf, 0x04e00f95, 0x12e00fab, 0x09880fcf } },
761 { AR5K_PHY(10), 741 { AR5K_PHY_RF_CTL3,
762 { 0x0a020001, 0x0a020001, 0x05010100, 0x0a020001, 0x0a020001 } }, 742 { 0x0a020001, 0x0a020001, 0x05010100, 0x0a020001, 0x0a020001 } },
763 { AR5K_PHY(13), 743 { AR5K_PHY_RF_CTL4,
764 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 744 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
765 { AR5K_PHY(14), 745 { AR5K_PHY_PA_CTL,
766 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, 746 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } },
767 { AR5K_PHY(18), 747 { AR5K_PHY_GAIN,
768 { 0x0018da5a, 0x0018da5a, 0x0018ca69, 0x0018ca69, 0x0018ca69 } }, 748 { 0x0018da5a, 0x0018da5a, 0x0018ca69, 0x0018ca69, 0x0018ca69 } },
769 { AR5K_PHY(20), 749 { AR5K_PHY_DESIRED_SIZE,
770 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, 750 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
771 { AR5K_PHY_SIG, 751 { AR5K_PHY_SIG,
772 { 0x7e800d2e, 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e, 0x7e800d2e } }, 752 { 0x7e800d2e, 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e, 0x7e800d2e } },
773 { AR5K_PHY_AGCCOARSE, 753 { AR5K_PHY_AGCCOARSE,
774 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137615e } }, 754 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137615e } },
775 { AR5K_PHY(27), 755 { AR5K_PHY_WEAK_OFDM_LOW_THR,
776 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb080, 0x050cb080 } }, 756 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb080, 0x050cb080 } },
777 { AR5K_PHY_RX_DELAY, 757 { AR5K_PHY_RX_DELAY,
778 { 0x00002710, 0x00002710, 0x0000157c, 0x00002af8, 0x00002710 } }, 758 { 0x00002710, 0x00002710, 0x0000157c, 0x00002af8, 0x00002710 } },
779 { AR5K_PHY_FRAME_CTL_5211, 759 { AR5K_PHY_FRAME_CTL_5211,
780 { 0xf7b81020, 0xf7b81020, 0xf7b80d20, 0xf7b81020, 0xf7b81020 } }, 760 { 0xf7b81020, 0xf7b81020, 0xf7b80d20, 0xf7b81020, 0xf7b81020 } },
781 { AR5K_PHY_GAIN_2GHZ, 761 { AR5K_PHY_GAIN_2GHZ,
782 { 0x642c416a, 0x642c416a, 0x6440416a, 0x6440416a, 0x6440416a } }, 762 { 0x642c416a, 0x642c416a, 0x6440416a, 0x6440416a, 0x6440416a } },
783 { 0xa21c, 763 { AR5K_PHY_CCK_RX_CTL_4,
784 { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, 764 { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } },
785 { AR5K_DCU_FP, 765};
786 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 766
787 { AR5K_PHY_AGC, 767static const struct ath5k_ini rf5111_ini_common_end[] = {
788 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 768 { AR5K_DCU_FP, 0x00000000 },
789 { AR5K_PHY(11), 769 { AR5K_PHY_AGC, 0x00000000 },
790 { 0x00022ffe, 0x00022ffe, 0x00022ffe, 0x00022ffe, 0x00022ffe } }, 770 { AR5K_PHY_ADC_CTL, 0x00022ffe },
791 { AR5K_PHY(15), 771 { 0x983c, 0x00020100 },
792 { 0x00020100, 0x00020100, 0x00020100, 0x00020100, 0x00020100 } }, 772 { AR5K_PHY_GAIN_OFFSET, 0x1284613c },
793 { AR5K_PHY(19), 773 { AR5K_PHY_PAPD_PROBE, 0x00004883 },
794 { 0x1284613c, 0x1284613c, 0x1284613c, 0x1284613c, 0x1284613c } }, 774 { 0x9940, 0x00000004 },
795 { AR5K_PHY_PAPD_PROBE, 775 { 0x9958, 0x000000ff },
796 { 0x00004883, 0x00004883, 0x00004883, 0x00004883, 0x00004883 } }, 776 { 0x9974, 0x00000000 },
797 { AR5K_PHY(80), 777 { AR5K_PHY_SPENDING, 0x00000018 },
798 { 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 } }, 778 { AR5K_PHY_CCKTXCTL, 0x00000000 },
799 { AR5K_PHY(86), 779 { AR5K_PHY_CCK_CROSSCORR, 0xd03e6788 },
800 { 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff } }, 780 { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
801 { AR5K_PHY(93), 781 { 0xa23c, 0x13c889af },
802 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
803 { AR5K_PHY_SPENDING,
804 { 0x00000018, 0x00000018, 0x00000018, 0x00000018, 0x00000018 } },
805 { AR5K_PHY_CCKTXCTL,
806 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
807 { AR5K_PHY(642),
808 { 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
809 { 0xa228,
810 { 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5 } },
811 { 0xa23c,
812 { 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af } },
813}; 782};
814 783
815/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */ 784/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
816/* XXX: No dumps for turbog yet, but i found settings from old values so it should be ok */ 785static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
817static const struct ath5k_ini_mode ar5212_rf5112_ini_mode_end[] = {
818 { AR5K_TXCFG, 786 { AR5K_TXCFG,
819 /* a/XR aTurbo b g (DYN) gTurbo */ 787 /* a/XR aTurbo b g (DYN) gTurbo */
820 { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, 788 { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } },
821 { AR5K_USEC_5211, 789 { AR5K_USEC_5211,
822 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, 790 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
823 { AR5K_PHY(10), 791 { AR5K_PHY_RF_CTL3,
824 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, 792 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } },
825 { AR5K_PHY(13), 793 { AR5K_PHY_RF_CTL4,
826 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 794 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
827 { AR5K_PHY(14), 795 { AR5K_PHY_PA_CTL,
828 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, 796 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } },
829 { AR5K_PHY(18), 797 { AR5K_PHY_GAIN,
830 { 0x0018da6d, 0x0018da6d, 0x0018ca75, 0x0018ca75, 0x0018ca75 } }, 798 { 0x0018da6d, 0x0018da6d, 0x0018ca75, 0x0018ca75, 0x0018ca75 } },
831 { AR5K_PHY(20), 799 { AR5K_PHY_DESIRED_SIZE,
832 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, 800 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
833 { AR5K_PHY_SIG, 801 { AR5K_PHY_SIG,
834 { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7ee80d2e } }, 802 { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7ee80d2e } },
835 { AR5K_PHY_AGCCOARSE, 803 { AR5K_PHY_AGCCOARSE,
836 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e } }, 804 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e } },
837 { AR5K_PHY(27), 805 { AR5K_PHY_WEAK_OFDM_LOW_THR,
838 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, 806 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
839 { AR5K_PHY_RX_DELAY, 807 { AR5K_PHY_RX_DELAY,
840 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, 808 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
841 { AR5K_PHY_FRAME_CTL_5211, 809 { AR5K_PHY_FRAME_CTL_5211,
842 { 0xf7b81020, 0xf7b81020, 0xf7b80d10, 0xf7b81010, 0xf7b81010 } }, 810 { 0xf7b81020, 0xf7b81020, 0xf7b80d10, 0xf7b81010, 0xf7b81010 } },
843 { AR5K_PHY_CCKTXCTL, 811 { AR5K_PHY_CCKTXCTL,
844 { 0x00000000, 0x00000000, 0x00000008, 0x00000008, 0x00000008 } }, 812 { 0x00000000, 0x00000000, 0x00000008, 0x00000008, 0x00000008 } },
845 { AR5K_PHY(642), 813 { AR5K_PHY_CCK_CROSSCORR,
846 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 814 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
847 { AR5K_PHY_GAIN_2GHZ, 815 { AR5K_PHY_GAIN_2GHZ,
848 { 0x642c0140, 0x642c0140, 0x6442c160, 0x6442c160, 0x6442c160 } }, 816 { 0x642c0140, 0x642c0140, 0x6442c160, 0x6442c160, 0x6442c160 } },
849 { 0xa21c, 817 { AR5K_PHY_CCK_RX_CTL_4,
850 { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, 818 { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } },
851 { AR5K_DCU_FP, 819};
852 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 820
853 { AR5K_PHY_AGC, 821static const struct ath5k_ini rf5112_ini_common_end[] = {
854 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 822 { AR5K_DCU_FP, 0x00000000 },
855 { AR5K_PHY(11), 823 { AR5K_PHY_AGC, 0x00000000 },
856 { 0x00022ffe, 0x00022ffe, 0x00022ffe, 0x00022ffe, 0x00022ffe } }, 824 { AR5K_PHY_ADC_CTL, 0x00022ffe },
857 { AR5K_PHY(15), 825 { 0x983c, 0x00020100 },
858 { 0x00020100, 0x00020100, 0x00020100, 0x00020100, 0x00020100 } }, 826 { AR5K_PHY_GAIN_OFFSET, 0x1284613c },
859 { AR5K_PHY(19), 827 { AR5K_PHY_PAPD_PROBE, 0x00004882 },
860 { 0x1284613c, 0x1284613c, 0x1284613c, 0x1284613c, 0x1284613c } }, 828 { 0x9940, 0x00000004 },
861 { AR5K_PHY_PAPD_PROBE, 829 { 0x9958, 0x000000ff },
862 { 0x00004882, 0x00004882, 0x00004882, 0x00004882, 0x00004882 } }, 830 { 0x9974, 0x00000000 },
863 { AR5K_PHY(80), 831 { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
864 { 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 } }, 832 { 0xa23c, 0x13c889af },
865 { AR5K_PHY(86),
866 { 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff } },
867 { AR5K_PHY(93),
868 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
869 { 0xa228,
870 { 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5 } },
871 { 0xa23c,
872 { 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af } },
873}; 833};
874 834
875/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */ 835/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
876/* XXX: No dumps for turbog yet, so turbog is the same with g here with some
877 * minor tweaking based on dumps from other chips */
878static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { 836static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
879 { AR5K_TXCFG, 837 { AR5K_TXCFG,
880 /* a/XR aTurbo b g gTurbo */ 838 /* a/XR aTurbo b g (DYN) gTurbo */
881 { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, 839 { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } },
882 { AR5K_USEC_5211, 840 { AR5K_USEC_5211,
883 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, 841 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
884 { AR5K_PHY(10), 842 { AR5K_PHY_RF_CTL3,
885 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, 843 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } },
886 { AR5K_PHY(13), 844 { AR5K_PHY_RF_CTL4,
887 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 845 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
888 { AR5K_PHY(14), 846 { AR5K_PHY_PA_CTL,
889 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, 847 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } },
890 { AR5K_PHY(18), 848 { AR5K_PHY_GAIN,
891 { 0x0018fa61, 0x0018fa61, 0x001a1a63, 0x001a1a63, 0x001a1a63 } }, 849 { 0x0018fa61, 0x0018fa61, 0x001a1a63, 0x001a1a63, 0x001a1a63 } },
892 { AR5K_PHY(20), 850 { AR5K_PHY_DESIRED_SIZE,
893 { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } }, 851 { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } },
894 { AR5K_PHY_SIG, 852 { AR5K_PHY_SIG,
895 { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, 853 { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
896 { AR5K_PHY_AGCCOARSE, 854 { AR5K_PHY_AGCCOARSE,
897 { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } }, 855 { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } },
898 { AR5K_PHY(27), 856 { AR5K_PHY_WEAK_OFDM_LOW_THR,
899 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, 857 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
900 { AR5K_PHY_RX_DELAY, 858 { AR5K_PHY_RX_DELAY,
901 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, 859 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
902 { AR5K_PHY_FRAME_CTL_5211, 860 { AR5K_PHY_FRAME_CTL_5211,
903 { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, 861 { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } },
904 { AR5K_PHY_CCKTXCTL, 862 { AR5K_PHY_CCKTXCTL,
905 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 863 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
906 { AR5K_PHY(642), 864 { AR5K_PHY_CCK_CROSSCORR,
907 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 865 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
908 { AR5K_PHY_GAIN_2GHZ, 866 { AR5K_PHY_GAIN_2GHZ,
909 { 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 } }, 867 { 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 } },
910 { 0xa21c, 868 { AR5K_PHY_CCK_RX_CTL_4,
911 { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, 869 { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } },
912 { 0xa300, 870 { 0xa300,
913 { 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 } }, 871 { 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 } },
914 { 0xa304, 872 { 0xa304,
915 { 0x30032602, 0x30032602, 0x30032602, 0x30032602, 0x30032602 } }, 873 { 0x30032602, 0x30032602, 0x30032602, 0x30032602, 0x30032602 } },
916 { 0xa308, 874 { 0xa308,
917 { 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06 } }, 875 { 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06 } },
918 { 0xa30c, 876 { 0xa30c,
919 { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } }, 877 { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
920 { 0xa310, 878 { 0xa310,
921 { 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f } }, 879 { 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f } },
922 { 0xa314, 880 { 0xa314,
923 { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } }, 881 { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
924 { 0xa318, 882 { 0xa318,
925 { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } }, 883 { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
926 { 0xa31c, 884 { 0xa31c,
927 { 0x90cf865b, 0x90cf865b, 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } }, 885 { 0x90cf865b, 0x90cf865b, 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } },
928 { 0xa320, 886 { 0xa320,
929 { 0x9d4f970f, 0x9d4f970f, 0x9b4f970f, 0x9b4f970f, 0x9b4f970f } }, 887 { 0x9d4f970f, 0x9d4f970f, 0x9b4f970f, 0x9b4f970f, 0x9b4f970f } },
930 { 0xa324, 888 { 0xa324,
931 { 0xa7cfa38f, 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f, 0xa3cf9f8f } }, 889 { 0xa7cfa38f, 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f, 0xa3cf9f8f } },
932 { 0xa328, 890 { 0xa328,
933 { 0xb55faf1f, 0xb55faf1f, 0xb35faf1f, 0xb35faf1f, 0xb35faf1f } }, 891 { 0xb55faf1f, 0xb55faf1f, 0xb35faf1f, 0xb35faf1f, 0xb35faf1f } },
934 { 0xa32c, 892 { 0xa32c,
935 { 0xbddfb99f, 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f, 0xbbdfb99f } }, 893 { 0xbddfb99f, 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f, 0xbbdfb99f } },
936 { 0xa330, 894 { 0xa330,
937 { 0xcb7fc53f, 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f, 0xcb7fc73f } }, 895 { 0xcb7fc53f, 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f, 0xcb7fc73f } },
938 { 0xa334, 896 { 0xa334,
939 { 0xd5ffd1bf, 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } }, 897 { 0xd5ffd1bf, 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } },
940 { AR5K_DCU_FP, 898};
941 { 0x000003e0, 0x000003e0, 0x000003e0, 0x000003e0, 0x000003e0 } }, 899
942 { 0x4068, 900static const struct ath5k_ini rf5413_ini_common_end[] = {
943 { 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 } }, 901 { AR5K_DCU_FP, 0x000003e0 },
944 { 0x8060, 902 { AR5K_5414_CBCFG, 0x00000010 },
945 { 0x0000000f, 0x0000000f, 0x0000000f, 0x0000000f, 0x0000000f } }, 903 { AR5K_SEQ_MASK, 0x0000000f },
946 { 0x809c, 904 { 0x809c, 0x00000000 },
947 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 905 { 0x80a0, 0x00000000 },
948 { 0x80a0, 906 { AR5K_MIC_QOS_CTL, 0x00000000 },
949 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 907 { AR5K_MIC_QOS_SEL, 0x00000000 },
950 { 0x8118, 908 { AR5K_MISC_MODE, 0x00000000 },
951 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 909 { AR5K_OFDM_FIL_CNT, 0x00000000 },
952 { 0x811c, 910 { AR5K_CCK_FIL_CNT, 0x00000000 },
953 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 911 { AR5K_PHYERR_CNT1, 0x00000000 },
954 { 0x8120, 912 { AR5K_PHYERR_CNT1_MASK, 0x00000000 },
955 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 913 { AR5K_PHYERR_CNT2, 0x00000000 },
956 { 0x8124, 914 { AR5K_PHYERR_CNT2_MASK, 0x00000000 },
957 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 915 { AR5K_TSF_THRES, 0x00000000 },
958 { 0x8128, 916 { 0x8140, 0x800003f9 },
959 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 917 { 0x8144, 0x00000000 },
960 { 0x812c, 918 { AR5K_PHY_AGC, 0x00000000 },
961 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 919 { AR5K_PHY_ADC_CTL, 0x0000a000 },
962 { 0x8130, 920 { 0x983c, 0x00200400 },
963 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 921 { AR5K_PHY_GAIN_OFFSET, 0x1284233c },
964 { 0x8134, 922 { AR5K_PHY_SCR, 0x0000001f },
965 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 923 { AR5K_PHY_SLMT, 0x00000080 },
966 { 0x8138, 924 { AR5K_PHY_SCAL, 0x0000000e },
967 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 925 { 0x9958, 0x00081fff },
968 { 0x813c, 926 { AR5K_PHY_TIMING_7, 0x00000000 },
969 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 927 { AR5K_PHY_TIMING_8, 0x02800000 },
970 { 0x8140, 928 { AR5K_PHY_TIMING_11, 0x00000000 },
971 { 0x800003f9, 0x800003f9, 0x800003f9, 0x800003f9, 0x800003f9 } }, 929 { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
972 { 0x8144, 930 { 0x99e4, 0xaaaaaaaa },
973 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 931 { 0x99e8, 0x3c466478 },
974 { AR5K_PHY_AGC, 932 { 0x99ec, 0x000000aa },
975 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 933 { AR5K_PHY_SCLOCK, 0x0000000c },
976 { AR5K_PHY(11), 934 { AR5K_PHY_SDELAY, 0x000000ff },
977 { 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000 } }, 935 { AR5K_PHY_SPENDING, 0x00000014 },
978 { AR5K_PHY(15), 936 { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
979 { 0x00200400, 0x00200400, 0x00200400, 0x00200400, 0x00200400 } }, 937 { 0xa23c, 0x93c889af },
980 { AR5K_PHY(19), 938 { AR5K_PHY_FAST_ADC, 0x00000001 },
981 { 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c } }, 939 { 0xa250, 0x0000a000 },
982 { AR5K_PHY_SCR, 940 { AR5K_PHY_BLUETOOTH, 0x00000000 },
983 { 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f } }, 941 { AR5K_PHY_TPC_RG1, 0x0cc75380 },
984 { AR5K_PHY_SLMT, 942 { 0xa25c, 0x0f0f0f01 },
985 { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } }, 943 { 0xa260, 0x5f690f01 },
986 { AR5K_PHY_SCAL, 944 { 0xa264, 0x00418a11 },
987 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, 945 { 0xa268, 0x00000000 },
988 { AR5K_PHY(86), 946 { AR5K_PHY_TPC_RG5, 0x0c30c16a },
989 { 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff } }, 947 { 0xa270, 0x00820820 },
990 { AR5K_PHY(96), 948 { 0xa274, 0x081b7caa },
991 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 949 { 0xa278, 0x1ce739ce },
992 { AR5K_PHY(97), 950 { 0xa27c, 0x051701ce },
993 { 0x02800000, 0x02800000, 0x02800000, 0x02800000, 0x02800000 } }, 951 { 0xa338, 0x00000000 },
994 { AR5K_PHY(104), 952 { 0xa33c, 0x00000000 },
995 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 953 { 0xa340, 0x00000000 },
996 { AR5K_PHY(120), 954 { 0xa344, 0x00000000 },
997 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 955 { 0xa348, 0x3fffffff },
998 { AR5K_PHY(121), 956 { 0xa34c, 0x3fffffff },
999 { 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa } }, 957 { 0xa350, 0x3fffffff },
1000 { AR5K_PHY(122), 958 { 0xa354, 0x0003ffff },
1001 { 0x3c466478, 0x3c466478, 0x3c466478, 0x3c466478, 0x3c466478 } }, 959 { 0xa358, 0x79a8aa1f },
1002 { AR5K_PHY(123), 960 { 0xa35c, 0x066c420f },
1003 { 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa } }, 961 { 0xa360, 0x0f282207 },
1004 { AR5K_PHY_SCLOCK, 962 { 0xa364, 0x17601685 },
1005 { 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c } }, 963 { 0xa368, 0x1f801104 },
1006 { AR5K_PHY_SDELAY, 964 { 0xa36c, 0x37a00c03 },
1007 { 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff } }, 965 { 0xa370, 0x3fc40883 },
1008 { AR5K_PHY_SPENDING, 966 { 0xa374, 0x57c00803 },
1009 { 0x00000014, 0x00000014, 0x00000014, 0x00000014, 0x00000014 } }, 967 { 0xa378, 0x5fd80682 },
1010 { 0xa228, 968 { 0xa37c, 0x7fe00482 },
1011 { 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5 } }, 969 { 0xa380, 0x7f3c7bba },
1012 { 0xa23c, 970 { 0xa384, 0xf3307ff0 },
1013 { 0x93c889af, 0x93c889af, 0x93c889af, 0x93c889af, 0x93c889af } },
1014 { 0xa24c,
1015 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
1016 { 0xa250,
1017 { 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000 } },
1018 { 0xa254,
1019 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1020 { 0xa258,
1021 { 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380 } },
1022 { 0xa25c,
1023 { 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01 } },
1024 { 0xa260,
1025 { 0x5f690f01, 0x5f690f01, 0x5f690f01, 0x5f690f01, 0x5f690f01 } },
1026 { 0xa264,
1027 { 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11 } },
1028 { 0xa268,
1029 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1030 { 0xa26c,
1031 { 0x0c30c16a, 0x0c30c16a, 0x0c30c16a, 0x0c30c16a, 0x0c30c16a } },
1032 { 0xa270,
1033 { 0x00820820, 0x00820820, 0x00820820, 0x00820820, 0x00820820 } },
1034 { 0xa274,
1035 { 0x081b7caa, 0x081b7caa, 0x081b7caa, 0x081b7caa, 0x081b7caa } },
1036 { 0xa278,
1037 { 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce } },
1038 { 0xa27c,
1039 { 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce } },
1040 { 0xa338,
1041 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1042 { 0xa33c,
1043 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1044 { 0xa340,
1045 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1046 { 0xa344,
1047 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1048 { 0xa348,
1049 { 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1050 { 0xa34c,
1051 { 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1052 { 0xa350,
1053 { 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1054 { 0xa354,
1055 { 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff } },
1056 { 0xa358,
1057 { 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f } },
1058 { 0xa35c,
1059 { 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f } },
1060 { 0xa360,
1061 { 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207 } },
1062 { 0xa364,
1063 { 0x17601685, 0x17601685, 0x17601685, 0x17601685, 0x17601685 } },
1064 { 0xa368,
1065 { 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104 } },
1066 { 0xa36c,
1067 { 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03 } },
1068 { 0xa370,
1069 { 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883 } },
1070 { 0xa374,
1071 { 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803 } },
1072 { 0xa378,
1073 { 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682 } },
1074 { 0xa37c,
1075 { 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482 } },
1076 { 0xa380,
1077 { 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba } },
1078 { 0xa384,
1079 { 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0 } },
1080}; 971};
1081 972
1082/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */ 973/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
1083/* XXX: No dumps for turbog yet, so turbog is the same with g here with some 974/* XXX: a mode ? */
1084 * minor tweaking based on dumps from other chips */
1085static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { 975static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
1086 { AR5K_TXCFG, 976 { AR5K_TXCFG,
1087 /* b g gTurbo */ 977 /* a/XR aTurbo b g (DYN) gTurbo */
1088 { 0x00000015, 0x00000015, 0x00000015 } }, 978 { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } },
1089 { AR5K_USEC_5211, 979 { AR5K_USEC_5211,
1090 { 0x04e01395, 0x12e013ab, 0x098813cf } }, 980 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
1091 { AR5K_PHY(10), 981 { AR5K_PHY_RF_CTL3,
1092 { 0x05020000, 0x0a020001, 0x0a020001 } }, 982 { 0x0a020001, 0x0a020001, 0x05020000, 0x0a020001, 0x0a020001 } },
1093 { AR5K_PHY(13), 983 { AR5K_PHY_RF_CTL4,
1094 { 0x00000e00, 0x00000e00, 0x00000e00 } }, 984 { 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00 } },
1095 { AR5K_PHY(14), 985 { AR5K_PHY_PA_CTL,
1096 { 0x0000000a, 0x0000000a, 0x0000000a } }, 986 { 0x00000002, 0x00000002, 0x0000000a, 0x0000000a, 0x0000000a } },
1097 { AR5K_PHY(18), 987 { AR5K_PHY_GAIN,
1098 { 0x001a6a64, 0x001a6a64, 0x001a6a64 } }, 988 { 0x0018da6d, 0x0018da6d, 0x001a6a64, 0x001a6a64, 0x001a6a64 } },
1099 { AR5K_PHY(20), 989 { AR5K_PHY_DESIRED_SIZE,
1100 { 0x0de8b0da, 0x0c98b0da, 0x0c98b0da } }, 990 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da, 0x0de8b0da } },
1101 { AR5K_PHY_SIG, 991 { AR5K_PHY_SIG,
1102 { 0x7ee80d2e, 0x7ec80d2e, 0x7ec80d2e } }, 992 { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e, 0x7e800d2e } },
1103 { AR5K_PHY_AGCCOARSE, 993 { AR5K_PHY_AGCCOARSE,
1104 { 0x3137665e, 0x3139605e, 0x3139605e } }, 994 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3139605e, 0x3137665e } },
1105 { AR5K_PHY(27), 995 { AR5K_PHY_WEAK_OFDM_LOW_THR,
1106 { 0x050cb081, 0x050cb081, 0x050cb081 } }, 996 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
1107 { AR5K_PHY_RX_DELAY, 997 { AR5K_PHY_RX_DELAY,
1108 { 0x0000044c, 0x00000898, 0x000007d0 } }, 998 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
1109 { AR5K_PHY_FRAME_CTL_5211, 999 { AR5K_PHY_FRAME_CTL_5211,
1110 { 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, 1000 { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } },
1111 { AR5K_PHY_CCKTXCTL, 1001 { AR5K_PHY_CCKTXCTL,
1112 { 0x00000000, 0x00000000, 0x00000000 } }, 1002 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1113 { AR5K_PHY(642), 1003 { AR5K_PHY_CCK_CROSSCORR,
1114 { 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 1004 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
1115 { AR5K_PHY_GAIN_2GHZ, 1005 { AR5K_PHY_GAIN_2GHZ,
1116 { 0x0042c140, 0x0042c140, 0x0042c140 } }, 1006 { 0x002c0140, 0x002c0140, 0x0042c140, 0x0042c140, 0x0042c140 } },
1117 { 0xa21c, 1007 { AR5K_PHY_CCK_RX_CTL_4,
1118 { 0x1863800a, 0x1883800a, 0x1883800a } }, 1008 { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } },
1119 { AR5K_DCU_FP, 1009};
1120 { 0x000003e0, 0x000003e0, 0x000003e0 } }, 1010
1121 { 0x8060, 1011static const struct ath5k_ini rf2413_ini_common_end[] = {
1122 { 0x0000000f, 0x0000000f, 0x0000000f } }, 1012 { AR5K_DCU_FP, 0x000003e0 },
1123 { 0x8118, 1013 { AR5K_SEQ_MASK, 0x0000000f },
1124 { 0x00000000, 0x00000000, 0x00000000 } }, 1014 { AR5K_MIC_QOS_CTL, 0x00000000 },
1125 { 0x811c, 1015 { AR5K_MIC_QOS_SEL, 0x00000000 },
1126 { 0x00000000, 0x00000000, 0x00000000 } }, 1016 { AR5K_MISC_MODE, 0x00000000 },
1127 { 0x8120, 1017 { AR5K_OFDM_FIL_CNT, 0x00000000 },
1128 { 0x00000000, 0x00000000, 0x00000000 } }, 1018 { AR5K_CCK_FIL_CNT, 0x00000000 },
1129 { 0x8124, 1019 { AR5K_PHYERR_CNT1, 0x00000000 },
1130 { 0x00000000, 0x00000000, 0x00000000 } }, 1020 { AR5K_PHYERR_CNT1_MASK, 0x00000000 },
1131 { 0x8128, 1021 { AR5K_PHYERR_CNT2, 0x00000000 },
1132 { 0x00000000, 0x00000000, 0x00000000 } }, 1022 { AR5K_PHYERR_CNT2_MASK, 0x00000000 },
1133 { 0x812c, 1023 { AR5K_TSF_THRES, 0x00000000 },
1134 { 0x00000000, 0x00000000, 0x00000000 } }, 1024 { 0x8140, 0x800000a8 },
1135 { 0x8130, 1025 { 0x8144, 0x00000000 },
1136 { 0x00000000, 0x00000000, 0x00000000 } }, 1026 { AR5K_PHY_AGC, 0x00000000 },
1137 { 0x8134, 1027 { AR5K_PHY_ADC_CTL, 0x0000a000 },
1138 { 0x00000000, 0x00000000, 0x00000000 } }, 1028 { 0x983c, 0x00200400 },
1139 { 0x8138, 1029 { AR5K_PHY_GAIN_OFFSET, 0x1284233c },
1140 { 0x00000000, 0x00000000, 0x00000000 } }, 1030 { AR5K_PHY_SCR, 0x0000001f },
1141 { 0x813c, 1031 { AR5K_PHY_SLMT, 0x00000080 },
1142 { 0x00000000, 0x00000000, 0x00000000 } }, 1032 { AR5K_PHY_SCAL, 0x0000000e },
1143 { 0x8140, 1033 { 0x9958, 0x000000ff },
1144 { 0x800000a8, 0x800000a8, 0x800000a8 } }, 1034 { AR5K_PHY_TIMING_7, 0x00000000 },
1145 { 0x8144, 1035 { AR5K_PHY_TIMING_8, 0x02800000 },
1146 { 0x00000000, 0x00000000, 0x00000000 } }, 1036 { AR5K_PHY_TIMING_11, 0x00000000 },
1147 { AR5K_PHY_AGC, 1037 { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
1148 { 0x00000000, 0x00000000, 0x00000000 } }, 1038 { 0x99e4, 0xaaaaaaaa },
1149 { AR5K_PHY(11), 1039 { 0x99e8, 0x3c466478 },
1150 { 0x0000a000, 0x0000a000, 0x0000a000 } }, 1040 { 0x99ec, 0x000000aa },
1151 { AR5K_PHY(15), 1041 { AR5K_PHY_SCLOCK, 0x0000000c },
1152 { 0x00200400, 0x00200400, 0x00200400 } }, 1042 { AR5K_PHY_SDELAY, 0x000000ff },
1153 { AR5K_PHY(19), 1043 { AR5K_PHY_SPENDING, 0x00000014 },
1154 { 0x1284233c, 0x1284233c, 0x1284233c } }, 1044 { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
1155 { AR5K_PHY_SCR, 1045 { 0xa23c, 0x93c889af },
1156 { 0x0000001f, 0x0000001f, 0x0000001f } }, 1046 { AR5K_PHY_FAST_ADC, 0x00000001 },
1157 { AR5K_PHY_SLMT, 1047 { 0xa250, 0x0000a000 },
1158 { 0x00000080, 0x00000080, 0x00000080 } }, 1048 { AR5K_PHY_BLUETOOTH, 0x00000000 },
1159 { AR5K_PHY_SCAL, 1049 { AR5K_PHY_TPC_RG1, 0x0cc75380 },
1160 { 0x0000000e, 0x0000000e, 0x0000000e } }, 1050 { 0xa25c, 0x0f0f0f01 },
1161 { AR5K_PHY(86), 1051 { 0xa260, 0x5f690f01 },
1162 { 0x000000ff, 0x000000ff, 0x000000ff } }, 1052 { 0xa264, 0x00418a11 },
1163 { AR5K_PHY(96), 1053 { 0xa268, 0x00000000 },
1164 { 0x00000000, 0x00000000, 0x00000000 } }, 1054 { AR5K_PHY_TPC_RG5, 0x0c30c16a },
1165 { AR5K_PHY(97), 1055 { 0xa270, 0x00820820 },
1166 { 0x02800000, 0x02800000, 0x02800000 } }, 1056 { 0xa274, 0x001b7caa },
1167 { AR5K_PHY(104), 1057 { 0xa278, 0x1ce739ce },
1168 { 0x00000000, 0x00000000, 0x00000000 } }, 1058 { 0xa27c, 0x051701ce },
1169 { AR5K_PHY(120), 1059 { 0xa300, 0x18010000 },
1170 { 0x00000000, 0x00000000, 0x00000000 } }, 1060 { 0xa304, 0x30032602 },
1171 { AR5K_PHY(121), 1061 { 0xa308, 0x48073e06 },
1172 { 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa } }, 1062 { 0xa30c, 0x560b4c0a },
1173 { AR5K_PHY(122), 1063 { 0xa310, 0x641a600f },
1174 { 0x3c466478, 0x3c466478, 0x3c466478 } }, 1064 { 0xa314, 0x784f6e1b },
1175 { AR5K_PHY(123), 1065 { 0xa318, 0x868f7c5a },
1176 { 0x000000aa, 0x000000aa, 0x000000aa } }, 1066 { 0xa31c, 0x8ecf865b },
1177 { AR5K_PHY_SCLOCK, 1067 { 0xa320, 0x9d4f970f },
1178 { 0x0000000c, 0x0000000c, 0x0000000c } }, 1068 { 0xa324, 0xa5cfa18f },
1179 { AR5K_PHY_SDELAY, 1069 { 0xa328, 0xb55faf1f },
1180 { 0x000000ff, 0x000000ff, 0x000000ff } }, 1070 { 0xa32c, 0xbddfb99f },
1181 { AR5K_PHY_SPENDING, 1071 { 0xa330, 0xcd7fc73f },
1182 { 0x00000014, 0x00000014, 0x00000014 } }, 1072 { 0xa334, 0xd5ffd1bf },
1183 { 0xa228, 1073 { 0xa338, 0x00000000 },
1184 { 0x000009b5, 0x000009b5, 0x000009b5 } }, 1074 { 0xa33c, 0x00000000 },
1185 { 0xa23c, 1075 { 0xa340, 0x00000000 },
1186 { 0x93c889af, 0x93c889af, 0x93c889af } }, 1076 { 0xa344, 0x00000000 },
1187 { 0xa24c, 1077 { 0xa348, 0x3fffffff },
1188 { 0x00000001, 0x00000001, 0x00000001 } }, 1078 { 0xa34c, 0x3fffffff },
1189 { 0xa250, 1079 { 0xa350, 0x3fffffff },
1190 { 0x0000a000, 0x0000a000, 0x0000a000 } }, 1080 { 0xa354, 0x0003ffff },
1191 { 0xa254, 1081 { 0xa358, 0x79a8aa1f },
1192 { 0x00000000, 0x00000000, 0x00000000 } }, 1082 { 0xa35c, 0x066c420f },
1193 { 0xa258, 1083 { 0xa360, 0x0f282207 },
1194 { 0x0cc75380, 0x0cc75380, 0x0cc75380 } }, 1084 { 0xa364, 0x17601685 },
1195 { 0xa25c, 1085 { 0xa368, 0x1f801104 },
1196 { 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01 } }, 1086 { 0xa36c, 0x37a00c03 },
1197 { 0xa260, 1087 { 0xa370, 0x3fc40883 },
1198 { 0x5f690f01, 0x5f690f01, 0x5f690f01 } }, 1088 { 0xa374, 0x57c00803 },
1199 { 0xa264, 1089 { 0xa378, 0x5fd80682 },
1200 { 0x00418a11, 0x00418a11, 0x00418a11 } }, 1090 { 0xa37c, 0x7fe00482 },
1201 { 0xa268, 1091 { 0xa380, 0x7f3c7bba },
1202 { 0x00000000, 0x00000000, 0x00000000 } }, 1092 { 0xa384, 0xf3307ff0 },
1203 { 0xa26c,
1204 { 0x0c30c16a, 0x0c30c16a, 0x0c30c16a } },
1205 { 0xa270,
1206 { 0x00820820, 0x00820820, 0x00820820 } },
1207 { 0xa274,
1208 { 0x001b7caa, 0x001b7caa, 0x001b7caa } },
1209 { 0xa278,
1210 { 0x1ce739ce, 0x1ce739ce, 0x1ce739ce } },
1211 { 0xa27c,
1212 { 0x051701ce, 0x051701ce, 0x051701ce } },
1213 { 0xa300,
1214 { 0x18010000, 0x18010000, 0x18010000 } },
1215 { 0xa304,
1216 { 0x30032602, 0x30032602, 0x30032602 } },
1217 { 0xa308,
1218 { 0x48073e06, 0x48073e06, 0x48073e06 } },
1219 { 0xa30c,
1220 { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
1221 { 0xa310,
1222 { 0x641a600f, 0x641a600f, 0x641a600f } },
1223 { 0xa314,
1224 { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
1225 { 0xa318,
1226 { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
1227 { 0xa31c,
1228 { 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } },
1229 { 0xa320,
1230 { 0x9d4f970f, 0x9d4f970f, 0x9d4f970f } },
1231 { 0xa324,
1232 { 0xa5cfa18f, 0xa5cfa18f, 0xa5cfa18f } },
1233 { 0xa328,
1234 { 0xb55faf1f, 0xb55faf1f, 0xb55faf1f } },
1235 { 0xa32c,
1236 { 0xbddfb99f, 0xbddfb99f, 0xbddfb99f } },
1237 { 0xa330,
1238 { 0xcd7fc73f, 0xcd7fc73f, 0xcd7fc73f } },
1239 { 0xa334,
1240 { 0xd5ffd1bf, 0xd5ffd1bf, 0xd5ffd1bf } },
1241 { 0xa338,
1242 { 0x00000000, 0x00000000, 0x00000000 } },
1243 { 0xa33c,
1244 { 0x00000000, 0x00000000, 0x00000000 } },
1245 { 0xa340,
1246 { 0x00000000, 0x00000000, 0x00000000 } },
1247 { 0xa344,
1248 { 0x00000000, 0x00000000, 0x00000000 } },
1249 { 0xa348,
1250 { 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1251 { 0xa34c,
1252 { 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1253 { 0xa350,
1254 { 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1255 { 0xa354,
1256 { 0x0003ffff, 0x0003ffff, 0x0003ffff } },
1257 { 0xa358,
1258 { 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f } },
1259 { 0xa35c,
1260 { 0x066c420f, 0x066c420f, 0x066c420f } },
1261 { 0xa360,
1262 { 0x0f282207, 0x0f282207, 0x0f282207 } },
1263 { 0xa364,
1264 { 0x17601685, 0x17601685, 0x17601685 } },
1265 { 0xa368,
1266 { 0x1f801104, 0x1f801104, 0x1f801104 } },
1267 { 0xa36c,
1268 { 0x37a00c03, 0x37a00c03, 0x37a00c03 } },
1269 { 0xa370,
1270 { 0x3fc40883, 0x3fc40883, 0x3fc40883 } },
1271 { 0xa374,
1272 { 0x57c00803, 0x57c00803, 0x57c00803 } },
1273 { 0xa378,
1274 { 0x5fd80682, 0x5fd80682, 0x5fd80682 } },
1275 { 0xa37c,
1276 { 0x7fe00482, 0x7fe00482, 0x7fe00482 } },
1277 { 0xa380,
1278 { 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba } },
1279 { 0xa384,
1280 { 0xf3307ff0, 0xf3307ff0, 0xf3307ff0 } },
1281}; 1093};
1282 1094
1283/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */ 1095/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
1284/* XXX: No dumps for turbog yet, so turbog is the same with g here with some 1096/* XXX: a mode ? */
1285 * minor tweaking based on dumps from other chips */
1286static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { 1097static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
1287 { AR5K_TXCFG, 1098 { AR5K_TXCFG,
1288 /* g gTurbo */ 1099 /* a/XR aTurbo b g (DYN) gTurbo */
1289 { 0x00000015, 0x00000015 } }, 1100 { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } },
1290 { AR5K_USEC_5211, 1101 { AR5K_USEC_5211,
1291 { 0x12e013ab, 0x098813cf } }, 1102 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
1292 { AR5K_PHY_TURBO, 1103 { AR5K_PHY_TURBO,
1293 { 0x00000000, 0x00000003 } }, 1104 { 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000001 } },
1294 { AR5K_PHY(10), 1105 { AR5K_PHY_RF_CTL3,
1295 { 0x0a020001, 0x0a020001 } }, 1106 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } },
1296 { AR5K_PHY(13), 1107 { AR5K_PHY_RF_CTL4,
1297 { 0x00000e0e, 0x00000e0e } }, 1108 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
1298 { AR5K_PHY(14), 1109 { AR5K_PHY_PA_CTL,
1299 { 0x0000000b, 0x0000000b } }, 1110 { 0x00000003, 0x00000003, 0x0000000b, 0x0000000b, 0x0000000b } },
1300 { AR5K_PHY(17), 1111 { AR5K_PHY_SETTLING,
1301 { 0x13721422, 0x13721422 } }, 1112 { 0x1372161c, 0x13721c25, 0x13721722, 0x13721422, 0x13721c25 } },
1302 { AR5K_PHY(18), 1113 { AR5K_PHY_GAIN,
1303 { 0x00199a65, 0x00199a65 } }, 1114 { 0x0018fa61, 0x0018fa61, 0x00199a65, 0x00199a65, 0x00199a65 } },
1304 { AR5K_PHY(20), 1115 { AR5K_PHY_DESIRED_SIZE,
1305 { 0x0c98b0da, 0x0c98b0da } }, 1116 { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } },
1306 { AR5K_PHY_SIG, 1117 { AR5K_PHY_SIG,
1307 { 0x7ec80d2e, 0x7ec80d2e } }, 1118 { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
1308 { AR5K_PHY_AGCCOARSE, 1119 { AR5K_PHY_AGCCOARSE,
1309 { 0x3139605e, 0x3139605e } }, 1120 { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } },
1310 { AR5K_PHY(27), 1121 { AR5K_PHY_WEAK_OFDM_LOW_THR,
1311 { 0x050cb081, 0x050cb081 } }, 1122 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
1312 { AR5K_PHY_RX_DELAY, 1123 { AR5K_PHY_RX_DELAY,
1313 { 0x00000898, 0x000007d0 } }, 1124 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
1314 { AR5K_PHY_FRAME_CTL_5211, 1125 { AR5K_PHY_FRAME_CTL_5211,
1315 { 0xf7b81000, 0xf7b81000 } }, 1126 { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } },
1316 { AR5K_PHY_CCKTXCTL, 1127 { AR5K_PHY_CCKTXCTL,
1317 { 0x00000000, 0x00000000 } }, 1128 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1318 { AR5K_PHY(642), 1129 { AR5K_PHY_CCK_CROSSCORR,
1319 { 0xd03e6788, 0xd03e6788 } }, 1130 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
1320 { AR5K_PHY_GAIN_2GHZ, 1131 { AR5K_PHY_GAIN_2GHZ,
1321 { 0x0052c140, 0x0052c140 } }, 1132 { 0x00000140, 0x00000140, 0x0052c140, 0x0052c140, 0x0052c140 } },
1322 { 0xa21c, 1133 { AR5K_PHY_CCK_RX_CTL_4,
1323 { 0x1883800a, 0x1883800a } }, 1134 { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } },
1324 { 0xa324, 1135 { 0xa324,
1325 { 0xa7cfa7cf, 0xa7cfa7cf } }, 1136 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1326 { 0xa328, 1137 { 0xa328,
1327 { 0xa7cfa7cf, 0xa7cfa7cf } }, 1138 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1328 { 0xa32c, 1139 { 0xa32c,
1329 { 0xa7cfa7cf, 0xa7cfa7cf } }, 1140 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1330 { 0xa330, 1141 { 0xa330,
1331 { 0xa7cfa7cf, 0xa7cfa7cf } }, 1142 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1332 { 0xa334, 1143 { 0xa334,
1333 { 0xa7cfa7cf, 0xa7cfa7cf } }, 1144 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1334 { AR5K_DCU_FP, 1145};
1335 { 0x000003e0, 0x000003e0 } }, 1146
1336 { 0x8060, 1147static const struct ath5k_ini rf2425_ini_common_end[] = {
1337 { 0x0000000f, 0x0000000f } }, 1148 { AR5K_DCU_FP, 0x000003e0 },
1338 { 0x809c, 1149 { AR5K_SEQ_MASK, 0x0000000f },
1339 { 0x00000000, 0x00000000 } }, 1150 { 0x809c, 0x00000000 },
1340 { 0x80a0, 1151 { 0x80a0, 0x00000000 },
1341 { 0x00000000, 0x00000000 } }, 1152 { AR5K_MIC_QOS_CTL, 0x00000000 },
1342 { 0x8118, 1153 { AR5K_MIC_QOS_SEL, 0x00000000 },
1343 { 0x00000000, 0x00000000 } }, 1154 { AR5K_MISC_MODE, 0x00000000 },
1344 { 0x811c, 1155 { AR5K_OFDM_FIL_CNT, 0x00000000 },
1345 { 0x00000000, 0x00000000 } }, 1156 { AR5K_CCK_FIL_CNT, 0x00000000 },
1346 { 0x8120, 1157 { AR5K_PHYERR_CNT1, 0x00000000 },
1347 { 0x00000000, 0x00000000 } }, 1158 { AR5K_PHYERR_CNT1_MASK, 0x00000000 },
1348 { 0x8124, 1159 { AR5K_PHYERR_CNT2, 0x00000000 },
1349 { 0x00000000, 0x00000000 } }, 1160 { AR5K_PHYERR_CNT2_MASK, 0x00000000 },
1350 { 0x8128, 1161 { AR5K_TSF_THRES, 0x00000000 },
1351 { 0x00000000, 0x00000000 } }, 1162 { 0x8140, 0x800003f9 },
1352 { 0x812c, 1163 { 0x8144, 0x00000000 },
1353 { 0x00000000, 0x00000000 } }, 1164 { AR5K_PHY_AGC, 0x00000000 },
1354 { 0x8130, 1165 { AR5K_PHY_ADC_CTL, 0x0000a000 },
1355 { 0x00000000, 0x00000000 } }, 1166 { 0x983c, 0x00200400 },
1356 { 0x8134, 1167 { AR5K_PHY_GAIN_OFFSET, 0x1284233c },
1357 { 0x00000000, 0x00000000 } }, 1168 { AR5K_PHY_SCR, 0x0000001f },
1358 { 0x8138, 1169 { AR5K_PHY_SLMT, 0x00000080 },
1359 { 0x00000000, 0x00000000 } }, 1170 { AR5K_PHY_SCAL, 0x0000000e },
1360 { 0x813c, 1171 { 0x9958, 0x00081fff },
1361 { 0x00000000, 0x00000000 } }, 1172 { AR5K_PHY_TIMING_7, 0x00000000 },
1362 { 0x8140, 1173 { AR5K_PHY_TIMING_8, 0x02800000 },
1363 { 0x800003f9, 0x800003f9 } }, 1174 { AR5K_PHY_TIMING_11, 0x00000000 },
1364 { 0x8144, 1175 { 0x99dc, 0xfebadbe8 },
1365 { 0x00000000, 0x00000000 } }, 1176 { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
1366 { AR5K_PHY_AGC, 1177 { 0x99e4, 0xaaaaaaaa },
1367 { 0x00000000, 0x00000000 } }, 1178 { 0x99e8, 0x3c466478 },
1368 { AR5K_PHY(11), 1179 { 0x99ec, 0x000000aa },
1369 { 0x0000a000, 0x0000a000 } }, 1180 { AR5K_PHY_SCLOCK, 0x0000000c },
1370 { AR5K_PHY(15), 1181 { AR5K_PHY_SDELAY, 0x000000ff },
1371 { 0x00200400, 0x00200400 } }, 1182 { AR5K_PHY_SPENDING, 0x00000014 },
1372 { AR5K_PHY(19), 1183 { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
1373 { 0x1284233c, 0x1284233c } }, 1184 { AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
1374 { AR5K_PHY_SCR, 1185 { AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
1375 { 0x0000001f, 0x0000001f } }, 1186 { 0xa23c, 0x93c889af },
1376 { AR5K_PHY_SLMT, 1187 { AR5K_PHY_FAST_ADC, 0x00000001 },
1377 { 0x00000080, 0x00000080 } }, 1188 { 0xa250, 0x0000a000 },
1378 { AR5K_PHY_SCAL, 1189 { AR5K_PHY_BLUETOOTH, 0x00000000 },
1379 { 0x0000000e, 0x0000000e } }, 1190 { AR5K_PHY_TPC_RG1, 0x0cc75380 },
1380 { AR5K_PHY(86), 1191 { 0xa25c, 0x0f0f0f01 },
1381 { 0x00081fff, 0x00081fff } }, 1192 { 0xa260, 0x5f690f01 },
1382 { AR5K_PHY(96), 1193 { 0xa264, 0x00418a11 },
1383 { 0x00000000, 0x00000000 } }, 1194 { 0xa268, 0x00000000 },
1384 { AR5K_PHY(97), 1195 { AR5K_PHY_TPC_RG5, 0x0c30c166 },
1385 { 0x02800000, 0x02800000 } }, 1196 { 0xa270, 0x00820820 },
1386 { AR5K_PHY(104), 1197 { 0xa274, 0x081a3caa },
1387 { 0x00000000, 0x00000000 } }, 1198 { 0xa278, 0x1ce739ce },
1388 { AR5K_PHY(119), 1199 { 0xa27c, 0x051701ce },
1389 { 0xfebadbe8, 0xfebadbe8 } }, 1200 { 0xa300, 0x16010000 },
1390 { AR5K_PHY(120), 1201 { 0xa304, 0x2c032402 },
1391 { 0x00000000, 0x00000000 } }, 1202 { 0xa308, 0x48433e42 },
1392 { AR5K_PHY(121), 1203 { 0xa30c, 0x5a0f500b },
1393 { 0xaaaaaaaa, 0xaaaaaaaa } }, 1204 { 0xa310, 0x6c4b624a },
1394 { AR5K_PHY(122), 1205 { 0xa314, 0x7e8b748a },
1395 { 0x3c466478, 0x3c466478 } }, 1206 { 0xa318, 0x96cf8ccb },
1396 { AR5K_PHY(123), 1207 { 0xa31c, 0xa34f9d0f },
1397 { 0x000000aa, 0x000000aa } }, 1208 { 0xa320, 0xa7cfa58f },
1398 { AR5K_PHY_SCLOCK, 1209 { 0xa348, 0x3fffffff },
1399 { 0x0000000c, 0x0000000c } }, 1210 { 0xa34c, 0x3fffffff },
1400 { AR5K_PHY_SDELAY, 1211 { 0xa350, 0x3fffffff },
1401 { 0x000000ff, 0x000000ff } }, 1212 { 0xa354, 0x0003ffff },
1402 { AR5K_PHY_SPENDING, 1213 { 0xa358, 0x79a8aa1f },
1403 { 0x00000014, 0x00000014 } }, 1214 { 0xa35c, 0x066c420f },
1404 { 0xa228, 1215 { 0xa360, 0x0f282207 },
1405 { 0x000009b5, 0x000009b5 } }, 1216 { 0xa364, 0x17601685 },
1406 { AR5K_PHY_TXPOWER_RATE3, 1217 { 0xa368, 0x1f801104 },
1407 { 0x20202020, 0x20202020 } }, 1218 { 0xa36c, 0x37a00c03 },
1408 { AR5K_PHY_TXPOWER_RATE4, 1219 { 0xa370, 0x3fc40883 },
1409 { 0x20202020, 0x20202020 } }, 1220 { 0xa374, 0x57c00803 },
1410 { 0xa23c, 1221 { 0xa378, 0x5fd80682 },
1411 { 0x93c889af, 0x93c889af } }, 1222 { 0xa37c, 0x7fe00482 },
1412 { 0xa24c, 1223 { 0xa380, 0x7f3c7bba },
1413 { 0x00000001, 0x00000001 } }, 1224 { 0xa384, 0xf3307ff0 },
1414 { 0xa250,
1415 { 0x0000a000, 0x0000a000 } },
1416 { 0xa254,
1417 { 0x00000000, 0x00000000 } },
1418 { 0xa258,
1419 { 0x0cc75380, 0x0cc75380 } },
1420 { 0xa25c,
1421 { 0x0f0f0f01, 0x0f0f0f01 } },
1422 { 0xa260,
1423 { 0x5f690f01, 0x5f690f01 } },
1424 { 0xa264,
1425 { 0x00418a11, 0x00418a11 } },
1426 { 0xa268,
1427 { 0x00000000, 0x00000000 } },
1428 { 0xa26c,
1429 { 0x0c30c166, 0x0c30c166 } },
1430 { 0xa270,
1431 { 0x00820820, 0x00820820 } },
1432 { 0xa274,
1433 { 0x081a3caa, 0x081a3caa } },
1434 { 0xa278,
1435 { 0x1ce739ce, 0x1ce739ce } },
1436 { 0xa27c,
1437 { 0x051701ce, 0x051701ce } },
1438 { 0xa300,
1439 { 0x16010000, 0x16010000 } },
1440 { 0xa304,
1441 { 0x2c032402, 0x2c032402 } },
1442 { 0xa308,
1443 { 0x48433e42, 0x48433e42 } },
1444 { 0xa30c,
1445 { 0x5a0f500b, 0x5a0f500b } },
1446 { 0xa310,
1447 { 0x6c4b624a, 0x6c4b624a } },
1448 { 0xa314,
1449 { 0x7e8b748a, 0x7e8b748a } },
1450 { 0xa318,
1451 { 0x96cf8ccb, 0x96cf8ccb } },
1452 { 0xa31c,
1453 { 0xa34f9d0f, 0xa34f9d0f } },
1454 { 0xa320,
1455 { 0xa7cfa58f, 0xa7cfa58f } },
1456 { 0xa348,
1457 { 0x3fffffff, 0x3fffffff } },
1458 { 0xa34c,
1459 { 0x3fffffff, 0x3fffffff } },
1460 { 0xa350,
1461 { 0x3fffffff, 0x3fffffff } },
1462 { 0xa354,
1463 { 0x0003ffff, 0x0003ffff } },
1464 { 0xa358,
1465 { 0x79a8aa1f, 0x79a8aa1f } },
1466 { 0xa35c,
1467 { 0x066c420f, 0x066c420f } },
1468 { 0xa360,
1469 { 0x0f282207, 0x0f282207 } },
1470 { 0xa364,
1471 { 0x17601685, 0x17601685 } },
1472 { 0xa368,
1473 { 0x1f801104, 0x1f801104 } },
1474 { 0xa36c,
1475 { 0x37a00c03, 0x37a00c03 } },
1476 { 0xa370,
1477 { 0x3fc40883, 0x3fc40883 } },
1478 { 0xa374,
1479 { 0x57c00803, 0x57c00803 } },
1480 { 0xa378,
1481 { 0x5fd80682, 0x5fd80682 } },
1482 { 0xa37c,
1483 { 0x7fe00482, 0x7fe00482 } },
1484 { 0xa380,
1485 { 0x7f3c7bba, 0x7f3c7bba } },
1486 { 0xa384,
1487 { 0xf3307ff0, 0xf3307ff0 } },
1488}; 1225};
1489 1226
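A note on the table layout used throughout this file: the *_ini_mode_end arrays carry one value per PHY mode, in the column order given by the /* a/XR aTurbo b g (DYN) gTurbo */ comments, while the new *_ini_common_end arrays carry a single value per register that is written regardless of mode. A minimal sketch of the two entry shapes and of how a mode column would be selected follows; the struct and function names here are illustrative, not the driver's exact definitions:

	/* Sketch only -- names are illustrative, not the driver's exact ones. */
	struct ini_entry {		/* one value, written for every mode */
		u16 reg;
		u32 val;
	};

	struct ini_mode_entry {		/* one value per mode:
					 * a/XR, aTurbo, b, g (DYN), gTurbo */
		u16 reg;
		u32 val[5];
	};

	/* Writing a mode table means picking the column that matches 'mode'. */
	static void write_mode_table(struct ath5k_hw *ah,
				     const struct ini_mode_entry *tbl,
				     unsigned int n, unsigned int mode)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			ath5k_hw_reg_write(ah, tbl[i].val[mode], tbl[i].reg);
	}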
1490/* 1227/*
@@ -1560,7 +1297,7 @@ static const struct ath5k_ini rf5111_ini_bbgain[] = {
1560 { AR5K_BB_GAIN(63), 0x00000016 }, 1297 { AR5K_BB_GAIN(63), 0x00000016 },
1561}; 1298};
1562 1299
1563/* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414) */ 1300/* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414+) */
1564static const struct ath5k_ini rf5112_ini_bbgain[] = { 1301static const struct ath5k_ini rf5112_ini_bbgain[] = {
1565 { AR5K_BB_GAIN(0), 0x00000000 }, 1302 { AR5K_BB_GAIN(0), 0x00000000 },
1566 { AR5K_BB_GAIN(1), 0x00000001 }, 1303 { AR5K_BB_GAIN(1), 0x00000001 },
@@ -1691,87 +1428,97 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1691 /* 1428 /*
1692 * Write initial settings common for all modes 1429 * Write initial settings common for all modes
1693 */ 1430 */
1694 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini), 1431 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start),
1695 ar5212_ini, change_channel); 1432 ar5212_ini_common_start, change_channel);
1696 1433
1697 /* Second set of mode-specific settings */ 1434 /* Second set of mode-specific settings */
1698 if (ah->ah_radio == AR5K_RF5111) { 1435 switch (ah->ah_radio) {
1436 case AR5K_RF5111:
1699 1437
1700 ath5k_hw_ini_mode_registers(ah, 1438 ath5k_hw_ini_mode_registers(ah,
1701 ARRAY_SIZE(ar5212_rf5111_ini_mode_end), 1439 ARRAY_SIZE(rf5111_ini_mode_end),
1702 ar5212_rf5111_ini_mode_end, mode); 1440 rf5111_ini_mode_end, mode);
1441
1442 ath5k_hw_ini_registers(ah,
1443 ARRAY_SIZE(rf5111_ini_common_end),
1444 rf5111_ini_common_end, change_channel);
1703 1445
1704 /* Baseband gain table */ 1446 /* Baseband gain table */
1705 ath5k_hw_ini_registers(ah, 1447 ath5k_hw_ini_registers(ah,
1706 ARRAY_SIZE(rf5111_ini_bbgain), 1448 ARRAY_SIZE(rf5111_ini_bbgain),
1707 rf5111_ini_bbgain, change_channel); 1449 rf5111_ini_bbgain, change_channel);
1708 1450
1709 } else if (ah->ah_radio == AR5K_RF5112) { 1451 break;
1452 case AR5K_RF5112:
1710 1453
1711 ath5k_hw_ini_mode_registers(ah, 1454 ath5k_hw_ini_mode_registers(ah,
1712 ARRAY_SIZE(ar5212_rf5112_ini_mode_end), 1455 ARRAY_SIZE(rf5112_ini_mode_end),
1713 ar5212_rf5112_ini_mode_end, mode); 1456 rf5112_ini_mode_end, mode);
1457
1458 ath5k_hw_ini_registers(ah,
1459 ARRAY_SIZE(rf5112_ini_common_end),
1460 rf5112_ini_common_end, change_channel);
1714 1461
1715 ath5k_hw_ini_registers(ah, 1462 ath5k_hw_ini_registers(ah,
1716 ARRAY_SIZE(rf5112_ini_bbgain), 1463 ARRAY_SIZE(rf5112_ini_bbgain),
1717 rf5112_ini_bbgain, change_channel); 1464 rf5112_ini_bbgain, change_channel);
1718 1465
1719 } else if (ah->ah_radio == AR5K_RF5413) { 1466 break;
1467 case AR5K_RF5413:
1720 1468
1721 ath5k_hw_ini_mode_registers(ah, 1469 ath5k_hw_ini_mode_registers(ah,
1722 ARRAY_SIZE(rf5413_ini_mode_end), 1470 ARRAY_SIZE(rf5413_ini_mode_end),
1723 rf5413_ini_mode_end, mode); 1471 rf5413_ini_mode_end, mode);
1724 1472
1725 ath5k_hw_ini_registers(ah, 1473 ath5k_hw_ini_registers(ah,
1474 ARRAY_SIZE(rf5413_ini_common_end),
1475 rf5413_ini_common_end, change_channel);
1476
1477 ath5k_hw_ini_registers(ah,
1726 ARRAY_SIZE(rf5112_ini_bbgain), 1478 ARRAY_SIZE(rf5112_ini_bbgain),
1727 rf5112_ini_bbgain, change_channel); 1479 rf5112_ini_bbgain, change_channel);
1728 1480
1729 } else if (ah->ah_radio == AR5K_RF2413) { 1481 break;
1730 1482 case AR5K_RF2316:
1731 if (mode < 2) { 1483 case AR5K_RF2413:
1732 ATH5K_ERR(ah->ah_sc,
1733 "unsupported channel mode: %d\n", mode);
1734 return -EINVAL;
1735 }
1736 mode = mode - 2;
1737
1738 /* Override a setting from ar5212_ini */
1739 ath5k_hw_reg_write(ah, 0x018830c6, AR5K_PHY(648));
1740 1484
1741 ath5k_hw_ini_mode_registers(ah, 1485 ath5k_hw_ini_mode_registers(ah,
1742 ARRAY_SIZE(rf2413_ini_mode_end), 1486 ARRAY_SIZE(rf2413_ini_mode_end),
1743 rf2413_ini_mode_end, mode); 1487 rf2413_ini_mode_end, mode);
1744 1488
1745 /* Baseband gain table */
1746 ath5k_hw_ini_registers(ah, 1489 ath5k_hw_ini_registers(ah,
1747 ARRAY_SIZE(rf5112_ini_bbgain), 1490 ARRAY_SIZE(rf2413_ini_common_end),
1748 rf5112_ini_bbgain, change_channel); 1491 rf2413_ini_common_end, change_channel);
1749
1750 } else if (ah->ah_radio == AR5K_RF2425) {
1751 1492
1752 if (mode < 2) { 1493 /* Override settings from rf2413_ini_common_end */
1753 ATH5K_ERR(ah->ah_sc, 1494 if (ah->ah_radio == AR5K_RF2316) {
1754 "unsupported channel mode: %d\n", mode); 1495 ath5k_hw_reg_write(ah, 0x00004000,
1755 return -EINVAL; 1496 AR5K_PHY_AGC);
1497 ath5k_hw_reg_write(ah, 0x081b7caa,
1498 0xa274);
1756 } 1499 }
1757 1500
1758 /* Map b to g */ 1501 ath5k_hw_ini_registers(ah,
1759 if (mode == 2) 1502 ARRAY_SIZE(rf5112_ini_bbgain),
1760 mode = 0; 1503 rf5112_ini_bbgain, change_channel);
1761 else 1504 break;
1762 mode = mode - 3; 1505 case AR5K_RF2317:
1763 1506 case AR5K_RF2425:
1764 /* Override a setting from ar5212_ini */
1765 ath5k_hw_reg_write(ah, 0x018830c6, AR5K_PHY(648));
1766 1507
1767 ath5k_hw_ini_mode_registers(ah, 1508 ath5k_hw_ini_mode_registers(ah,
1768 ARRAY_SIZE(rf2425_ini_mode_end), 1509 ARRAY_SIZE(rf2425_ini_mode_end),
1769 rf2425_ini_mode_end, mode); 1510 rf2425_ini_mode_end, mode);
1770 1511
1771 /* Baseband gain table */ 1512 ath5k_hw_ini_registers(ah,
1513 ARRAY_SIZE(rf2413_ini_common_end),
1514 rf2413_ini_common_end, change_channel);
1515
1772 ath5k_hw_ini_registers(ah, 1516 ath5k_hw_ini_registers(ah,
1773 ARRAY_SIZE(rf5112_ini_bbgain), 1517 ARRAY_SIZE(rf5112_ini_bbgain),
1774 rf5112_ini_bbgain, change_channel); 1518 rf5112_ini_bbgain, change_channel);
1519 break;
1520 default:
1521 return -EINVAL;
1775 1522
1776 } 1523 }
1777 1524
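For orientation, each branch of the switch above writes three groups in the same order: the radio's mode-specific end table, its common end table, and finally a baseband gain table. A sketch of a call site is shown below; it is not part of this patch, and the comment about 'change_channel' is an assumption about how the table writers use that flag:

	/* Illustrative call site, not taken from this patch: program the
	 * initvals for the current mode after a reset. 'change_channel' is
	 * passed through to the table writers; presumably it lets them skip
	 * registers that do not need rewriting on a channel change. */
	int ret;

	ret = ath5k_hw_write_initvals(ah, mode, false);
	if (ret) {
		ATH5K_ERR(ah->ah_sc, "failed to write initial registers\n");
		return ret;
	}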
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath5k/pcu.c
index 75eb9f43c74..f8a4a696027 100644
--- a/drivers/net/wireless/ath5k/pcu.c
+++ b/drivers/net/wireless/ath5k/pcu.c
@@ -646,6 +646,23 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
646} 646}
647 647
648/** 648/**
649 * ath5k_hw_set_tsf64 - Set a new 64bit TSF
650 *
651 * @ah: The &struct ath5k_hw
652 * @tsf64: The new 64bit TSF
653 *
654 * Sets the new TSF
655 */
656void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
657{
658 ATH5K_TRACE(ah->ah_sc);
659
660 ath5k_hw_reg_write(ah, 0x00000000, AR5K_TSF_L32);
661 ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
662 ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
663}
664
665/**
649 * ath5k_hw_reset_tsf - Force a TSF reset 666 * ath5k_hw_reset_tsf - Force a TSF reset
650 * 667 *
651 * @ah: The &struct ath5k_hw 668 * @ah: The &struct ath5k_hw
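The write order in the new ath5k_hw_set_tsf64() above is deliberate: clearing AR5K_TSF_L32 first means the low half cannot roll over and carry into AR5K_TSF_U32 between the two writes that follow, so the upper half is programmed against a known-zero lower half. The same split-register hazard exists on the read side; a generic rollover-safe read pattern (not the driver's ath5k_hw_get_tsf64(), just the usual technique, with caller-supplied register accessors) looks like this:

	/* Generic rollover-safe read of a 64-bit counter split across two
	 * 32-bit registers: re-read the upper half to detect a carry that
	 * happened between the two reads. */
	static u64 read_split_counter64(u32 (*read_hi)(void), u32 (*read_lo)(void))
	{
		u32 hi, lo, hi2;

		do {
			hi  = read_hi();
			lo  = read_lo();
			hi2 = read_hi();
		} while (hi != hi2);

		return ((u64)hi << 32) | lo;
	}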
@@ -1026,6 +1043,9 @@ int ath5k_keycache_type(const struct ieee80211_key_conf *key)
1026 return AR5K_KEYTABLE_TYPE_40; 1043 return AR5K_KEYTABLE_TYPE_40;
1027 else if (key->keylen == LEN_WEP104) 1044 else if (key->keylen == LEN_WEP104)
1028 return AR5K_KEYTABLE_TYPE_104; 1045 return AR5K_KEYTABLE_TYPE_104;
1046 return -EINVAL;
1047 default:
1048 return -EINVAL;
1029 } 1049 }
1030 return -EINVAL; 1050 return -EINVAL;
1031} 1051}
@@ -1041,7 +1061,7 @@ int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
1041 __le32 key_v[5] = {}; 1061 __le32 key_v[5] = {};
1042 __le32 key0 = 0, key1 = 0; 1062 __le32 key0 = 0, key1 = 0;
1043 __le32 *rxmic, *txmic; 1063 __le32 *rxmic, *txmic;
1044 u32 keytype; 1064 int keytype;
1045 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET; 1065 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
1046 bool is_tkip; 1066 bool is_tkip;
1047 const u8 *key_ptr; 1067 const u8 *key_ptr;
@@ -1139,7 +1159,7 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
1139 1159
1140 /* MAC may be NULL if it's a broadcast key. In this case no need to 1160 /* MAC may be NULL if it's a broadcast key. In this case no need to
1141 * compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */ 1161 * compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
1142 if (unlikely(mac == NULL)) { 1162 if (!mac) {
1143 low_id = 0xffffffff; 1163 low_id = 0xffffffff;
1144 high_id = 0xffff | AR5K_KEYTABLE_VALID; 1164 high_id = 0xffff | AR5K_KEYTABLE_VALID;
1145 } else { 1165 } else {
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index 7ba18e09463..81f5bebc48b 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -2,7 +2,7 @@
2 * PHY functions 2 * PHY functions
3 * 3 *
4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * 7 *
8 * Permission to use, copy, modify, and distribute this software for any 8 * Permission to use, copy, modify, and distribute this software for any
@@ -26,1138 +26,191 @@
26#include "ath5k.h" 26#include "ath5k.h"
27#include "reg.h" 27#include "reg.h"
28#include "base.h" 28#include "base.h"
29 29#include "rfbuffer.h"
30/* Struct to hold initial RF register values (RF Banks) */ 30#include "rfgain.h"
31struct ath5k_ini_rf {
32 u8 rf_bank; /* check out ath5k_reg.h */
33 u16 rf_register; /* register address */
34 u32 rf_value[5]; /* register value for different modes (above) */
35};
36
37/*
38 * Mode-specific RF Gain table (64bytes) for RF5111/5112
39 * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
40 * RF Gain values are included in AR5K_AR5210_INI)
41 */
42struct ath5k_ini_rfgain {
43 u16 rfg_register; /* RF Gain register address */
44 u32 rfg_value[2]; /* [freq (see below)] */
45};
46
47struct ath5k_gain_opt {
48 u32 go_default;
49 u32 go_steps_count;
50 const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
51};
52
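The structures being removed above describe two kinds of tables: per-bank RF register writes with one value per mode (rf_value[5]), and an RF gain table with one value per band (rfg_value[2], 5GHz and 2GHz). A minimal sketch of how such a gain table would be flushed to hardware, assuming a band index convention of 0 = 5GHz and 1 = 2GHz to match the column comment below; the loop and helper name are illustrative, not the driver's actual helper:

	/* Sketch: write one column of an ath5k_ini_rfgain table.
	 * 'freq' follows the column comment: 0 = 5GHz, 1 = 2GHz. */
	static void write_rfgain_table(struct ath5k_hw *ah,
				       const struct ath5k_ini_rfgain *tbl,
				       unsigned int n, unsigned int freq)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			ath5k_hw_reg_write(ah, tbl[i].rfg_value[freq],
					   tbl[i].rfg_register);
	}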
53/* RF5111 mode-specific init registers */
54static const struct ath5k_ini_rf rfregs_5111[] = {
55 { 0, 0x989c,
56 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
57 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
58 { 0, 0x989c,
59 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
60 { 0, 0x989c,
61 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
62 { 0, 0x989c,
63 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
64 { 0, 0x989c,
65 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
66 { 0, 0x989c,
67 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
68 { 0, 0x989c,
69 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
70 { 0, 0x989c,
71 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
72 { 0, 0x989c,
73 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
74 { 0, 0x989c,
75 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
76 { 0, 0x989c,
77 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
78 { 0, 0x989c,
79 { 0x00380000, 0x00380000, 0x00380000, 0x00380000, 0x00380000 } },
80 { 0, 0x989c,
81 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
82 { 0, 0x989c,
83 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
84 { 0, 0x989c,
85 { 0x00000000, 0x00000000, 0x000000c0, 0x00000080, 0x00000080 } },
86 { 0, 0x989c,
87 { 0x000400f9, 0x000400f9, 0x000400ff, 0x000400fd, 0x000400fd } },
88 { 0, 0x98d4,
89 { 0x00000000, 0x00000000, 0x00000004, 0x00000004, 0x00000004 } },
90 { 1, 0x98d4,
91 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
92 { 2, 0x98d4,
93 { 0x00000010, 0x00000014, 0x00000010, 0x00000010, 0x00000014 } },
94 { 3, 0x98d8,
95 { 0x00601068, 0x00601068, 0x00601068, 0x00601068, 0x00601068 } },
96 { 6, 0x989c,
97 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
98 { 6, 0x989c,
99 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
100 { 6, 0x989c,
101 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
102 { 6, 0x989c,
103 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
104 { 6, 0x989c,
105 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
106 { 6, 0x989c,
107 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
108 { 6, 0x989c,
109 { 0x04000000, 0x04000000, 0x04000000, 0x04000000, 0x04000000 } },
110 { 6, 0x989c,
111 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
112 { 6, 0x989c,
113 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
114 { 6, 0x989c,
115 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
116 { 6, 0x989c,
117 { 0x00000000, 0x00000000, 0x0a000000, 0x00000000, 0x00000000 } },
118 { 6, 0x989c,
119 { 0x003800c0, 0x00380080, 0x023800c0, 0x003800c0, 0x003800c0 } },
120 { 6, 0x989c,
121 { 0x00020006, 0x00020006, 0x00000006, 0x00020006, 0x00020006 } },
122 { 6, 0x989c,
123 { 0x00000089, 0x00000089, 0x00000089, 0x00000089, 0x00000089 } },
124 { 6, 0x989c,
125 { 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0 } },
126 { 6, 0x989c,
127 { 0x00040007, 0x00040007, 0x00040007, 0x00040007, 0x00040007 } },
128 { 6, 0x98d4,
129 { 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a } },
130 { 7, 0x989c,
131 { 0x00000040, 0x00000048, 0x00000040, 0x00000040, 0x00000040 } },
132 { 7, 0x989c,
133 { 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 } },
134 { 7, 0x989c,
135 { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } },
136 { 7, 0x989c,
137 { 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f } },
138 { 7, 0x989c,
139 { 0x000000f1, 0x000000f1, 0x00000061, 0x000000f1, 0x000000f1 } },
140 { 7, 0x989c,
141 { 0x0000904f, 0x0000904f, 0x0000904c, 0x0000904f, 0x0000904f } },
142 { 7, 0x989c,
143 { 0x0000125a, 0x0000125a, 0x0000129a, 0x0000125a, 0x0000125a } },
144 { 7, 0x98cc,
145 { 0x0000000e, 0x0000000e, 0x0000000f, 0x0000000e, 0x0000000e } },
146};
147
148/* Initial RF Gain settings for RF5111 */
149static const struct ath5k_ini_rfgain rfgain_5111[] = {
150 /* 5Ghz 2Ghz */
151 { AR5K_RF_GAIN(0), { 0x000001a9, 0x00000000 } },
152 { AR5K_RF_GAIN(1), { 0x000001e9, 0x00000040 } },
153 { AR5K_RF_GAIN(2), { 0x00000029, 0x00000080 } },
154 { AR5K_RF_GAIN(3), { 0x00000069, 0x00000150 } },
155 { AR5K_RF_GAIN(4), { 0x00000199, 0x00000190 } },
156 { AR5K_RF_GAIN(5), { 0x000001d9, 0x000001d0 } },
157 { AR5K_RF_GAIN(6), { 0x00000019, 0x00000010 } },
158 { AR5K_RF_GAIN(7), { 0x00000059, 0x00000044 } },
159 { AR5K_RF_GAIN(8), { 0x00000099, 0x00000084 } },
160 { AR5K_RF_GAIN(9), { 0x000001a5, 0x00000148 } },
161 { AR5K_RF_GAIN(10), { 0x000001e5, 0x00000188 } },
162 { AR5K_RF_GAIN(11), { 0x00000025, 0x000001c8 } },
163 { AR5K_RF_GAIN(12), { 0x000001c8, 0x00000014 } },
164 { AR5K_RF_GAIN(13), { 0x00000008, 0x00000042 } },
165 { AR5K_RF_GAIN(14), { 0x00000048, 0x00000082 } },
166 { AR5K_RF_GAIN(15), { 0x00000088, 0x00000178 } },
167 { AR5K_RF_GAIN(16), { 0x00000198, 0x000001b8 } },
168 { AR5K_RF_GAIN(17), { 0x000001d8, 0x000001f8 } },
169 { AR5K_RF_GAIN(18), { 0x00000018, 0x00000012 } },
170 { AR5K_RF_GAIN(19), { 0x00000058, 0x00000052 } },
171 { AR5K_RF_GAIN(20), { 0x00000098, 0x00000092 } },
172 { AR5K_RF_GAIN(21), { 0x000001a4, 0x0000017c } },
173 { AR5K_RF_GAIN(22), { 0x000001e4, 0x000001bc } },
174 { AR5K_RF_GAIN(23), { 0x00000024, 0x000001fc } },
175 { AR5K_RF_GAIN(24), { 0x00000064, 0x0000000a } },
176 { AR5K_RF_GAIN(25), { 0x000000a4, 0x0000004a } },
177 { AR5K_RF_GAIN(26), { 0x000000e4, 0x0000008a } },
178 { AR5K_RF_GAIN(27), { 0x0000010a, 0x0000015a } },
179 { AR5K_RF_GAIN(28), { 0x0000014a, 0x0000019a } },
180 { AR5K_RF_GAIN(29), { 0x0000018a, 0x000001da } },
181 { AR5K_RF_GAIN(30), { 0x000001ca, 0x0000000e } },
182 { AR5K_RF_GAIN(31), { 0x0000000a, 0x0000004e } },
183 { AR5K_RF_GAIN(32), { 0x0000004a, 0x0000008e } },
184 { AR5K_RF_GAIN(33), { 0x0000008a, 0x0000015e } },
185 { AR5K_RF_GAIN(34), { 0x000001ba, 0x0000019e } },
186 { AR5K_RF_GAIN(35), { 0x000001fa, 0x000001de } },
187 { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000009 } },
188 { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000049 } },
189 { AR5K_RF_GAIN(38), { 0x00000186, 0x00000089 } },
190 { AR5K_RF_GAIN(39), { 0x000001c6, 0x00000179 } },
191 { AR5K_RF_GAIN(40), { 0x00000006, 0x000001b9 } },
192 { AR5K_RF_GAIN(41), { 0x00000046, 0x000001f9 } },
193 { AR5K_RF_GAIN(42), { 0x00000086, 0x00000039 } },
194 { AR5K_RF_GAIN(43), { 0x000000c6, 0x00000079 } },
195 { AR5K_RF_GAIN(44), { 0x000000c6, 0x000000b9 } },
196 { AR5K_RF_GAIN(45), { 0x000000c6, 0x000001bd } },
197 { AR5K_RF_GAIN(46), { 0x000000c6, 0x000001fd } },
198 { AR5K_RF_GAIN(47), { 0x000000c6, 0x0000003d } },
199 { AR5K_RF_GAIN(48), { 0x000000c6, 0x0000007d } },
200 { AR5K_RF_GAIN(49), { 0x000000c6, 0x000000bd } },
201 { AR5K_RF_GAIN(50), { 0x000000c6, 0x000000fd } },
202 { AR5K_RF_GAIN(51), { 0x000000c6, 0x000000fd } },
203 { AR5K_RF_GAIN(52), { 0x000000c6, 0x000000fd } },
204 { AR5K_RF_GAIN(53), { 0x000000c6, 0x000000fd } },
205 { AR5K_RF_GAIN(54), { 0x000000c6, 0x000000fd } },
206 { AR5K_RF_GAIN(55), { 0x000000c6, 0x000000fd } },
207 { AR5K_RF_GAIN(56), { 0x000000c6, 0x000000fd } },
208 { AR5K_RF_GAIN(57), { 0x000000c6, 0x000000fd } },
209 { AR5K_RF_GAIN(58), { 0x000000c6, 0x000000fd } },
210 { AR5K_RF_GAIN(59), { 0x000000c6, 0x000000fd } },
211 { AR5K_RF_GAIN(60), { 0x000000c6, 0x000000fd } },
212 { AR5K_RF_GAIN(61), { 0x000000c6, 0x000000fd } },
213 { AR5K_RF_GAIN(62), { 0x000000c6, 0x000000fd } },
214 { AR5K_RF_GAIN(63), { 0x000000c6, 0x000000fd } },
215};
216
217static const struct ath5k_gain_opt rfgain_opt_5111 = {
218 4,
219 9,
220 {
221 { { 4, 1, 1, 1 }, 6 },
222 { { 4, 0, 1, 1 }, 4 },
223 { { 3, 1, 1, 1 }, 3 },
224 { { 4, 0, 0, 1 }, 1 },
225 { { 4, 1, 1, 0 }, 0 },
226 { { 4, 0, 1, 0 }, -2 },
227 { { 3, 1, 1, 0 }, -3 },
228 { { 4, 0, 0, 0 }, -4 },
229 { { 2, 1, 1, 0 }, -6 }
230 }
231};
232
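For context on the gain-optimization table just above: the first member (4) is the default step index, the second (9) the number of steps, and each step pairs four RF parameter values with a relative gain figure descending from 6 to -6. One plausible way such a table could be consulted, picking the step whose relative gain is closest to a requested adjustment, is sketched below; this is illustrative only, not the driver's calibration logic, and the gos_gain field name is an assumption:

	/* Illustrative only: choose the step whose relative gain is closest
	 * to the requested adjustment. 'gos_gain' is an assumed field name. */
	static unsigned int pick_gain_step(const struct ath5k_gain_opt *go,
					   int wanted)
	{
		unsigned int i, best = go->go_default;
		int best_diff = 0x7fffffff;

		for (i = 0; i < go->go_steps_count; i++) {
			int diff = go->go_step[i].gos_gain - wanted;

			if (diff < 0)
				diff = -diff;
			if (diff < best_diff) {
				best_diff = diff;
				best = i;
			}
		}
		return best;
	}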
233/* RF5112 mode-specific init registers */
234static const struct ath5k_ini_rf rfregs_5112[] = {
235 { 1, 0x98d4,
236 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
237 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
238 { 2, 0x98d0,
239 { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } },
240 { 3, 0x98dc,
241 { 0x00a0c0c0, 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
242 { 6, 0x989c,
243 { 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000 } },
244 { 6, 0x989c,
245 { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } },
246 { 6, 0x989c,
247 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
248 { 6, 0x989c,
249 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
250 { 6, 0x989c,
251 { 0x00660000, 0x00660000, 0x00660000, 0x00660000, 0x00660000 } },
252 { 6, 0x989c,
253 { 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000 } },
254 { 6, 0x989c,
255 { 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000 } },
256 { 6, 0x989c,
257 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
258 { 6, 0x989c,
259 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
260 { 6, 0x989c,
261 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
262 { 6, 0x989c,
263 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
264 { 6, 0x989c,
265 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
266 { 6, 0x989c,
267 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
268 { 6, 0x989c,
269 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
270 { 6, 0x989c,
271 { 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000 } },
272 { 6, 0x989c,
273 { 0x00600000, 0x00600000, 0x00600000, 0x00600000, 0x00600000 } },
274 { 6, 0x989c,
275 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
276 { 6, 0x989c,
277 { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } },
278 { 6, 0x989c,
279 { 0x00640000, 0x00640000, 0x00640000, 0x00640000, 0x00640000 } },
280 { 6, 0x989c,
281 { 0x00200000, 0x00200000, 0x00200000, 0x00200000, 0x00200000 } },
282 { 6, 0x989c,
283 { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } },
284 { 6, 0x989c,
285 { 0x00250000, 0x00250000, 0x00250000, 0x00250000, 0x00250000 } },
286 { 6, 0x989c,
287 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
288 { 6, 0x989c,
289 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
290 { 6, 0x989c,
291 { 0x00510000, 0x00510000, 0x00510000, 0x00510000, 0x00510000 } },
292 { 6, 0x989c,
293 { 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000 } },
294 { 6, 0x989c,
295 { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } },
296 { 6, 0x989c,
297 { 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000 } },
298 { 6, 0x989c,
299 { 0x00400000, 0x00400000, 0x00400000, 0x00400000, 0x00400000 } },
300 { 6, 0x989c,
301 { 0x03090000, 0x03090000, 0x03090000, 0x03090000, 0x03090000 } },
302 { 6, 0x989c,
303 { 0x06000000, 0x06000000, 0x06000000, 0x06000000, 0x06000000 } },
304 { 6, 0x989c,
305 { 0x000000b0, 0x000000b0, 0x000000a8, 0x000000a8, 0x000000a8 } },
306 { 6, 0x989c,
307 { 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e } },
308 { 6, 0x989c,
309 { 0x006c4a41, 0x006c4a41, 0x006c4af1, 0x006c4a61, 0x006c4a61 } },
310 { 6, 0x989c,
311 { 0x0050892a, 0x0050892a, 0x0050892b, 0x0050892b, 0x0050892b } },
312 { 6, 0x989c,
313 { 0x00842400, 0x00842400, 0x00842400, 0x00842400, 0x00842400 } },
314 { 6, 0x989c,
315 { 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200 } },
316 { 6, 0x98d0,
317 { 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c } },
318 { 7, 0x989c,
319 { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
320 { 7, 0x989c,
321 { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
322 { 7, 0x989c,
323 { 0x0000000a, 0x0000000a, 0x00000012, 0x00000012, 0x00000012 } },
324 { 7, 0x989c,
325 { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
326 { 7, 0x989c,
327 { 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1 } },
328 { 7, 0x989c,
329 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
330 { 7, 0x989c,
331 { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
332 { 7, 0x989c,
333 { 0x00000022, 0x00000022, 0x00000022, 0x00000022, 0x00000022 } },
334 { 7, 0x989c,
335 { 0x00000092, 0x00000092, 0x00000092, 0x00000092, 0x00000092 } },
336 { 7, 0x989c,
337 { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
338 { 7, 0x989c,
339 { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
340 { 7, 0x989c,
341 { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
342 { 7, 0x98c4,
343 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
344};
345
346/* RF5112A mode-specific init registers */
347static const struct ath5k_ini_rf rfregs_5112a[] = {
348 { 1, 0x98d4,
349 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
350 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
351 { 2, 0x98d0,
352 { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } },
353 { 3, 0x98dc,
354 { 0x00a0c0c0, 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
355 { 6, 0x989c,
356 { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } },
357 { 6, 0x989c,
358 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
359 { 6, 0x989c,
360 { 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000 } },
361 { 6, 0x989c,
362 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
363 { 6, 0x989c,
364 { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } },
365 { 6, 0x989c,
366 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
367 { 6, 0x989c,
368 { 0x00180000, 0x00180000, 0x00180000, 0x00180000, 0x00180000 } },
369 { 6, 0x989c,
370 { 0x00600000, 0x00600000, 0x006e0000, 0x006e0000, 0x006e0000 } },
371 { 6, 0x989c,
372 { 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000 } },
373 { 6, 0x989c,
374 { 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000 } },
375 { 6, 0x989c,
376 { 0x04480000, 0x04480000, 0x04480000, 0x04480000, 0x04480000 } },
377 { 6, 0x989c,
378 { 0x00220000, 0x00220000, 0x00220000, 0x00220000, 0x00220000 } },
379 { 6, 0x989c,
380 { 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000 } },
381 { 6, 0x989c,
382 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
383 { 6, 0x989c,
384 { 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
385 { 6, 0x989c,
386 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
387 { 6, 0x989c,
388 { 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000 } },
389 { 6, 0x989c,
390 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
391 { 6, 0x989c,
392 { 0x00190000, 0x00190000, 0x00190000, 0x00190000, 0x00190000 } },
393 { 6, 0x989c,
394 { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } },
395 { 6, 0x989c,
396 { 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000 } },
397 { 6, 0x989c,
398 { 0x00990000, 0x00990000, 0x00990000, 0x00990000, 0x00990000 } },
399 { 6, 0x989c,
400 { 0x00500000, 0x00500000, 0x00500000, 0x00500000, 0x00500000 } },
401 { 6, 0x989c,
402 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
403 { 6, 0x989c,
404 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
405 { 6, 0x989c,
406 { 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000 } },
407 { 6, 0x989c,
408 { 0x01740000, 0x01740000, 0x01740000, 0x01740000, 0x01740000 } },
409 { 6, 0x989c,
410 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
411 { 6, 0x989c,
412 { 0x86280000, 0x86280000, 0x86280000, 0x86280000, 0x86280000 } },
413 { 6, 0x989c,
414 { 0x31840000, 0x31840000, 0x31840000, 0x31840000, 0x31840000 } },
415 { 6, 0x989c,
416 { 0x00020080, 0x00020080, 0x00020080, 0x00020080, 0x00020080 } },
417 { 6, 0x989c,
418 { 0x00080009, 0x00080009, 0x00080009, 0x00080009, 0x00080009 } },
419 { 6, 0x989c,
420 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
421 { 6, 0x989c,
422 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
423 { 6, 0x989c,
424 { 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2 } },
425 { 6, 0x989c,
426 { 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084 } },
427 { 6, 0x989c,
428 { 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4 } },
429 { 6, 0x989c,
430 { 0x00119220, 0x00119220, 0x00119220, 0x00119220, 0x00119220 } },
431 { 6, 0x989c,
432 { 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800 } },
433 { 6, 0x98d8,
434 { 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230 } },
435 { 7, 0x989c,
436 { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
437 { 7, 0x989c,
438 { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
439 { 7, 0x989c,
440 { 0x00000012, 0x00000012, 0x00000012, 0x00000012, 0x00000012 } },
441 { 7, 0x989c,
442 { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
443 { 7, 0x989c,
444 { 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9 } },
445 { 7, 0x989c,
446 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
447 { 7, 0x989c,
448 { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
449 { 7, 0x989c,
450 { 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2 } },
451 { 7, 0x989c,
452 { 0x00000052, 0x00000052, 0x00000052, 0x00000052, 0x00000052 } },
453 { 7, 0x989c,
454 { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
455 { 7, 0x989c,
456 { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
457 { 7, 0x989c,
458 { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
459 { 7, 0x98c4,
460 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
461};
462
463
464static const struct ath5k_ini_rf rfregs_2112a[] = {
465 { 1, AR5K_RF_BUFFER_CONTROL_4,
466 /* mode b mode g mode gTurbo */
467 { 0x00000020, 0x00000020, 0x00000020 } },
468 { 2, AR5K_RF_BUFFER_CONTROL_3,
469 { 0x03060408, 0x03060408, 0x03070408 } },
470 { 3, AR5K_RF_BUFFER_CONTROL_6,
471 { 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
472 { 6, AR5K_RF_BUFFER,
473 { 0x0a000000, 0x0a000000, 0x0a000000 } },
474 { 6, AR5K_RF_BUFFER,
475 { 0x00000000, 0x00000000, 0x00000000 } },
476 { 6, AR5K_RF_BUFFER,
477 { 0x00800000, 0x00800000, 0x00800000 } },
478 { 6, AR5K_RF_BUFFER,
479 { 0x002a0000, 0x002a0000, 0x002a0000 } },
480 { 6, AR5K_RF_BUFFER,
481 { 0x00010000, 0x00010000, 0x00010000 } },
482 { 6, AR5K_RF_BUFFER,
483 { 0x00000000, 0x00000000, 0x00000000 } },
484 { 6, AR5K_RF_BUFFER,
485 { 0x00180000, 0x00180000, 0x00180000 } },
486 { 6, AR5K_RF_BUFFER,
487 { 0x006e0000, 0x006e0000, 0x006e0000 } },
488 { 6, AR5K_RF_BUFFER,
489 { 0x00c70000, 0x00c70000, 0x00c70000 } },
490 { 6, AR5K_RF_BUFFER,
491 { 0x004b0000, 0x004b0000, 0x004b0000 } },
492 { 6, AR5K_RF_BUFFER,
493 { 0x04480000, 0x04480000, 0x04480000 } },
494 { 6, AR5K_RF_BUFFER,
495 { 0x002a0000, 0x002a0000, 0x002a0000 } },
496 { 6, AR5K_RF_BUFFER,
497 { 0x00e40000, 0x00e40000, 0x00e40000 } },
498 { 6, AR5K_RF_BUFFER,
499 { 0x00000000, 0x00000000, 0x00000000 } },
500 { 6, AR5K_RF_BUFFER,
501 { 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
502 { 6, AR5K_RF_BUFFER,
503 { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
504 { 6, AR5K_RF_BUFFER,
505 { 0x043f0000, 0x043f0000, 0x043f0000 } },
506 { 6, AR5K_RF_BUFFER,
507 { 0x0c0c0000, 0x0c0c0000, 0x0c0c0000 } },
508 { 6, AR5K_RF_BUFFER,
509 { 0x02190000, 0x02190000, 0x02190000 } },
510 { 6, AR5K_RF_BUFFER,
511 { 0x00240000, 0x00240000, 0x00240000 } },
512 { 6, AR5K_RF_BUFFER,
513 { 0x00b40000, 0x00b40000, 0x00b40000 } },
514 { 6, AR5K_RF_BUFFER,
515 { 0x00990000, 0x00990000, 0x00990000 } },
516 { 6, AR5K_RF_BUFFER,
517 { 0x00500000, 0x00500000, 0x00500000 } },
518 { 6, AR5K_RF_BUFFER,
519 { 0x002a0000, 0x002a0000, 0x002a0000 } },
520 { 6, AR5K_RF_BUFFER,
521 { 0x00120000, 0x00120000, 0x00120000 } },
522 { 6, AR5K_RF_BUFFER,
523 { 0xc0320000, 0xc0320000, 0xc0320000 } },
524 { 6, AR5K_RF_BUFFER,
525 { 0x01740000, 0x01740000, 0x01740000 } },
526 { 6, AR5K_RF_BUFFER,
527 { 0x00110000, 0x00110000, 0x00110000 } },
528 { 6, AR5K_RF_BUFFER,
529 { 0x86280000, 0x86280000, 0x86280000 } },
530 { 6, AR5K_RF_BUFFER,
531 { 0x31840000, 0x31840000, 0x31840000 } },
532 { 6, AR5K_RF_BUFFER,
533 { 0x00f20080, 0x00f20080, 0x00f20080 } },
534 { 6, AR5K_RF_BUFFER,
535 { 0x00070019, 0x00070019, 0x00070019 } },
536 { 6, AR5K_RF_BUFFER,
537 { 0x00000000, 0x00000000, 0x00000000 } },
538 { 6, AR5K_RF_BUFFER,
539 { 0x00000000, 0x00000000, 0x00000000 } },
540 { 6, AR5K_RF_BUFFER,
541 { 0x000000b2, 0x000000b2, 0x000000b2 } },
542 { 6, AR5K_RF_BUFFER,
543 { 0x00b02184, 0x00b02184, 0x00b02184 } },
544 { 6, AR5K_RF_BUFFER,
545 { 0x004125a4, 0x004125a4, 0x004125a4 } },
546 { 6, AR5K_RF_BUFFER,
547 { 0x00119220, 0x00119220, 0x00119220 } },
548 { 6, AR5K_RF_BUFFER,
549 { 0x001a4800, 0x001a4800, 0x001a4800 } },
550 { 6, AR5K_RF_BUFFER_CONTROL_5,
551 { 0x000b0230, 0x000b0230, 0x000b0230 } },
552 { 7, AR5K_RF_BUFFER,
553 { 0x00000094, 0x00000094, 0x00000094 } },
554 { 7, AR5K_RF_BUFFER,
555 { 0x00000091, 0x00000091, 0x00000091 } },
556 { 7, AR5K_RF_BUFFER,
557 { 0x00000012, 0x00000012, 0x00000012 } },
558 { 7, AR5K_RF_BUFFER,
559 { 0x00000080, 0x00000080, 0x00000080 } },
560 { 7, AR5K_RF_BUFFER,
561 { 0x000000d9, 0x000000d9, 0x000000d9 } },
562 { 7, AR5K_RF_BUFFER,
563 { 0x00000060, 0x00000060, 0x00000060 } },
564 { 7, AR5K_RF_BUFFER,
565 { 0x000000f0, 0x000000f0, 0x000000f0 } },
566 { 7, AR5K_RF_BUFFER,
567 { 0x000000a2, 0x000000a2, 0x000000a2 } },
568 { 7, AR5K_RF_BUFFER,
569 { 0x00000052, 0x00000052, 0x00000052 } },
570 { 7, AR5K_RF_BUFFER,
571 { 0x000000d4, 0x000000d4, 0x000000d4 } },
572 { 7, AR5K_RF_BUFFER,
573 { 0x000014cc, 0x000014cc, 0x000014cc } },
574 { 7, AR5K_RF_BUFFER,
575 { 0x0000048c, 0x0000048c, 0x0000048c } },
576 { 7, AR5K_RF_BUFFER_CONTROL_1,
577 { 0x00000003, 0x00000003, 0x00000003 } },
578};
579
580/* RF5413/5414 mode-specific init registers */
581static const struct ath5k_ini_rf rfregs_5413[] = {
582 { 1, 0x98d4,
583 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
584 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
585 { 2, 0x98d0,
586 { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } },
587 { 3, 0x98dc,
588 { 0x00a000c0, 0x00a000c0, 0x00e000c0, 0x00e000c0, 0x00e000c0 } },
589 { 6, 0x989c,
590 { 0x33000000, 0x33000000, 0x33000000, 0x33000000, 0x33000000 } },
591 { 6, 0x989c,
592 { 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000 } },
593 { 6, 0x989c,
594 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
595 { 6, 0x989c,
596 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
597 { 6, 0x989c,
598 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
599 { 6, 0x989c,
600 { 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000 } },
601 { 6, 0x989c,
602 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
603 { 6, 0x989c,
604 { 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000 } },
605 { 6, 0x989c,
606 { 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000 } },
607 { 6, 0x989c,
608 { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } },
609 { 6, 0x989c,
610 { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } },
611 { 6, 0x989c,
612 { 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000 } },
613 { 6, 0x989c,
614 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
615 { 6, 0x989c,
616 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
617 { 6, 0x989c,
618 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
619 { 6, 0x989c,
620 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
621 { 6, 0x989c,
622 { 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000 } },
623 { 6, 0x989c,
624 { 0x00610000, 0x00610000, 0x00610000, 0x00610000, 0x00610000 } },
625 { 6, 0x989c,
626 { 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
627 { 6, 0x989c,
628 { 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000 } },
629 { 6, 0x989c,
630 { 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000 } },
631 { 6, 0x989c,
632 { 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000 } },
633 { 6, 0x989c,
634 { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } },
635 { 6, 0x989c,
636 { 0x00440000, 0x00440000, 0x00440000, 0x00440000, 0x00440000 } },
637 { 6, 0x989c,
638 { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } },
639 { 6, 0x989c,
640 { 0x00100080, 0x00100080, 0x00100080, 0x00100080, 0x00100080 } },
641 { 6, 0x989c,
642 { 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034 } },
643 { 6, 0x989c,
644 { 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0 } },
645 { 6, 0x989c,
646 { 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f } },
647 { 6, 0x989c,
648 { 0x00510040, 0x00510040, 0x005100a0, 0x005100a0, 0x005100a0 } },
649 { 6, 0x989c,
650 { 0x0050006a, 0x0050006a, 0x005000dd, 0x005000dd, 0x005000dd } },
651 { 6, 0x989c,
652 { 0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000 } },
653 { 6, 0x989c,
654 { 0x00004044, 0x00004044, 0x00004044, 0x00004044, 0x00004044 } },
655 { 6, 0x989c,
656 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
657 { 6, 0x989c,
658 { 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0 } },
659 { 6, 0x989c,
660 { 0x00002c00, 0x00002c00, 0x00003600, 0x00003600, 0x00003600 } },
661 { 6, 0x98c8,
662 { 0x00000403, 0x00000403, 0x00040403, 0x00040403, 0x00040403 } },
663 { 7, 0x989c,
664 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
665 { 7, 0x989c,
666 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
667 { 7, 0x98cc,
668 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
669};
670
671/* RF2413/2414 mode-specific init registers */
672static const struct ath5k_ini_rf rfregs_2413[] = {
673 { 1, AR5K_RF_BUFFER_CONTROL_4,
674	/*   mode b         mode g         mode gTurbo  */
675 { 0x00000020, 0x00000020, 0x00000020 } },
676 { 2, AR5K_RF_BUFFER_CONTROL_3,
677 { 0x02001408, 0x02001408, 0x02001408 } },
678 { 3, AR5K_RF_BUFFER_CONTROL_6,
679 { 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
680 { 6, AR5K_RF_BUFFER,
681 { 0xf0000000, 0xf0000000, 0xf0000000 } },
682 { 6, AR5K_RF_BUFFER,
683 { 0x00000000, 0x00000000, 0x00000000 } },
684 { 6, AR5K_RF_BUFFER,
685 { 0x03000000, 0x03000000, 0x03000000 } },
686 { 6, AR5K_RF_BUFFER,
687 { 0x00000000, 0x00000000, 0x00000000 } },
688 { 6, AR5K_RF_BUFFER,
689 { 0x00000000, 0x00000000, 0x00000000 } },
690 { 6, AR5K_RF_BUFFER,
691 { 0x00000000, 0x00000000, 0x00000000 } },
692 { 6, AR5K_RF_BUFFER,
693 { 0x00000000, 0x00000000, 0x00000000 } },
694 { 6, AR5K_RF_BUFFER,
695 { 0x00000000, 0x00000000, 0x00000000 } },
696 { 6, AR5K_RF_BUFFER,
697 { 0x40400000, 0x40400000, 0x40400000 } },
698 { 6, AR5K_RF_BUFFER,
699 { 0x65050000, 0x65050000, 0x65050000 } },
700 { 6, AR5K_RF_BUFFER,
701 { 0x00000000, 0x00000000, 0x00000000 } },
702 { 6, AR5K_RF_BUFFER,
703 { 0x00000000, 0x00000000, 0x00000000 } },
704 { 6, AR5K_RF_BUFFER,
705 { 0x00420000, 0x00420000, 0x00420000 } },
706 { 6, AR5K_RF_BUFFER,
707 { 0x00b50000, 0x00b50000, 0x00b50000 } },
708 { 6, AR5K_RF_BUFFER,
709 { 0x00030000, 0x00030000, 0x00030000 } },
710 { 6, AR5K_RF_BUFFER,
711 { 0x00f70000, 0x00f70000, 0x00f70000 } },
712 { 6, AR5K_RF_BUFFER,
713 { 0x009d0000, 0x009d0000, 0x009d0000 } },
714 { 6, AR5K_RF_BUFFER,
715 { 0x00220000, 0x00220000, 0x00220000 } },
716 { 6, AR5K_RF_BUFFER,
717 { 0x04220000, 0x04220000, 0x04220000 } },
718 { 6, AR5K_RF_BUFFER,
719 { 0x00230018, 0x00230018, 0x00230018 } },
720 { 6, AR5K_RF_BUFFER,
721 { 0x00280050, 0x00280050, 0x00280050 } },
722 { 6, AR5K_RF_BUFFER,
723 { 0x005000c3, 0x005000c3, 0x005000c3 } },
724 { 6, AR5K_RF_BUFFER,
725 { 0x0004007f, 0x0004007f, 0x0004007f } },
726 { 6, AR5K_RF_BUFFER,
727 { 0x00000458, 0x00000458, 0x00000458 } },
728 { 6, AR5K_RF_BUFFER,
729 { 0x00000000, 0x00000000, 0x00000000 } },
730 { 6, AR5K_RF_BUFFER,
731 { 0x0000c000, 0x0000c000, 0x0000c000 } },
732 { 6, AR5K_RF_BUFFER_CONTROL_5,
733 { 0x00400230, 0x00400230, 0x00400230 } },
734 { 7, AR5K_RF_BUFFER,
735 { 0x00006400, 0x00006400, 0x00006400 } },
736 { 7, AR5K_RF_BUFFER,
737 { 0x00000800, 0x00000800, 0x00000800 } },
738 { 7, AR5K_RF_BUFFER_CONTROL_2,
739 { 0x0000000e, 0x0000000e, 0x0000000e } },
740};
741
742/* RF2425 mode-specific init registers */
743static const struct ath5k_ini_rf rfregs_2425[] = {
744 { 1, AR5K_RF_BUFFER_CONTROL_4,
745	/*   mode g         mode gTurbo  */
746 { 0x00000020, 0x00000020 } },
747 { 2, AR5K_RF_BUFFER_CONTROL_3,
748 { 0x02001408, 0x02001408 } },
749 { 3, AR5K_RF_BUFFER_CONTROL_6,
750 { 0x00e020c0, 0x00e020c0 } },
751 { 6, AR5K_RF_BUFFER,
752 { 0x10000000, 0x10000000 } },
753 { 6, AR5K_RF_BUFFER,
754 { 0x00000000, 0x00000000 } },
755 { 6, AR5K_RF_BUFFER,
756 { 0x00000000, 0x00000000 } },
757 { 6, AR5K_RF_BUFFER,
758 { 0x00000000, 0x00000000 } },
759 { 6, AR5K_RF_BUFFER,
760 { 0x00000000, 0x00000000 } },
761 { 6, AR5K_RF_BUFFER,
762 { 0x00000000, 0x00000000 } },
763 { 6, AR5K_RF_BUFFER,
764 { 0x00000000, 0x00000000 } },
765 { 6, AR5K_RF_BUFFER,
766 { 0x00000000, 0x00000000 } },
767 { 6, AR5K_RF_BUFFER,
768 { 0x00000000, 0x00000000 } },
769 { 6, AR5K_RF_BUFFER,
770 { 0x00000000, 0x00000000 } },
771 { 6, AR5K_RF_BUFFER,
772 { 0x00000000, 0x00000000 } },
773 { 6, AR5K_RF_BUFFER,
774 { 0x002a0000, 0x002a0000 } },
775 { 6, AR5K_RF_BUFFER,
776 { 0x00000000, 0x00000000 } },
777 { 6, AR5K_RF_BUFFER,
778 { 0x00000000, 0x00000000 } },
779 { 6, AR5K_RF_BUFFER,
780 { 0x00100000, 0x00100000 } },
781 { 6, AR5K_RF_BUFFER,
782 { 0x00020000, 0x00020000 } },
783 { 6, AR5K_RF_BUFFER,
784 { 0x00730000, 0x00730000 } },
785 { 6, AR5K_RF_BUFFER,
786 { 0x00f80000, 0x00f80000 } },
787 { 6, AR5K_RF_BUFFER,
788 { 0x00e70000, 0x00e70000 } },
789 { 6, AR5K_RF_BUFFER,
790 { 0x00140000, 0x00140000 } },
791 { 6, AR5K_RF_BUFFER,
792 { 0x00910040, 0x00910040 } },
793 { 6, AR5K_RF_BUFFER,
794 { 0x0007001a, 0x0007001a } },
795 { 6, AR5K_RF_BUFFER,
796 { 0x00410000, 0x00410000 } },
797 { 6, AR5K_RF_BUFFER,
798 { 0x00810060, 0x00810060 } },
799 { 6, AR5K_RF_BUFFER,
800 { 0x00020803, 0x00020803 } },
801 { 6, AR5K_RF_BUFFER,
802 { 0x00000000, 0x00000000 } },
803 { 6, AR5K_RF_BUFFER,
804 { 0x00000000, 0x00000000 } },
805 { 6, AR5K_RF_BUFFER,
806 { 0x00001660, 0x00001660 } },
807 { 6, AR5K_RF_BUFFER,
808 { 0x00001688, 0x00001688 } },
809 { 6, AR5K_RF_BUFFER_CONTROL_1,
810 { 0x00000001, 0x00000001 } },
811 { 7, AR5K_RF_BUFFER,
812 { 0x00006400, 0x00006400 } },
813 { 7, AR5K_RF_BUFFER,
814 { 0x00000800, 0x00000800 } },
815 { 7, AR5K_RF_BUFFER_CONTROL_2,
816 { 0x0000000e, 0x0000000e } },
817};
818
819/* Initial RF Gain settings for RF5112 */
820static const struct ath5k_ini_rfgain rfgain_5112[] = {
821 /* 5Ghz 2Ghz */
822 { AR5K_RF_GAIN(0), { 0x00000007, 0x00000007 } },
823 { AR5K_RF_GAIN(1), { 0x00000047, 0x00000047 } },
824 { AR5K_RF_GAIN(2), { 0x00000087, 0x00000087 } },
825 { AR5K_RF_GAIN(3), { 0x000001a0, 0x000001a0 } },
826 { AR5K_RF_GAIN(4), { 0x000001e0, 0x000001e0 } },
827 { AR5K_RF_GAIN(5), { 0x00000020, 0x00000020 } },
828 { AR5K_RF_GAIN(6), { 0x00000060, 0x00000060 } },
829 { AR5K_RF_GAIN(7), { 0x000001a1, 0x000001a1 } },
830 { AR5K_RF_GAIN(8), { 0x000001e1, 0x000001e1 } },
831 { AR5K_RF_GAIN(9), { 0x00000021, 0x00000021 } },
832 { AR5K_RF_GAIN(10), { 0x00000061, 0x00000061 } },
833 { AR5K_RF_GAIN(11), { 0x00000162, 0x00000162 } },
834 { AR5K_RF_GAIN(12), { 0x000001a2, 0x000001a2 } },
835 { AR5K_RF_GAIN(13), { 0x000001e2, 0x000001e2 } },
836 { AR5K_RF_GAIN(14), { 0x00000022, 0x00000022 } },
837 { AR5K_RF_GAIN(15), { 0x00000062, 0x00000062 } },
838 { AR5K_RF_GAIN(16), { 0x00000163, 0x00000163 } },
839 { AR5K_RF_GAIN(17), { 0x000001a3, 0x000001a3 } },
840 { AR5K_RF_GAIN(18), { 0x000001e3, 0x000001e3 } },
841 { AR5K_RF_GAIN(19), { 0x00000023, 0x00000023 } },
842 { AR5K_RF_GAIN(20), { 0x00000063, 0x00000063 } },
843 { AR5K_RF_GAIN(21), { 0x00000184, 0x00000184 } },
844 { AR5K_RF_GAIN(22), { 0x000001c4, 0x000001c4 } },
845 { AR5K_RF_GAIN(23), { 0x00000004, 0x00000004 } },
846 { AR5K_RF_GAIN(24), { 0x000001ea, 0x0000000b } },
847 { AR5K_RF_GAIN(25), { 0x0000002a, 0x0000004b } },
848 { AR5K_RF_GAIN(26), { 0x0000006a, 0x0000008b } },
849 { AR5K_RF_GAIN(27), { 0x000000aa, 0x000001ac } },
850 { AR5K_RF_GAIN(28), { 0x000001ab, 0x000001ec } },
851 { AR5K_RF_GAIN(29), { 0x000001eb, 0x0000002c } },
852 { AR5K_RF_GAIN(30), { 0x0000002b, 0x00000012 } },
853 { AR5K_RF_GAIN(31), { 0x0000006b, 0x00000052 } },
854 { AR5K_RF_GAIN(32), { 0x000000ab, 0x00000092 } },
855 { AR5K_RF_GAIN(33), { 0x000001ac, 0x00000193 } },
856 { AR5K_RF_GAIN(34), { 0x000001ec, 0x000001d3 } },
857 { AR5K_RF_GAIN(35), { 0x0000002c, 0x00000013 } },
858 { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000053 } },
859 { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000093 } },
860 { AR5K_RF_GAIN(38), { 0x000000ba, 0x00000194 } },
861 { AR5K_RF_GAIN(39), { 0x000001bb, 0x000001d4 } },
862 { AR5K_RF_GAIN(40), { 0x000001fb, 0x00000014 } },
863 { AR5K_RF_GAIN(41), { 0x0000003b, 0x0000003a } },
864 { AR5K_RF_GAIN(42), { 0x0000007b, 0x0000007a } },
865 { AR5K_RF_GAIN(43), { 0x000000bb, 0x000000ba } },
866 { AR5K_RF_GAIN(44), { 0x000001bc, 0x000001bb } },
867 { AR5K_RF_GAIN(45), { 0x000001fc, 0x000001fb } },
868 { AR5K_RF_GAIN(46), { 0x0000003c, 0x0000003b } },
869 { AR5K_RF_GAIN(47), { 0x0000007c, 0x0000007b } },
870 { AR5K_RF_GAIN(48), { 0x000000bc, 0x000000bb } },
871 { AR5K_RF_GAIN(49), { 0x000000fc, 0x000001bc } },
872 { AR5K_RF_GAIN(50), { 0x000000fc, 0x000001fc } },
873 { AR5K_RF_GAIN(51), { 0x000000fc, 0x0000003c } },
874 { AR5K_RF_GAIN(52), { 0x000000fc, 0x0000007c } },
875 { AR5K_RF_GAIN(53), { 0x000000fc, 0x000000bc } },
876 { AR5K_RF_GAIN(54), { 0x000000fc, 0x000000fc } },
877 { AR5K_RF_GAIN(55), { 0x000000fc, 0x000000fc } },
878 { AR5K_RF_GAIN(56), { 0x000000fc, 0x000000fc } },
879 { AR5K_RF_GAIN(57), { 0x000000fc, 0x000000fc } },
880 { AR5K_RF_GAIN(58), { 0x000000fc, 0x000000fc } },
881 { AR5K_RF_GAIN(59), { 0x000000fc, 0x000000fc } },
882 { AR5K_RF_GAIN(60), { 0x000000fc, 0x000000fc } },
883 { AR5K_RF_GAIN(61), { 0x000000fc, 0x000000fc } },
884 { AR5K_RF_GAIN(62), { 0x000000fc, 0x000000fc } },
885 { AR5K_RF_GAIN(63), { 0x000000fc, 0x000000fc } },
886};
887
888/* Initial RF Gain settings for RF5413 */
889static const struct ath5k_ini_rfgain rfgain_5413[] = {
890 /* 5Ghz 2Ghz */
891 { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
892 { AR5K_RF_GAIN(1), { 0x00000040, 0x00000040 } },
893 { AR5K_RF_GAIN(2), { 0x00000080, 0x00000080 } },
894 { AR5K_RF_GAIN(3), { 0x000001a1, 0x00000161 } },
895 { AR5K_RF_GAIN(4), { 0x000001e1, 0x000001a1 } },
896 { AR5K_RF_GAIN(5), { 0x00000021, 0x000001e1 } },
897 { AR5K_RF_GAIN(6), { 0x00000061, 0x00000021 } },
898 { AR5K_RF_GAIN(7), { 0x00000188, 0x00000061 } },
899 { AR5K_RF_GAIN(8), { 0x000001c8, 0x00000188 } },
900 { AR5K_RF_GAIN(9), { 0x00000008, 0x000001c8 } },
901 { AR5K_RF_GAIN(10), { 0x00000048, 0x00000008 } },
902 { AR5K_RF_GAIN(11), { 0x00000088, 0x00000048 } },
903 { AR5K_RF_GAIN(12), { 0x000001a9, 0x00000088 } },
904 { AR5K_RF_GAIN(13), { 0x000001e9, 0x00000169 } },
905 { AR5K_RF_GAIN(14), { 0x00000029, 0x000001a9 } },
906 { AR5K_RF_GAIN(15), { 0x00000069, 0x000001e9 } },
907 { AR5K_RF_GAIN(16), { 0x000001d0, 0x00000029 } },
908 { AR5K_RF_GAIN(17), { 0x00000010, 0x00000069 } },
909 { AR5K_RF_GAIN(18), { 0x00000050, 0x00000190 } },
910 { AR5K_RF_GAIN(19), { 0x00000090, 0x000001d0 } },
911 { AR5K_RF_GAIN(20), { 0x000001b1, 0x00000010 } },
912 { AR5K_RF_GAIN(21), { 0x000001f1, 0x00000050 } },
913 { AR5K_RF_GAIN(22), { 0x00000031, 0x00000090 } },
914 { AR5K_RF_GAIN(23), { 0x00000071, 0x00000171 } },
915 { AR5K_RF_GAIN(24), { 0x000001b8, 0x000001b1 } },
916 { AR5K_RF_GAIN(25), { 0x000001f8, 0x000001f1 } },
917 { AR5K_RF_GAIN(26), { 0x00000038, 0x00000031 } },
918 { AR5K_RF_GAIN(27), { 0x00000078, 0x00000071 } },
919 { AR5K_RF_GAIN(28), { 0x00000199, 0x00000198 } },
920 { AR5K_RF_GAIN(29), { 0x000001d9, 0x000001d8 } },
921 { AR5K_RF_GAIN(30), { 0x00000019, 0x00000018 } },
922 { AR5K_RF_GAIN(31), { 0x00000059, 0x00000058 } },
923 { AR5K_RF_GAIN(32), { 0x00000099, 0x00000098 } },
924 { AR5K_RF_GAIN(33), { 0x000000d9, 0x00000179 } },
925 { AR5K_RF_GAIN(34), { 0x000000f9, 0x000001b9 } },
926 { AR5K_RF_GAIN(35), { 0x000000f9, 0x000001f9 } },
927 { AR5K_RF_GAIN(36), { 0x000000f9, 0x00000039 } },
928 { AR5K_RF_GAIN(37), { 0x000000f9, 0x00000079 } },
929 { AR5K_RF_GAIN(38), { 0x000000f9, 0x000000b9 } },
930 { AR5K_RF_GAIN(39), { 0x000000f9, 0x000000f9 } },
931 { AR5K_RF_GAIN(40), { 0x000000f9, 0x000000f9 } },
932 { AR5K_RF_GAIN(41), { 0x000000f9, 0x000000f9 } },
933 { AR5K_RF_GAIN(42), { 0x000000f9, 0x000000f9 } },
934 { AR5K_RF_GAIN(43), { 0x000000f9, 0x000000f9 } },
935 { AR5K_RF_GAIN(44), { 0x000000f9, 0x000000f9 } },
936 { AR5K_RF_GAIN(45), { 0x000000f9, 0x000000f9 } },
937 { AR5K_RF_GAIN(46), { 0x000000f9, 0x000000f9 } },
938 { AR5K_RF_GAIN(47), { 0x000000f9, 0x000000f9 } },
939 { AR5K_RF_GAIN(48), { 0x000000f9, 0x000000f9 } },
940 { AR5K_RF_GAIN(49), { 0x000000f9, 0x000000f9 } },
941 { AR5K_RF_GAIN(50), { 0x000000f9, 0x000000f9 } },
942 { AR5K_RF_GAIN(51), { 0x000000f9, 0x000000f9 } },
943 { AR5K_RF_GAIN(52), { 0x000000f9, 0x000000f9 } },
944 { AR5K_RF_GAIN(53), { 0x000000f9, 0x000000f9 } },
945 { AR5K_RF_GAIN(54), { 0x000000f9, 0x000000f9 } },
946 { AR5K_RF_GAIN(55), { 0x000000f9, 0x000000f9 } },
947 { AR5K_RF_GAIN(56), { 0x000000f9, 0x000000f9 } },
948 { AR5K_RF_GAIN(57), { 0x000000f9, 0x000000f9 } },
949 { AR5K_RF_GAIN(58), { 0x000000f9, 0x000000f9 } },
950 { AR5K_RF_GAIN(59), { 0x000000f9, 0x000000f9 } },
951 { AR5K_RF_GAIN(60), { 0x000000f9, 0x000000f9 } },
952 { AR5K_RF_GAIN(61), { 0x000000f9, 0x000000f9 } },
953 { AR5K_RF_GAIN(62), { 0x000000f9, 0x000000f9 } },
954 { AR5K_RF_GAIN(63), { 0x000000f9, 0x000000f9 } },
955};
956
957/* Initial RF Gain settings for RF2413 */
958static const struct ath5k_ini_rfgain rfgain_2413[] = {
959 { AR5K_RF_GAIN(0), { 0x00000000 } },
960 { AR5K_RF_GAIN(1), { 0x00000040 } },
961 { AR5K_RF_GAIN(2), { 0x00000080 } },
962 { AR5K_RF_GAIN(3), { 0x00000181 } },
963 { AR5K_RF_GAIN(4), { 0x000001c1 } },
964 { AR5K_RF_GAIN(5), { 0x00000001 } },
965 { AR5K_RF_GAIN(6), { 0x00000041 } },
966 { AR5K_RF_GAIN(7), { 0x00000081 } },
967 { AR5K_RF_GAIN(8), { 0x00000168 } },
968 { AR5K_RF_GAIN(9), { 0x000001a8 } },
969 { AR5K_RF_GAIN(10), { 0x000001e8 } },
970 { AR5K_RF_GAIN(11), { 0x00000028 } },
971 { AR5K_RF_GAIN(12), { 0x00000068 } },
972 { AR5K_RF_GAIN(13), { 0x00000189 } },
973 { AR5K_RF_GAIN(14), { 0x000001c9 } },
974 { AR5K_RF_GAIN(15), { 0x00000009 } },
975 { AR5K_RF_GAIN(16), { 0x00000049 } },
976 { AR5K_RF_GAIN(17), { 0x00000089 } },
977 { AR5K_RF_GAIN(18), { 0x00000190 } },
978 { AR5K_RF_GAIN(19), { 0x000001d0 } },
979 { AR5K_RF_GAIN(20), { 0x00000010 } },
980 { AR5K_RF_GAIN(21), { 0x00000050 } },
981 { AR5K_RF_GAIN(22), { 0x00000090 } },
982 { AR5K_RF_GAIN(23), { 0x00000191 } },
983 { AR5K_RF_GAIN(24), { 0x000001d1 } },
984 { AR5K_RF_GAIN(25), { 0x00000011 } },
985 { AR5K_RF_GAIN(26), { 0x00000051 } },
986 { AR5K_RF_GAIN(27), { 0x00000091 } },
987 { AR5K_RF_GAIN(28), { 0x00000178 } },
988 { AR5K_RF_GAIN(29), { 0x000001b8 } },
989 { AR5K_RF_GAIN(30), { 0x000001f8 } },
990 { AR5K_RF_GAIN(31), { 0x00000038 } },
991 { AR5K_RF_GAIN(32), { 0x00000078 } },
992 { AR5K_RF_GAIN(33), { 0x00000199 } },
993 { AR5K_RF_GAIN(34), { 0x000001d9 } },
994 { AR5K_RF_GAIN(35), { 0x00000019 } },
995 { AR5K_RF_GAIN(36), { 0x00000059 } },
996 { AR5K_RF_GAIN(37), { 0x00000099 } },
997 { AR5K_RF_GAIN(38), { 0x000000d9 } },
998 { AR5K_RF_GAIN(39), { 0x000000f9 } },
999 { AR5K_RF_GAIN(40), { 0x000000f9 } },
1000 { AR5K_RF_GAIN(41), { 0x000000f9 } },
1001 { AR5K_RF_GAIN(42), { 0x000000f9 } },
1002 { AR5K_RF_GAIN(43), { 0x000000f9 } },
1003 { AR5K_RF_GAIN(44), { 0x000000f9 } },
1004 { AR5K_RF_GAIN(45), { 0x000000f9 } },
1005 { AR5K_RF_GAIN(46), { 0x000000f9 } },
1006 { AR5K_RF_GAIN(47), { 0x000000f9 } },
1007 { AR5K_RF_GAIN(48), { 0x000000f9 } },
1008 { AR5K_RF_GAIN(49), { 0x000000f9 } },
1009 { AR5K_RF_GAIN(50), { 0x000000f9 } },
1010 { AR5K_RF_GAIN(51), { 0x000000f9 } },
1011 { AR5K_RF_GAIN(52), { 0x000000f9 } },
1012 { AR5K_RF_GAIN(53), { 0x000000f9 } },
1013 { AR5K_RF_GAIN(54), { 0x000000f9 } },
1014 { AR5K_RF_GAIN(55), { 0x000000f9 } },
1015 { AR5K_RF_GAIN(56), { 0x000000f9 } },
1016 { AR5K_RF_GAIN(57), { 0x000000f9 } },
1017 { AR5K_RF_GAIN(58), { 0x000000f9 } },
1018 { AR5K_RF_GAIN(59), { 0x000000f9 } },
1019 { AR5K_RF_GAIN(60), { 0x000000f9 } },
1020 { AR5K_RF_GAIN(61), { 0x000000f9 } },
1021 { AR5K_RF_GAIN(62), { 0x000000f9 } },
1022 { AR5K_RF_GAIN(63), { 0x000000f9 } },
1023};
1024
1025/* Initial RF Gain settings for RF2425 */
1026static const struct ath5k_ini_rfgain rfgain_2425[] = {
1027 { AR5K_RF_GAIN(0), { 0x00000000 } },
1028 { AR5K_RF_GAIN(1), { 0x00000040 } },
1029 { AR5K_RF_GAIN(2), { 0x00000080 } },
1030 { AR5K_RF_GAIN(3), { 0x00000181 } },
1031 { AR5K_RF_GAIN(4), { 0x000001c1 } },
1032 { AR5K_RF_GAIN(5), { 0x00000001 } },
1033 { AR5K_RF_GAIN(6), { 0x00000041 } },
1034 { AR5K_RF_GAIN(7), { 0x00000081 } },
1035 { AR5K_RF_GAIN(8), { 0x00000188 } },
1036 { AR5K_RF_GAIN(9), { 0x000001c8 } },
1037 { AR5K_RF_GAIN(10), { 0x00000008 } },
1038 { AR5K_RF_GAIN(11), { 0x00000048 } },
1039 { AR5K_RF_GAIN(12), { 0x00000088 } },
1040 { AR5K_RF_GAIN(13), { 0x00000189 } },
1041 { AR5K_RF_GAIN(14), { 0x000001c9 } },
1042 { AR5K_RF_GAIN(15), { 0x00000009 } },
1043 { AR5K_RF_GAIN(16), { 0x00000049 } },
1044 { AR5K_RF_GAIN(17), { 0x00000089 } },
1045 { AR5K_RF_GAIN(18), { 0x000001b0 } },
1046 { AR5K_RF_GAIN(19), { 0x000001f0 } },
1047 { AR5K_RF_GAIN(20), { 0x00000030 } },
1048 { AR5K_RF_GAIN(21), { 0x00000070 } },
1049 { AR5K_RF_GAIN(22), { 0x00000171 } },
1050 { AR5K_RF_GAIN(23), { 0x000001b1 } },
1051 { AR5K_RF_GAIN(24), { 0x000001f1 } },
1052 { AR5K_RF_GAIN(25), { 0x00000031 } },
1053 { AR5K_RF_GAIN(26), { 0x00000071 } },
1054 { AR5K_RF_GAIN(27), { 0x000001b8 } },
1055 { AR5K_RF_GAIN(28), { 0x000001f8 } },
1056 { AR5K_RF_GAIN(29), { 0x00000038 } },
1057 { AR5K_RF_GAIN(30), { 0x00000078 } },
1058 { AR5K_RF_GAIN(31), { 0x000000b8 } },
1059 { AR5K_RF_GAIN(32), { 0x000001b9 } },
1060 { AR5K_RF_GAIN(33), { 0x000001f9 } },
1061 { AR5K_RF_GAIN(34), { 0x00000039 } },
1062 { AR5K_RF_GAIN(35), { 0x00000079 } },
1063 { AR5K_RF_GAIN(36), { 0x000000b9 } },
1064 { AR5K_RF_GAIN(37), { 0x000000f9 } },
1065 { AR5K_RF_GAIN(38), { 0x000000f9 } },
1066 { AR5K_RF_GAIN(39), { 0x000000f9 } },
1067 { AR5K_RF_GAIN(40), { 0x000000f9 } },
1068 { AR5K_RF_GAIN(41), { 0x000000f9 } },
1069 { AR5K_RF_GAIN(42), { 0x000000f9 } },
1070 { AR5K_RF_GAIN(43), { 0x000000f9 } },
1071 { AR5K_RF_GAIN(44), { 0x000000f9 } },
1072 { AR5K_RF_GAIN(45), { 0x000000f9 } },
1073 { AR5K_RF_GAIN(46), { 0x000000f9 } },
1074 { AR5K_RF_GAIN(47), { 0x000000f9 } },
1075 { AR5K_RF_GAIN(48), { 0x000000f9 } },
1076 { AR5K_RF_GAIN(49), { 0x000000f9 } },
1077 { AR5K_RF_GAIN(50), { 0x000000f9 } },
1078 { AR5K_RF_GAIN(51), { 0x000000f9 } },
1079 { AR5K_RF_GAIN(52), { 0x000000f9 } },
1080 { AR5K_RF_GAIN(53), { 0x000000f9 } },
1081 { AR5K_RF_GAIN(54), { 0x000000f9 } },
1082 { AR5K_RF_GAIN(55), { 0x000000f9 } },
1083 { AR5K_RF_GAIN(56), { 0x000000f9 } },
1084 { AR5K_RF_GAIN(57), { 0x000000f9 } },
1085 { AR5K_RF_GAIN(58), { 0x000000f9 } },
1086 { AR5K_RF_GAIN(59), { 0x000000f9 } },
1087 { AR5K_RF_GAIN(60), { 0x000000f9 } },
1088 { AR5K_RF_GAIN(61), { 0x000000f9 } },
1089 { AR5K_RF_GAIN(62), { 0x000000f9 } },
1090 { AR5K_RF_GAIN(63), { 0x000000f9 } },
1091};
1092
1093static const struct ath5k_gain_opt rfgain_opt_5112 = {
1094 1,
1095 8,
1096 {
1097 { { 3, 0, 0, 0, 0, 0, 0 }, 6 },
1098 { { 2, 0, 0, 0, 0, 0, 0 }, 0 },
1099 { { 1, 0, 0, 0, 0, 0, 0 }, -3 },
1100 { { 0, 0, 0, 0, 0, 0, 0 }, -6 },
1101 { { 0, 1, 1, 0, 0, 0, 0 }, -8 },
1102 { { 0, 1, 1, 0, 1, 1, 0 }, -10 },
1103 { { 0, 1, 0, 1, 1, 1, 0 }, -13 },
1104 { { 0, 1, 0, 1, 1, 0, 1 }, -16 },
1105 }
1106};
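The two bare values that open rfgain_opt_5112 (1 and 8) read as the default step index and the number of ladder steps, and each row pairs seven per-step RF parameters with a relative gain value. Below is a minimal sketch of the types these initializers appear to fill, inferred only from the fields referenced later in this diff (go_default, go_steps_count, go_step[], gos_param[], gos_gain); the array bound and exact field widths are assumptions, not the driver's authoritative definitions.

/* Sketch only: layout inferred from the initializers above and the
 * field names used further down. AR5K_GAIN_STEP_COUNT is an assumed bound. */
#define AR5K_GAIN_CRN_MAX_FIX_BITS	7	/* seven knobs per row, as above */
#define AR5K_GAIN_STEP_COUNT		10	/* assumed upper bound */

struct ath5k_gain_opt_step {
	s8	gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];	/* per-step RF settings */
	s8	gos_gain;				/* relative gain of this step */
};

struct ath5k_gain_opt {
	u32	go_default;		/* default step index (1 above) */
	u32	go_steps_count;		/* rows in go_step[] (8 above) */
	const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
};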
1107 31
1108/* 32/*
1109 * Used to modify RF Banks before writing them to AR5K_RF_BUFFER 33 * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
1110 */ 34 */
1111static unsigned int ath5k_hw_rfregs_op(u32 *rf, u32 offset, u32 reg, u32 bits, 35static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
1112 u32 first, u32 col, bool set) 36 const struct ath5k_rf_reg *rf_regs,
37 u32 val, u8 reg_id, bool set)
1113{ 38{
1114 u32 mask, entry, last, data, shift, position; 39 const struct ath5k_rf_reg *rfreg = NULL;
1115 s32 left; 40 u8 offset, bank, num_bits, col, position;
41 u16 entry;
42 u32 mask, data, last_bit, bits_shifted, first_bit;
43 u32 *rfb;
44 s32 bits_left;
1116 int i; 45 int i;
1117 46
1118 data = 0; 47 data = 0;
48 rfb = ah->ah_rf_banks;
1119 49
1120 if (rf == NULL) 50 for (i = 0; i < ah->ah_rf_regs_count; i++) {
51 if (rf_regs[i].index == reg_id) {
52 rfreg = &rf_regs[i];
53 break;
54 }
55 }
56
57 if (rfb == NULL || rfreg == NULL) {
58 ATH5K_PRINTF("Rf register not found!\n");
1121 /* should not happen */ 59 /* should not happen */
1122 return 0; 60 return 0;
61 }
62
63 bank = rfreg->bank;
64 num_bits = rfreg->field.len;
65 first_bit = rfreg->field.pos;
66 col = rfreg->field.col;
67
68 /* first_bit is an offset from the bank's
69 * start. Since we keep all banks in
70 * the same array, we use this offset
71 * to mark each bank's start */
72 offset = ah->ah_offset[bank];
1123 73
1124 if (!(col <= 3 && bits <= 32 && first + bits <= 319)) { 74 /* Boundary check */
75 if (!(col <= 3 && num_bits <= 32 && first_bit + num_bits <= 319)) {
1125 ATH5K_PRINTF("invalid values at offset %u\n", offset); 76 ATH5K_PRINTF("invalid values at offset %u\n", offset);
1126 return 0; 77 return 0;
1127 } 78 }
1128 79
1129 entry = ((first - 1) / 8) + offset; 80 entry = ((first_bit - 1) / 8) + offset;
1130 position = (first - 1) % 8; 81 position = (first_bit - 1) % 8;
1131 82
1132 if (set) 83 if (set)
1133 data = ath5k_hw_bitswap(reg, bits); 84 data = ath5k_hw_bitswap(val, num_bits);
85
86 for (bits_shifted = 0, bits_left = num_bits; bits_left > 0;
87 position = 0, entry++) {
88
89 last_bit = (position + bits_left > 8) ? 8 :
90 position + bits_left;
1134 91
1135 for (i = shift = 0, left = bits; left > 0; position = 0, entry++, i++) { 92 mask = (((1 << last_bit) - 1) ^ ((1 << position) - 1)) <<
1136 last = (position + left > 8) ? 8 : position + left; 93 (col * 8);
1137 mask = (((1 << last) - 1) ^ ((1 << position) - 1)) << (col * 8);
1138 94
1139 if (set) { 95 if (set) {
1140 rf[entry] &= ~mask; 96 rfb[entry] &= ~mask;
1141 rf[entry] |= ((data << position) << (col * 8)) & mask; 97 rfb[entry] |= ((data << position) << (col * 8)) & mask;
1142 data >>= (8 - position); 98 data >>= (8 - position);
1143 } else { 99 } else {
1144 data = (((rf[entry] & mask) >> (col * 8)) >> position) 100 data |= (((rfb[entry] & mask) >> (col * 8)) >> position)
1145 << shift; 101 << bits_shifted;
1146 shift += last - position; 102 bits_shifted += last_bit - position;
1147 } 103 }
1148 104
1149 left -= 8 - position; 105 bits_left -= 8 - position;
1150 } 106 }
1151 107
1152 data = set ? 1 : ath5k_hw_bitswap(data, bits); 108 data = set ? 1 : ath5k_hw_bitswap(data, num_bits);
1153 109
1154 return data; 110 return data;
1155} 111}
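To make the packing above easier to follow: each RF bank is a run of 32-bit buffer words, a register field is described by its first bit within the bank, its length and the byte column (0..3) it occupies, and the value is bit-reversed before being spread across consecutive words 8 bits at a time. The following is a standalone, hedged mirror of that walk for experimenting outside the kernel; rfb_set_field and the local bitswap are names invented for this example, and only the mask arithmetic copies the set path of the driver loop.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for ath5k_hw_bitswap(): reverse the low 'bits' bits. */
static uint32_t bitswap(uint32_t val, unsigned int bits)
{
	uint32_t retval = 0;
	unsigned int i;

	for (i = 0; i < bits; i++) {
		retval = (retval << 1) | (val & 1);
		val >>= 1;
	}
	return retval;
}

/* Write a 'num_bits' wide value into rfb[], starting at bit 'first_bit'
 * of the bank whose first word is rfb[offset], using byte-column 'col'
 * (0..3) of each 32-bit word. */
static void rfb_set_field(uint32_t *rfb, unsigned int offset,
			  unsigned int first_bit, unsigned int num_bits,
			  unsigned int col, uint32_t val)
{
	uint32_t data = bitswap(val, num_bits);
	unsigned int entry = ((first_bit - 1) / 8) + offset;
	unsigned int position = (first_bit - 1) % 8;
	int bits_left = num_bits;

	for (; bits_left > 0; position = 0, entry++) {
		unsigned int last_bit = (position + bits_left > 8) ?
					8 : position + bits_left;
		uint32_t mask = (((1u << last_bit) - 1) ^
				 ((1u << position) - 1)) << (col * 8);

		rfb[entry] &= ~mask;
		rfb[entry] |= ((data << position) << (col * 8)) & mask;
		data >>= (8 - position);
		bits_left -= 8 - position;
	}
}

int main(void)
{
	uint32_t rfb[8] = { 0 };

	/* A 6-bit field starting at bit 6 of a bank at word 0, column 0:
	 * 3 bits land in rfb[0], the remaining 3 in rfb[1]. */
	rfb_set_field(rfb, 0, 6, 6, 0, 0x2a);
	printf("%08x %08x\n", rfb[0], rfb[1]);	/* 000000a0 00000002 */
	return 0;
}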
1156 112
1157static u32 ath5k_hw_rfregs_gainf_corr(struct ath5k_hw *ah) 113/**********************\
114* RF Gain optimization *
115\**********************/
116
117/*
118 * This code is used to optimize RF gain in different environments
119 * (temperature mostly) based on feedback from a power detector.
120 *
121 * It's only used on RF5111 and RF5112; later RF chips seem to have
122 * auto adjustment on hw (notice they have a much smaller BANK 7 and
123 * no gain optimization ladder).
124 *
125 * For more info check out this patent doc
126 * http://www.freepatentsonline.com/7400691.html
127 *
128 * This paper describes power drops as seen on the receiver due to
129 * probe packets
130 * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
131 * %20of%20Power%20Control.pdf
132 *
133 * And this is the MadWiFi bug entry related to the above
134 * http://madwifi-project.org/ticket/1659
135 * with various measurements and diagrams
136 *
137 * TODO: Deal with power drops due to probes by setting an appropriate
138 * tx power on the probe packets! Make this part of the calibration process.
139 */
140
141/* Initialize ah_gain during attach */
142int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
143{
144 /* Initialize the gain optimization values */
145 switch (ah->ah_radio) {
146 case AR5K_RF5111:
147 ah->ah_gain.g_step_idx = rfgain_opt_5111.go_default;
148 ah->ah_gain.g_low = 20;
149 ah->ah_gain.g_high = 35;
150 ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
151 break;
152 case AR5K_RF5112:
153 ah->ah_gain.g_step_idx = rfgain_opt_5112.go_default;
154 ah->ah_gain.g_low = 20;
155 ah->ah_gain.g_high = 85;
156 ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
157 break;
158 default:
159 return -EINVAL;
160 }
161
162 return 0;
163}
164
165/* Schedule a gain probe check on the next transmitted packet.
166 * That means our next packet is going to be sent with lower
167 * tx power and a Peak to Average Power Detector (PAPD) will try
168 * to measure the gain.
169 *
170 * TODO: Use proper tx power setting for the probe packet so
171 * that we don't observe a serious power drop on the receiver
172 *
173 * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc)
174 * just after we enable the probe so that we don't mess with
175 * standard traffic? Maybe it's time to use sw interrupts and
176 * a probe tasklet!!!
177 */
178static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
179{
180
181 /* Skip if gain calibration is inactive or
182 * a probe request is already being handled */
183 if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE)
184 return;
185
186 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max,
187 AR5K_PHY_PAPD_PROBE_TXPOWER) |
188 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
189
190 ah->ah_gain.g_state = AR5K_RFGAIN_READ_REQUESTED;
191
192}
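The single register write above packs the maximum tx power into the PAPD probe tx-power field and sets the probe-on-next-tx flag. A rough sketch of the composed word follows, assuming the usual ath5k shift-and-mask helper semantics; the mask, shift and flag values below are placeholders, not the real register layout.

/* Placeholder field layout; only the AR5K_REG_SM idiom is the point. */
#define AR5K_PHY_PAPD_PROBE_TXPOWER	0x00007e00	/* placeholder mask */
#define AR5K_PHY_PAPD_PROBE_TXPOWER_S	9		/* placeholder shift */
#define AR5K_PHY_PAPD_PROBE_TX_NEXT	0x00008000	/* placeholder flag */
#define AR5K_REG_SM(_v, _f)	(((_v) << _f##_S) & (_f))

static u32 papd_probe_word(u8 txp_max)
{
	/* tx power for the probe frame plus the "probe on next tx" trigger */
	return AR5K_REG_SM(txp_max, AR5K_PHY_PAPD_PROBE_TXPOWER) |
	       AR5K_PHY_PAPD_PROBE_TX_NEXT;
}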
193
194/* Calculate gain_F measurement correction
195 * based on the current step for RF5112 rev. 2 */
196static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
1158{ 197{
1159 u32 mix, step; 198 u32 mix, step;
1160 u32 *rf; 199 u32 *rf;
200 const struct ath5k_gain_opt *go;
201 const struct ath5k_gain_opt_step *g_step;
202 const struct ath5k_rf_reg *rf_regs;
203
204 /* Only RF5112 Rev. 2 supports it */
205 if ((ah->ah_radio != AR5K_RF5112) ||
206 (ah->ah_radio_5ghz_revision <= AR5K_SREV_RAD_5112A))
207 return 0;
208
209 go = &rfgain_opt_5112;
210 rf_regs = rf_regs_5112a;
211 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a);
212
213 g_step = &go->go_step[ah->ah_gain.g_step_idx];
1161 214
1162 if (ah->ah_rf_banks == NULL) 215 if (ah->ah_rf_banks == NULL)
1163 return 0; 216 return 0;
@@ -1165,11 +218,15 @@ static u32 ath5k_hw_rfregs_gainf_corr(struct ath5k_hw *ah)
1165 rf = ah->ah_rf_banks; 218 rf = ah->ah_rf_banks;
1166 ah->ah_gain.g_f_corr = 0; 219 ah->ah_gain.g_f_corr = 0;
1167 220
1168 if (ath5k_hw_rfregs_op(rf, ah->ah_offset[7], 0, 1, 36, 0, false) != 1) 221 /* No VGA (Variable Gain Amplifier) override, skip */
222 if (ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR, false) != 1)
1169 return 0; 223 return 0;
1170 224
1171 step = ath5k_hw_rfregs_op(rf, ah->ah_offset[7], 0, 4, 32, 0, false); 225 /* Mix gain stepping */
1172 mix = ah->ah_gain.g_step->gos_param[0]; 226 step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXGAIN_STEP, false);
227
228 /* Mix gain override */
229 mix = g_step->gos_param[0];
1173 230
1174 switch (mix) { 231 switch (mix) {
1175 case 3: 232 case 3:
@@ -1189,9 +246,14 @@ static u32 ath5k_hw_rfregs_gainf_corr(struct ath5k_hw *ah)
1189 return ah->ah_gain.g_f_corr; 246 return ah->ah_gain.g_f_corr;
1190} 247}
1191 248
1192static bool ath5k_hw_rfregs_gain_readback(struct ath5k_hw *ah) 249/* Check if current gain_F measurement is in the range of our
250 * power detector windows. If we get a measurement outside range
251 * we know it's not accurate (detectors can't measure anything outside
252 * their detection window) so we must ignore it */
253static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
1193{ 254{
1194 u32 step, mix, level[4]; 255 const struct ath5k_rf_reg *rf_regs;
256 u32 step, mix_ovr, level[4];
1195 u32 *rf; 257 u32 *rf;
1196 258
1197 if (ah->ah_rf_banks == NULL) 259 if (ah->ah_rf_banks == NULL)
@@ -1200,23 +262,33 @@ static bool ath5k_hw_rfregs_gain_readback(struct ath5k_hw *ah)
1200 rf = ah->ah_rf_banks; 262 rf = ah->ah_rf_banks;
1201 263
1202 if (ah->ah_radio == AR5K_RF5111) { 264 if (ah->ah_radio == AR5K_RF5111) {
1203 step = ath5k_hw_rfregs_op(rf, ah->ah_offset[7], 0, 6, 37, 0, 265
1204 false); 266 rf_regs = rf_regs_5111;
267 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111);
268
269 step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_RFGAIN_STEP,
270 false);
271
1205 level[0] = 0; 272 level[0] = 0;
1206 level[1] = (step == 0x3f) ? 0x32 : step + 4; 273 level[1] = (step == 63) ? 50 : step + 4;
1207 level[2] = (step != 0x3f) ? 0x40 : level[0]; 274 level[2] = (step != 63) ? 64 : level[0];
1208 level[3] = level[2] + 0x32; 275 level[3] = level[2] + 50 ;
1209 276
1210 ah->ah_gain.g_high = level[3] - 277 ah->ah_gain.g_high = level[3] -
1211 (step == 0x3f ? AR5K_GAIN_DYN_ADJUST_HI_MARGIN : -5); 278 (step == 63 ? AR5K_GAIN_DYN_ADJUST_HI_MARGIN : -5);
1212 ah->ah_gain.g_low = level[0] + 279 ah->ah_gain.g_low = level[0] +
1213 (step == 0x3f ? AR5K_GAIN_DYN_ADJUST_LO_MARGIN : 0); 280 (step == 63 ? AR5K_GAIN_DYN_ADJUST_LO_MARGIN : 0);
1214 } else { 281 } else {
1215 mix = ath5k_hw_rfregs_op(rf, ah->ah_offset[7], 0, 1, 36, 0, 282
1216 false); 283 rf_regs = rf_regs_5112;
284 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112);
285
286 mix_ovr = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR,
287 false);
288
1217 level[0] = level[2] = 0; 289 level[0] = level[2] = 0;
1218 290
1219 if (mix == 1) { 291 if (mix_ovr == 1) {
1220 level[1] = level[3] = 83; 292 level[1] = level[3] = 83;
1221 } else { 293 } else {
1222 level[1] = level[3] = 107; 294 level[1] = level[3] = 107;
@@ -1230,9 +302,12 @@ static bool ath5k_hw_rfregs_gain_readback(struct ath5k_hw *ah)
1230 ah->ah_gain.g_current <= level[3]); 302 ah->ah_gain.g_current <= level[3]);
1231} 303}
1232 304
1233static s32 ath5k_hw_rfregs_gain_adjust(struct ath5k_hw *ah) 305/* Perform gain_F adjustment by choosing the right set
306 * of parameters from rf gain optimization ladder */
307static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
1234{ 308{
1235 const struct ath5k_gain_opt *go; 309 const struct ath5k_gain_opt *go;
310 const struct ath5k_gain_opt_step *g_step;
1236 int ret = 0; 311 int ret = 0;
1237 312
1238 switch (ah->ah_radio) { 313 switch (ah->ah_radio) {
@@ -1246,35 +321,39 @@ static s32 ath5k_hw_rfregs_gain_adjust(struct ath5k_hw *ah)
1246 return 0; 321 return 0;
1247 } 322 }
1248 323
1249 ah->ah_gain.g_step = &go->go_step[ah->ah_gain.g_step_idx]; 324 g_step = &go->go_step[ah->ah_gain.g_step_idx];
1250 325
1251 if (ah->ah_gain.g_current >= ah->ah_gain.g_high) { 326 if (ah->ah_gain.g_current >= ah->ah_gain.g_high) {
327
328 /* Reached maximum */
1252 if (ah->ah_gain.g_step_idx == 0) 329 if (ah->ah_gain.g_step_idx == 0)
1253 return -1; 330 return -1;
331
1254 for (ah->ah_gain.g_target = ah->ah_gain.g_current; 332 for (ah->ah_gain.g_target = ah->ah_gain.g_current;
1255 ah->ah_gain.g_target >= ah->ah_gain.g_high && 333 ah->ah_gain.g_target >= ah->ah_gain.g_high &&
1256 ah->ah_gain.g_step_idx > 0; 334 ah->ah_gain.g_step_idx > 0;
1257 ah->ah_gain.g_step = 335 g_step = &go->go_step[ah->ah_gain.g_step_idx])
1258 &go->go_step[ah->ah_gain.g_step_idx])
1259 ah->ah_gain.g_target -= 2 * 336 ah->ah_gain.g_target -= 2 *
1260 (go->go_step[--(ah->ah_gain.g_step_idx)].gos_gain - 337 (go->go_step[--(ah->ah_gain.g_step_idx)].gos_gain -
1261 ah->ah_gain.g_step->gos_gain); 338 g_step->gos_gain);
1262 339
1263 ret = 1; 340 ret = 1;
1264 goto done; 341 goto done;
1265 } 342 }
1266 343
1267 if (ah->ah_gain.g_current <= ah->ah_gain.g_low) { 344 if (ah->ah_gain.g_current <= ah->ah_gain.g_low) {
345
346 /* Reached minimum */
1268 if (ah->ah_gain.g_step_idx == (go->go_steps_count - 1)) 347 if (ah->ah_gain.g_step_idx == (go->go_steps_count - 1))
1269 return -2; 348 return -2;
349
1270 for (ah->ah_gain.g_target = ah->ah_gain.g_current; 350 for (ah->ah_gain.g_target = ah->ah_gain.g_current;
1271 ah->ah_gain.g_target <= ah->ah_gain.g_low && 351 ah->ah_gain.g_target <= ah->ah_gain.g_low &&
1272 ah->ah_gain.g_step_idx < go->go_steps_count-1; 352 ah->ah_gain.g_step_idx < go->go_steps_count-1;
1273 ah->ah_gain.g_step = 353 g_step = &go->go_step[ah->ah_gain.g_step_idx])
1274 &go->go_step[ah->ah_gain.g_step_idx])
1275 ah->ah_gain.g_target -= 2 * 354 ah->ah_gain.g_target -= 2 *
1276 (go->go_step[++ah->ah_gain.g_step_idx].gos_gain - 355 (go->go_step[++ah->ah_gain.g_step_idx].gos_gain -
1277 ah->ah_gain.g_step->gos_gain); 356 g_step->gos_gain);
1278 357
1279 ret = 2; 358 ret = 2;
1280 goto done; 359 goto done;
@@ -1289,468 +368,449 @@ done:
1289 return ret; 368 return ret;
1290} 369}
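The two loops above walk the optimization ladder: when the measured gain_F leaves the [g_low, g_high] window, the target is reduced by twice the gain difference between the step being left and the step being entered, until it falls back inside the window or the ladder runs out. A small standalone illustration of that arithmetic follows, with made-up measurement numbers; only the update rule mirrors the driver, and the gain values are the gos_gain column of rfgain_opt_5112 above.

#include <stdio.h>

struct step { int gain; };	/* stands in for gos_gain */

int main(void)
{
	/* relative gains of the 8-step RF5112 ladder, index 0 = highest gain */
	struct step go_step[8] = {
		{ 6 }, { 0 }, { -3 }, { -6 }, { -8 }, { -10 }, { -13 }, { -16 }
	};
	int idx = 4;		/* current g_step_idx */
	int target = 90;	/* measured gain_F (g_current), made up */
	int high = 85;		/* g_high for RF5112 */

	while (target >= high && idx > 0) {
		int prev = idx;		/* step we are leaving */

		idx--;			/* move one step up the ladder */
		target -= 2 * (go_step[idx].gain - go_step[prev].gain);
		printf("step %d, target %d\n", idx, target);
	}
	return 0;
}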
1291 370
1292/* 371/* Main callback for thermal rf gain calibration engine
1293 * Read EEPROM Calibration data, modify RF Banks and Initialize RF5111 372 * Check for a new gain reading and schedule an adjustment
1294 */ 373 * if needed.
1295static int ath5k_hw_rf5111_rfregs(struct ath5k_hw *ah, 374 *
1296 struct ieee80211_channel *channel, unsigned int mode) 375 * TODO: Use sw interrupt to schedule reset if gain_F needs
376 * adjustment */
377enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
1297{ 378{
379 u32 data, type;
1298 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 380 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1299 u32 *rf;
1300 const unsigned int rf_size = ARRAY_SIZE(rfregs_5111);
1301 unsigned int i;
1302 int obdb = -1, bank = -1;
1303 u32 ee_mode;
1304
1305 AR5K_ASSERT_ENTRY(mode, AR5K_MODE_MAX);
1306
1307 rf = ah->ah_rf_banks;
1308 381
1309 /* Copy values to modify them */ 382 ATH5K_TRACE(ah->ah_sc);
1310 for (i = 0; i < rf_size; i++) {
1311 if (rfregs_5111[i].rf_bank >= AR5K_RF5111_INI_RF_MAX_BANKS) {
1312 ATH5K_ERR(ah->ah_sc, "invalid bank\n");
1313 return -EINVAL;
1314 }
1315
1316 if (bank != rfregs_5111[i].rf_bank) {
1317 bank = rfregs_5111[i].rf_bank;
1318 ah->ah_offset[bank] = i;
1319 }
1320 383
1321 rf[i] = rfregs_5111[i].rf_value[mode]; 384 if (ah->ah_rf_banks == NULL ||
1322 } 385 ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE)
386 return AR5K_RFGAIN_INACTIVE;
1323 387
1324 /* Modify bank 0 */ 388 /* No check requested, either engine is inactive
1325 if (channel->hw_value & CHANNEL_2GHZ) { 389 * or an adjustment is already requested */
1326 if (channel->hw_value & CHANNEL_CCK) 390 if (ah->ah_gain.g_state != AR5K_RFGAIN_READ_REQUESTED)
1327 ee_mode = AR5K_EEPROM_MODE_11B; 391 goto done;
1328 else
1329 ee_mode = AR5K_EEPROM_MODE_11G;
1330 obdb = 0;
1331 392
1332 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[0], 393 /* Read the PAPD (Peak to Average Power Detector)
1333 ee->ee_ob[ee_mode][obdb], 3, 119, 0, true)) 394 * register */
1334 return -EINVAL; 395 data = ath5k_hw_reg_read(ah, AR5K_PHY_PAPD_PROBE);
1335 396
1336 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[0], 397 /* No probe is scheduled, read gain_F measurement */
1337 ee->ee_ob[ee_mode][obdb], 3, 122, 0, true)) 398 if (!(data & AR5K_PHY_PAPD_PROBE_TX_NEXT)) {
1338 return -EINVAL; 399 ah->ah_gain.g_current = data >> AR5K_PHY_PAPD_PROBE_GAINF_S;
400 type = AR5K_REG_MS(data, AR5K_PHY_PAPD_PROBE_TYPE);
1339 401
1340 obdb = 1; 402 /* If tx packet is CCK correct the gain_F measurement
1341 /* Modify bank 6 */ 403 * by cck ofdm gain delta */
1342 } else { 404 if (type == AR5K_PHY_PAPD_PROBE_TYPE_CCK) {
1343 /* For 11a, Turbo and XR */ 405 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A)
1344 ee_mode = AR5K_EEPROM_MODE_11A; 406 ah->ah_gain.g_current +=
1345 obdb = channel->center_freq >= 5725 ? 3 : 407 ee->ee_cck_ofdm_gain_delta;
1346 (channel->center_freq >= 5500 ? 2 : 408 else
1347 (channel->center_freq >= 5260 ? 1 : 409 ah->ah_gain.g_current +=
1348 (channel->center_freq > 4000 ? 0 : -1))); 410 AR5K_GAIN_CCK_PROBE_CORR;
411 }
1349 412
1350 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 413 /* Further correct gain_F measurement for
1351 ee->ee_pwd_84, 1, 51, 3, true)) 414 * RF5112A radios */
1352 return -EINVAL; 415 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) {
416 ath5k_hw_rf_gainf_corr(ah);
417 ah->ah_gain.g_current =
418 ah->ah_gain.g_current >= ah->ah_gain.g_f_corr ?
419 (ah->ah_gain.g_current-ah->ah_gain.g_f_corr) :
420 0;
421 }
1353 422
1354 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 423 /* Check if measurement is ok and if we need
1355 ee->ee_pwd_90, 1, 45, 3, true)) 424 * to adjust gain, schedule a gain adjustment,
1356 return -EINVAL; 425 * else switch back to the active state */
426 if (ath5k_hw_rf_check_gainf_readback(ah) &&
427 AR5K_GAIN_CHECK_ADJUST(&ah->ah_gain) &&
428 ath5k_hw_rf_gainf_adjust(ah)) {
429 ah->ah_gain.g_state = AR5K_RFGAIN_NEED_CHANGE;
430 } else {
431 ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
432 }
1357 } 433 }
1358 434
1359 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 435done:
1360 !ee->ee_xpd[ee_mode], 1, 95, 0, true)) 436 return ah->ah_gain.g_state;
1361 return -EINVAL; 437}
1362
1363 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6],
1364 ee->ee_x_gain[ee_mode], 4, 96, 0, true))
1365 return -EINVAL;
1366
1367 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], obdb >= 0 ?
1368 ee->ee_ob[ee_mode][obdb] : 0, 3, 104, 0, true))
1369 return -EINVAL;
1370 438
1371 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], obdb >= 0 ? 439/* Write initial rf gain table to set the RF sensitivity
1372 ee->ee_db[ee_mode][obdb] : 0, 3, 107, 0, true)) 440 * this one works on all RF chips and has nothing to do
1373 return -EINVAL; 441 * with gain_F calibration */
442int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
443{
444 const struct ath5k_ini_rfgain *ath5k_rfg;
445 unsigned int i, size;
1374 446
1375 /* Modify bank 7 */ 447 switch (ah->ah_radio) {
1376 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[7], 448 case AR5K_RF5111:
1377 ee->ee_i_gain[ee_mode], 6, 29, 0, true)) 449 ath5k_rfg = rfgain_5111;
450 size = ARRAY_SIZE(rfgain_5111);
451 break;
452 case AR5K_RF5112:
453 ath5k_rfg = rfgain_5112;
454 size = ARRAY_SIZE(rfgain_5112);
455 break;
456 case AR5K_RF2413:
457 ath5k_rfg = rfgain_2413;
458 size = ARRAY_SIZE(rfgain_2413);
459 break;
460 case AR5K_RF2316:
461 ath5k_rfg = rfgain_2316;
462 size = ARRAY_SIZE(rfgain_2316);
463 break;
464 case AR5K_RF5413:
465 ath5k_rfg = rfgain_5413;
466 size = ARRAY_SIZE(rfgain_5413);
467 break;
468 case AR5K_RF2317:
469 case AR5K_RF2425:
470 ath5k_rfg = rfgain_2425;
471 size = ARRAY_SIZE(rfgain_2425);
472 break;
473 default:
1378 return -EINVAL; 474 return -EINVAL;
475 }
1379 476
1380 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[7], 477 switch (freq) {
1381 ee->ee_xpd[ee_mode], 1, 4, 0, true)) 478 case AR5K_INI_RFGAIN_2GHZ:
479 case AR5K_INI_RFGAIN_5GHZ:
480 break;
481 default:
1382 return -EINVAL; 482 return -EINVAL;
483 }
1383 484
1384 /* Write RF values */ 485 for (i = 0; i < size; i++) {
1385 for (i = 0; i < rf_size; i++) {
1386 AR5K_REG_WAIT(i); 486 AR5K_REG_WAIT(i);
1387 ath5k_hw_reg_write(ah, rf[i], rfregs_5111[i].rf_register); 487 ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[freq],
488 (u32)ath5k_rfg[i].rfg_register);
1388 } 489 }
1389 490
1390 return 0; 491 return 0;
1391} 492}
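ath5k_hw_rfgain_init() simply streams the per-chip rfgain table into the PHY, with freq selecting the 2GHz or 5GHz column of rfg_value[]. A hedged usage sketch of how a caller would pick that column from the channel band; the wrapper name is invented for the example, while the band test and the two index macros appear elsewhere in this diff.

/* Usage sketch only. */
static int setup_rfgain_for_channel(struct ath5k_hw *ah,
				    struct ieee80211_channel *channel)
{
	unsigned int freq = (channel->hw_value & CHANNEL_2GHZ) ?
			AR5K_INI_RFGAIN_2GHZ : AR5K_INI_RFGAIN_5GHZ;

	return ath5k_hw_rfgain_init(ah, freq);
}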
1392 493
494
495
496/********************\
497* RF Registers setup *
498\********************/
499
500
1393/* 501/*
1394 * Read EEPROM Calibration data, modify RF Banks and Initialize RF5112 502 * Setup RF registers by writing rf buffer on hw
1395 */ 503 */
1396static int ath5k_hw_rf5112_rfregs(struct ath5k_hw *ah, 504int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1397 struct ieee80211_channel *channel, unsigned int mode) 505 unsigned int mode)
1398{ 506{
1399 const struct ath5k_ini_rf *rf_ini; 507 const struct ath5k_rf_reg *rf_regs;
508 const struct ath5k_ini_rfbuffer *ini_rfb;
509 const struct ath5k_gain_opt *go = NULL;
510 const struct ath5k_gain_opt_step *g_step;
1400 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 511 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1401 u32 *rf; 512 u8 ee_mode = 0;
1402 unsigned int rf_size, i; 513 u32 *rfb;
1403 int obdb = -1, bank = -1; 514 int i, obdb = -1, bank = -1;
1404 u32 ee_mode;
1405
1406 AR5K_ASSERT_ENTRY(mode, AR5K_MODE_MAX);
1407 515
1408 rf = ah->ah_rf_banks; 516 switch (ah->ah_radio) {
517 case AR5K_RF5111:
518 rf_regs = rf_regs_5111;
519 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111);
520 ini_rfb = rfb_5111;
521 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5111);
522 go = &rfgain_opt_5111;
523 break;
524 case AR5K_RF5112:
525 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) {
526 rf_regs = rf_regs_5112a;
527 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a);
528 ini_rfb = rfb_5112a;
529 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112a);
530 } else {
531 rf_regs = rf_regs_5112;
532 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112);
533 ini_rfb = rfb_5112;
534 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112);
535 }
536 go = &rfgain_opt_5112;
537 break;
538 case AR5K_RF2413:
539 rf_regs = rf_regs_2413;
540 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2413);
541 ini_rfb = rfb_2413;
542 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2413);
543 break;
544 case AR5K_RF2316:
545 rf_regs = rf_regs_2316;
546 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2316);
547 ini_rfb = rfb_2316;
548 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2316);
549 break;
550 case AR5K_RF5413:
551 rf_regs = rf_regs_5413;
552 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5413);
553 ini_rfb = rfb_5413;
554 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5413);
555 break;
556 case AR5K_RF2317:
557 rf_regs = rf_regs_2425;
558 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425);
559 ini_rfb = rfb_2317;
560 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2317);
561 break;
562 case AR5K_RF2425:
563 rf_regs = rf_regs_2425;
564 ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425);
565 if (ah->ah_mac_srev < AR5K_SREV_AR2417) {
566 ini_rfb = rfb_2425;
567 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2425);
568 } else {
569 ini_rfb = rfb_2417;
570 ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2417);
571 }
572 break;
573 default:
574 return -EINVAL;
575 }
1409 576
1410 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_2112A 577 /* If it's the first time we set rf buffer, allocate
1411 && !test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) { 578 * ah->ah_rf_banks based on ah->ah_rf_banks_size
1412 rf_ini = rfregs_2112a; 579 * we set above */
1413 rf_size = ARRAY_SIZE(rfregs_5112a); 580 if (ah->ah_rf_banks == NULL) {
1414 if (mode < 2) { 581 ah->ah_rf_banks = kmalloc(sizeof(u32) * ah->ah_rf_banks_size,
1415 ATH5K_ERR(ah->ah_sc, "invalid channel mode: %i\n", 582 GFP_KERNEL);
1416 mode); 583 if (ah->ah_rf_banks == NULL) {
1417 return -EINVAL; 584 ATH5K_ERR(ah->ah_sc, "out of memory\n");
585 return -ENOMEM;
1418 } 586 }
1419 mode = mode - 2; /*no a/turboa modes for 2112*/
1420 } else if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) {
1421 rf_ini = rfregs_5112a;
1422 rf_size = ARRAY_SIZE(rfregs_5112a);
1423 } else {
1424 rf_ini = rfregs_5112;
1425 rf_size = ARRAY_SIZE(rfregs_5112);
1426 } 587 }
1427 588
1428 /* Copy values to modify them */ 589 /* Copy values to modify them */
1429 for (i = 0; i < rf_size; i++) { 590 rfb = ah->ah_rf_banks;
1430 if (rf_ini[i].rf_bank >= AR5K_RF5112_INI_RF_MAX_BANKS) { 591
592 for (i = 0; i < ah->ah_rf_banks_size; i++) {
593 if (ini_rfb[i].rfb_bank >= AR5K_MAX_RF_BANKS) {
1431 ATH5K_ERR(ah->ah_sc, "invalid bank\n"); 594 ATH5K_ERR(ah->ah_sc, "invalid bank\n");
1432 return -EINVAL; 595 return -EINVAL;
1433 } 596 }
1434 597
1435 if (bank != rf_ini[i].rf_bank) { 598 /* Bank changed, write down the offset */
1436 bank = rf_ini[i].rf_bank; 599 if (bank != ini_rfb[i].rfb_bank) {
600 bank = ini_rfb[i].rfb_bank;
1437 ah->ah_offset[bank] = i; 601 ah->ah_offset[bank] = i;
1438 } 602 }
1439 603
1440 rf[i] = rf_ini[i].rf_value[mode]; 604 rfb[i] = ini_rfb[i].rfb_mode_data[mode];
1441 } 605 }
1442 606
1443 /* Modify bank 6 */ 607 /* Set Output and Driver bias current (OB/DB) */
1444 if (channel->hw_value & CHANNEL_2GHZ) { 608 if (channel->hw_value & CHANNEL_2GHZ) {
1445 if (channel->hw_value & CHANNEL_OFDM) 609
610 if (channel->hw_value & CHANNEL_CCK)
611 ee_mode = AR5K_EEPROM_MODE_11B;
612 else
1446 ee_mode = AR5K_EEPROM_MODE_11G; 613 ee_mode = AR5K_EEPROM_MODE_11G;
614
615 /* For RF511X/RF211X combination we
616 * use b_OB and b_DB parameters stored
617 * in eeprom on ee->ee_ob[ee_mode][0]
618 *
619 * For all other chips we use OB/DB for 2Ghz
620 * stored in the b/g modal section just like
621 * 802.11a on ee->ee_ob[ee_mode][1] */
622 if ((ah->ah_radio == AR5K_RF5111) ||
623 (ah->ah_radio == AR5K_RF5112))
624 obdb = 0;
1447 else 625 else
1448 ee_mode = AR5K_EEPROM_MODE_11B; 626 obdb = 1;
1449 obdb = 0;
1450 627
1451 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 628 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb],
1452 ee->ee_ob[ee_mode][obdb], 3, 287, 0, true)) 629 AR5K_RF_OB_2GHZ, true);
1453 return -EINVAL;
1454 630
1455 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 631 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb],
1456 ee->ee_ob[ee_mode][obdb], 3, 290, 0, true)) 632 AR5K_RF_DB_2GHZ, true);
1457 return -EINVAL; 633
1458 } else { 634 /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
1459 /* For 11a, Turbo and XR */ 635 } else if ((channel->hw_value & CHANNEL_5GHZ) ||
636 (ah->ah_radio == AR5K_RF5111)) {
637
638 /* For 11a, Turbo and XR we need to choose
639 * OB/DB based on frequency range */
1460 ee_mode = AR5K_EEPROM_MODE_11A; 640 ee_mode = AR5K_EEPROM_MODE_11A;
1461 obdb = channel->center_freq >= 5725 ? 3 : 641 obdb = channel->center_freq >= 5725 ? 3 :
1462 (channel->center_freq >= 5500 ? 2 : 642 (channel->center_freq >= 5500 ? 2 :
1463 (channel->center_freq >= 5260 ? 1 : 643 (channel->center_freq >= 5260 ? 1 :
1464 (channel->center_freq > 4000 ? 0 : -1))); 644 (channel->center_freq > 4000 ? 0 : -1)));
1465 645
1466 if (obdb == -1) 646 if (obdb < 0)
1467 return -EINVAL; 647 return -EINVAL;
1468 648
1469 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 649 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb],
1470 ee->ee_ob[ee_mode][obdb], 3, 279, 0, true)) 650 AR5K_RF_OB_5GHZ, true);
1471 return -EINVAL;
1472 651
1473 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 652 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb],
1474 ee->ee_ob[ee_mode][obdb], 3, 282, 0, true)) 653 AR5K_RF_DB_5GHZ, true);
1475 return -EINVAL;
1476 } 654 }
1477 655
1478 ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 656 g_step = &go->go_step[ah->ah_gain.g_step_idx];
1479 ee->ee_x_gain[ee_mode], 2, 270, 0, true);
1480 ath5k_hw_rfregs_op(rf, ah->ah_offset[6],
1481 ee->ee_x_gain[ee_mode], 2, 257, 0, true);
1482 657
1483 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 658 /* Bank Modifications (chip-specific) */
1484 ee->ee_xpd[ee_mode], 1, 302, 0, true)) 659 if (ah->ah_radio == AR5K_RF5111) {
1485 return -EINVAL;
1486 660
1487 /* Modify bank 7 */ 661 /* Set gain_F settings according to current step */
1488 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[7], 662 if (channel->hw_value & CHANNEL_OFDM) {
1489 ee->ee_i_gain[ee_mode], 6, 14, 0, true))
1490 return -EINVAL;
1491 663
1492 /* Write RF values */ 664 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
1493 for (i = 0; i < rf_size; i++) 665 AR5K_PHY_FRAME_CTL_TX_CLIP,
1494 ath5k_hw_reg_write(ah, rf[i], rf_ini[i].rf_register); 666 g_step->gos_param[0]);
1495 667
1496 return 0; 668 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1],
1497} 669 AR5K_RF_PWD_90, true);
1498 670
1499/* 671 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2],
1500 * Initialize RF5413/5414 and future chips 672 AR5K_RF_PWD_84, true);
1501 * (until we come up with a better solution)
1502 */
1503static int ath5k_hw_rf5413_rfregs(struct ath5k_hw *ah,
1504 struct ieee80211_channel *channel, unsigned int mode)
1505{
1506 const struct ath5k_ini_rf *rf_ini;
1507 u32 *rf;
1508 unsigned int rf_size, i;
1509 int bank = -1;
1510 673
1511 AR5K_ASSERT_ENTRY(mode, AR5K_MODE_MAX); 674 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3],
1512 675 AR5K_RF_RFGAIN_SEL, true);
1513 rf = ah->ah_rf_banks;
1514 676
1515 switch (ah->ah_radio) { 677 /* We programmed gain_F parameters, switch back
1516 case AR5K_RF5413: 678 * to active state */
1517 rf_ini = rfregs_5413; 679 ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
1518 rf_size = ARRAY_SIZE(rfregs_5413);
1519 break;
1520 case AR5K_RF2413:
1521 rf_ini = rfregs_2413;
1522 rf_size = ARRAY_SIZE(rfregs_2413);
1523 680
1524 if (mode < 2) {
1525 ATH5K_ERR(ah->ah_sc,
1526 "invalid channel mode: %i\n", mode);
1527 return -EINVAL;
1528 } 681 }
1529 682
1530 mode = mode - 2; 683 /* Bank 6/7 setup */
1531 break;
1532 case AR5K_RF2425:
1533 rf_ini = rfregs_2425;
1534 rf_size = ARRAY_SIZE(rfregs_2425);
1535 684
1536 if (mode < 2) { 685 ath5k_hw_rfb_op(ah, rf_regs, !ee->ee_xpd[ee_mode],
1537 ATH5K_ERR(ah->ah_sc, 686 AR5K_RF_PWD_XPD, true);
1538 "invalid channel mode: %i\n", mode);
1539 return -EINVAL;
1540 }
1541 687
1542 /* Map b to g */ 688 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_x_gain[ee_mode],
1543 if (mode == 2) 689 AR5K_RF_XPD_GAIN, true);
1544 mode = 0;
1545 else
1546 mode = mode - 3;
1547 690
1548 break; 691 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
1549 default: 692 AR5K_RF_GAIN_I, true);
1550 return -EINVAL; 693
694 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
695 AR5K_RF_PLO_SEL, true);
696
697 /* TODO: Half/quarter channel support */
1551 } 698 }
1552 699
1553 /* Copy values to modify them */ 700 if (ah->ah_radio == AR5K_RF5112) {
1554 for (i = 0; i < rf_size; i++) {
1555 if (rf_ini[i].rf_bank >= AR5K_RF5112_INI_RF_MAX_BANKS) {
1556 ATH5K_ERR(ah->ah_sc, "invalid bank\n");
1557 return -EINVAL;
1558 }
1559 701
1560 if (bank != rf_ini[i].rf_bank) { 702 /* Set gain_F settings according to current step */
1561 bank = rf_ini[i].rf_bank; 703 if (channel->hw_value & CHANNEL_OFDM) {
1562 ah->ah_offset[bank] = i;
1563 }
1564 704
1565 rf[i] = rf_ini[i].rf_value[mode]; 705 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[0],
1566 } 706 AR5K_RF_MIXGAIN_OVR, true);
1567 707
1568 /* 708 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1],
1569 * After compairing dumps from different cards 709 AR5K_RF_PWD_138, true);
1570 * we get the same RF_BUFFER settings (diff returns
1571 * 0 lines). It seems that RF_BUFFER settings are static
1572 * and are written unmodified (no EEPROM stuff
1573 * is used because calibration data would be
1574 * different between different cards and would result
1575 * different RF_BUFFER settings)
1576 */
1577 710
1578 /* Write RF values */ 711 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2],
1579 for (i = 0; i < rf_size; i++) 712 AR5K_RF_PWD_137, true);
1580 ath5k_hw_reg_write(ah, rf[i], rf_ini[i].rf_register);
1581 713
1582 return 0; 714 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3],
1583} 715 AR5K_RF_PWD_136, true);
1584 716
1585/* 717 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[4],
1586 * Initialize RF 718 AR5K_RF_PWD_132, true);
1587 */
1588int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1589 unsigned int mode)
1590{
1591 int (*func)(struct ath5k_hw *, struct ieee80211_channel *, unsigned int);
1592 int ret;
1593 719
1594 switch (ah->ah_radio) { 720 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[5],
1595 case AR5K_RF5111: 721 AR5K_RF_PWD_131, true);
1596 ah->ah_rf_banks_size = sizeof(rfregs_5111);
1597 func = ath5k_hw_rf5111_rfregs;
1598 break;
1599 case AR5K_RF5112:
1600 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A)
1601 ah->ah_rf_banks_size = sizeof(rfregs_5112a);
1602 else
1603 ah->ah_rf_banks_size = sizeof(rfregs_5112);
1604 func = ath5k_hw_rf5112_rfregs;
1605 break;
1606 case AR5K_RF5413:
1607 ah->ah_rf_banks_size = sizeof(rfregs_5413);
1608 func = ath5k_hw_rf5413_rfregs;
1609 break;
1610 case AR5K_RF2413:
1611 ah->ah_rf_banks_size = sizeof(rfregs_2413);
1612 func = ath5k_hw_rf5413_rfregs;
1613 break;
1614 case AR5K_RF2425:
1615 ah->ah_rf_banks_size = sizeof(rfregs_2425);
1616 func = ath5k_hw_rf5413_rfregs;
1617 break;
1618 default:
1619 return -EINVAL;
1620 }
1621 722
1622 if (ah->ah_rf_banks == NULL) { 723 ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[6],
1623 /* XXX do extra checks? */ 724 AR5K_RF_PWD_130, true);
1624 ah->ah_rf_banks = kmalloc(ah->ah_rf_banks_size, GFP_KERNEL); 725
1625 if (ah->ah_rf_banks == NULL) { 726 /* We programmed gain_F parameters, switch back
1626 ATH5K_ERR(ah->ah_sc, "out of memory\n"); 727 * to active state */
1627 return -ENOMEM; 728 ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
1628 } 729 }
1629 }
1630 730
1631 ret = func(ah, channel, mode); 731 /* Bank 6/7 setup */
1632 if (!ret)
1633 ah->ah_rf_gain = AR5K_RFGAIN_INACTIVE;
1634 732
1635 return ret; 733 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
1636} 734 AR5K_RF_XPD_SEL, true);
1637 735
1638int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq) 736 if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) {
1639{ 737 /* Rev. 1 supports only one xpd */
1640 const struct ath5k_ini_rfgain *ath5k_rfg; 738 ath5k_hw_rfb_op(ah, rf_regs,
1641 unsigned int i, size; 739 ee->ee_x_gain[ee_mode],
740 AR5K_RF_XPD_GAIN, true);
1642 741
1643 switch (ah->ah_radio) { 742 } else {
1644 case AR5K_RF5111: 743 /* TODO: Set high and low gain bits */
1645 ath5k_rfg = rfgain_5111; 744 ath5k_hw_rfb_op(ah, rf_regs,
1646 size = ARRAY_SIZE(rfgain_5111); 745 ee->ee_x_gain[ee_mode],
1647 break; 746 AR5K_RF_PD_GAIN_LO, true);
1648 case AR5K_RF5112: 747 ath5k_hw_rfb_op(ah, rf_regs,
1649 ath5k_rfg = rfgain_5112; 748 ee->ee_x_gain[ee_mode],
1650 size = ARRAY_SIZE(rfgain_5112); 749 AR5K_RF_PD_GAIN_HI, true);
1651 break;
1652 case AR5K_RF5413:
1653 ath5k_rfg = rfgain_5413;
1654 size = ARRAY_SIZE(rfgain_5413);
1655 break;
1656 case AR5K_RF2413:
1657 ath5k_rfg = rfgain_2413;
1658 size = ARRAY_SIZE(rfgain_2413);
1659 freq = 0; /* only 2Ghz */
1660 break;
1661 case AR5K_RF2425:
1662 ath5k_rfg = rfgain_2425;
1663 size = ARRAY_SIZE(rfgain_2425);
1664 freq = 0; /* only 2Ghz */
1665 break;
1666 default:
1667 return -EINVAL;
1668 }
1669 750
1670 switch (freq) { 751 /* Lower synth voltage on Rev 2 */
1671 case AR5K_INI_RFGAIN_2GHZ: 752 ath5k_hw_rfb_op(ah, rf_regs, 2,
1672 case AR5K_INI_RFGAIN_5GHZ: 753 AR5K_RF_HIGH_VC_CP, true);
1673 break;
1674 default:
1675 return -EINVAL;
1676 }
1677 754
1678 for (i = 0; i < size; i++) { 755 ath5k_hw_rfb_op(ah, rf_regs, 2,
1679 AR5K_REG_WAIT(i); 756 AR5K_RF_MID_VC_CP, true);
1680 ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[freq],
1681 (u32)ath5k_rfg[i].rfg_register);
1682 }
1683 757
1684 return 0; 758 ath5k_hw_rfb_op(ah, rf_regs, 2,
1685} 759 AR5K_RF_LOW_VC_CP, true);
1686 760
1687enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah) 761 ath5k_hw_rfb_op(ah, rf_regs, 2,
1688{ 762 AR5K_RF_PUSH_UP, true);
1689 u32 data, type;
1690 763
1691 ATH5K_TRACE(ah->ah_sc); 764 /* Decrease power consumption on 5213+ BaseBand */
765 if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
766 ath5k_hw_rfb_op(ah, rf_regs, 1,
767 AR5K_RF_PAD2GND, true);
1692 768
1693 if (ah->ah_rf_banks == NULL || !ah->ah_gain.g_active || 769 ath5k_hw_rfb_op(ah, rf_regs, 1,
1694 ah->ah_version <= AR5K_AR5211) 770 AR5K_RF_XB2_LVL, true);
1695 return AR5K_RFGAIN_INACTIVE;
1696 771
1697 if (ah->ah_rf_gain != AR5K_RFGAIN_READ_REQUESTED) 772 ath5k_hw_rfb_op(ah, rf_regs, 1,
1698 goto done; 773 AR5K_RF_XB5_LVL, true);
1699 774
1700 data = ath5k_hw_reg_read(ah, AR5K_PHY_PAPD_PROBE); 775 ath5k_hw_rfb_op(ah, rf_regs, 1,
776 AR5K_RF_PWD_167, true);
1701 777
1702 if (!(data & AR5K_PHY_PAPD_PROBE_TX_NEXT)) { 778 ath5k_hw_rfb_op(ah, rf_regs, 1,
1703 ah->ah_gain.g_current = data >> AR5K_PHY_PAPD_PROBE_GAINF_S; 779 AR5K_RF_PWD_166, true);
1704 type = AR5K_REG_MS(data, AR5K_PHY_PAPD_PROBE_TYPE); 780 }
781 }
1705 782
1706 if (type == AR5K_PHY_PAPD_PROBE_TYPE_CCK) 783 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
1707 ah->ah_gain.g_current += AR5K_GAIN_CCK_PROBE_CORR; 784 AR5K_RF_GAIN_I, true);
1708 785
1709 if (ah->ah_radio >= AR5K_RF5112) { 786 /* TODO: Half/quarter channel support */
1710 ath5k_hw_rfregs_gainf_corr(ah);
1711 ah->ah_gain.g_current =
1712 ah->ah_gain.g_current >= ah->ah_gain.g_f_corr ?
1713 (ah->ah_gain.g_current-ah->ah_gain.g_f_corr) :
1714 0;
1715 }
1716 787
1717 if (ath5k_hw_rfregs_gain_readback(ah) &&
1718 AR5K_GAIN_CHECK_ADJUST(&ah->ah_gain) &&
1719 ath5k_hw_rfregs_gain_adjust(ah))
1720 ah->ah_rf_gain = AR5K_RFGAIN_NEED_CHANGE;
1721 } 788 }
1722 789
1723done: 790 if (ah->ah_radio == AR5K_RF5413 &&
1724 return ah->ah_rf_gain; 791 channel->hw_value & CHANNEL_2GHZ) {
1725} 792
793 ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
794 true);
795
796 /* Set optimum value for early revisions (on pci-e chips) */
797 if (ah->ah_mac_srev >= AR5K_SREV_AR5424 &&
798 ah->ah_mac_srev < AR5K_SREV_AR5413)
799 ath5k_hw_rfb_op(ah, rf_regs, ath5k_hw_bitswap(6, 3),
800 AR5K_RF_PWD_ICLOBUF_2G, true);
1726 801
1727int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah) 802 }
1728{ 803
1729 /* Initialize the gain optimization values */ 804 /* Write RF banks on hw */
1730 switch (ah->ah_radio) { 805 for (i = 0; i < ah->ah_rf_banks_size; i++) {
1731 case AR5K_RF5111: 806 AR5K_REG_WAIT(i);
1732 ah->ah_gain.g_step_idx = rfgain_opt_5111.go_default; 807 ath5k_hw_reg_write(ah, rfb[i], ini_rfb[i].rfb_ctrl_register);
1733 ah->ah_gain.g_step =
1734 &rfgain_opt_5111.go_step[ah->ah_gain.g_step_idx];
1735 ah->ah_gain.g_low = 20;
1736 ah->ah_gain.g_high = 35;
1737 ah->ah_gain.g_active = 1;
1738 break;
1739 case AR5K_RF5112:
1740 ah->ah_gain.g_step_idx = rfgain_opt_5112.go_default;
1741 ah->ah_gain.g_step =
1742 &rfgain_opt_5112.go_step[ah->ah_gain.g_step_idx];
1743 ah->ah_gain.g_low = 20;
1744 ah->ah_gain.g_high = 85;
1745 ah->ah_gain.g_active = 1;
1746 break;
1747 default:
1748 return -EINVAL;
1749 } 808 }
1750 809
1751 return 0; 810 return 0;
1752} 811}
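For 5GHz channels (and always on RF5111) the OB/DB index is derived from the channel frequency with a nested ternary. The same mapping written out long-hand as a readability aid; the helper name is local to this example and the band edges are taken verbatim from the code above.

static int ath5k_obdb_index_5ghz(unsigned int center_freq)
{
	if (center_freq >= 5725)
		return 3;
	if (center_freq >= 5500)
		return 2;
	if (center_freq >= 5260)
		return 1;
	if (center_freq > 4000)
		return 0;
	return -1;	/* out of range, caller bails out with -EINVAL */
}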
1753 812
813
1754/**************************\ 814/**************************\
1755 PHY/RF channel functions 815 PHY/RF channel functions
1756\**************************/ 816\**************************/
@@ -2271,13 +1331,8 @@ done:
2271 * as often as I/Q calibration.*/ 1331 * as often as I/Q calibration.*/
2272 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1332 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
2273 1333
2274 /* Request RF gain */ 1334 /* Initiate a gain_F calibration */
2275 if (channel->hw_value & CHANNEL_5GHZ) { 1335 ath5k_hw_request_rfgain_probe(ah);
2276 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max,
2277 AR5K_PHY_PAPD_PROBE_TXPOWER) |
2278 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
2279 ah->ah_rf_gain = AR5K_RFGAIN_READ_REQUESTED;
2280 }
2281 1336
2282 return 0; 1337 return 0;
2283} 1338}
diff --git a/drivers/net/wireless/ath5k/qcu.c b/drivers/net/wireless/ath5k/qcu.c
index 1b7bc50ea8e..5094c394a4b 100644
--- a/drivers/net/wireless/ath5k/qcu.c
+++ b/drivers/net/wireless/ath5k/qcu.c
@@ -148,6 +148,7 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
148 */ 148 */
149u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) 149u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
150{ 150{
151 u32 pending;
151 ATH5K_TRACE(ah->ah_sc); 152 ATH5K_TRACE(ah->ah_sc);
152 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 153 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
153 154
@@ -159,7 +160,15 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
159 if (ah->ah_version == AR5K_AR5210) 160 if (ah->ah_version == AR5K_AR5210)
160 return false; 161 return false;
161 162
162 return AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT; 163 pending = (AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT);
164
165 /* It's possible to have no frames pending even if TXE
 166	 * is set. To indicate that the queue has not stopped, return
 167	 * true. */
168 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
169 return true;
170
171 return pending;
163} 172}
164 173
165/* 174/*
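Usage note (not part of the patch): because ath5k_hw_num_tx_pending() now reports non-zero while TXE is still asserted even with zero pending frames, a hypothetical caller that wants to drain a queue before reconfiguring it can simply poll it with a bounded timeout:

	for (i = 0; i < 100; i++) {
		if (!ath5k_hw_num_tx_pending(ah, queue))
			break;	/* no frames pending and TXE cleared */
		udelay(100);
	}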
@@ -324,8 +333,18 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
324 /* 333 /*
325 * Set misc registers 334 * Set misc registers
326 */ 335 */
327 ath5k_hw_reg_write(ah, AR5K_QCU_MISC_DCU_EARLY, 336 /* Enable DCU early termination for this queue */
328 AR5K_QUEUE_MISC(queue)); 337 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
338 AR5K_QCU_MISC_DCU_EARLY);
339
340 /* Enable DCU to wait for next fragment from QCU */
341 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
342 AR5K_DCU_MISC_FRAG_WAIT);
343
344 /* On Maui and Spirit use the global seqnum on DCU */
345 if (ah->ah_mac_version < AR5K_SREV_AR5211)
346 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
347 AR5K_DCU_MISC_SEQNUM_CTL);
329 348
330 if (tq->tqi_cbr_period) { 349 if (tq->tqi_cbr_period) {
331 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period, 350 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
@@ -341,7 +360,8 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
341 AR5K_QCU_MISC_CBR_THRES_ENABLE); 360 AR5K_QCU_MISC_CBR_THRES_ENABLE);
342 } 361 }
343 362
344 if (tq->tqi_ready_time) 363 if (tq->tqi_ready_time &&
364 (tq->tqi_type != AR5K_TX_QUEUE_ID_CAB))
345 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time, 365 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
346 AR5K_QCU_RDYTIMECFG_INTVAL) | 366 AR5K_QCU_RDYTIMECFG_INTVAL) |
347 AR5K_QCU_RDYTIMECFG_ENABLE, 367 AR5K_QCU_RDYTIMECFG_ENABLE,
@@ -383,13 +403,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
383 AR5K_DCU_MISC_ARBLOCK_CTL_S) | 403 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
384 AR5K_DCU_MISC_POST_FR_BKOFF_DIS | 404 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
385 AR5K_DCU_MISC_BCN_ENABLE); 405 AR5K_DCU_MISC_BCN_ENABLE);
386
387 ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
388 (AR5K_TUNE_SW_BEACON_RESP -
389 AR5K_TUNE_DMA_BEACON_RESP) -
390 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
391 AR5K_QCU_RDYTIMECFG_ENABLE,
392 AR5K_QUEUE_RDYTIMECFG(queue));
393 break; 406 break;
394 407
395 case AR5K_TX_QUEUE_CAB: 408 case AR5K_TX_QUEUE_CAB:
@@ -398,6 +411,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
398 AR5K_QCU_MISC_CBREXP_DIS | 411 AR5K_QCU_MISC_CBREXP_DIS |
399 AR5K_QCU_MISC_CBREXP_BCN_DIS); 412 AR5K_QCU_MISC_CBREXP_BCN_DIS);
400 413
414 ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
415 (AR5K_TUNE_SW_BEACON_RESP -
416 AR5K_TUNE_DMA_BEACON_RESP) -
417 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
418 AR5K_QCU_RDYTIMECFG_ENABLE,
419 AR5K_QUEUE_RDYTIMECFG(queue));
420
401 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), 421 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
402 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << 422 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
403 AR5K_DCU_MISC_ARBLOCK_CTL_S)); 423 AR5K_DCU_MISC_ARBLOCK_CTL_S));
@@ -413,6 +433,8 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
413 break; 433 break;
414 } 434 }
415 435
436 /* TODO: Handle frame compression */
437
416 /* 438 /*
417 * Enable interrupts for this tx queue 439 * Enable interrupts for this tx queue
418 * in the secondary interrupt mask registers 440 * in the secondary interrupt mask registers
@@ -483,6 +505,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
483 * by setting AR5K_TXNOFRM to zero */ 505 * by setting AR5K_TXNOFRM to zero */
484 if (ah->ah_txq_imr_nofrm == 0) 506 if (ah->ah_txq_imr_nofrm == 0)
485 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM); 507 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
508
509 /* Set QCU mask for this DCU to save power */
510 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
486 } 511 }
487 512
488 return 0; 513 return 0;
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 9189ab13286..2dc008e1022 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -187,6 +187,7 @@
187#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */ 187#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */
188#define AR5K_TXCFG_RDY_CBR_DIS 0x00004000 /* Ready time CBR disable [5211+] */ 188#define AR5K_TXCFG_RDY_CBR_DIS 0x00004000 /* Ready time CBR disable [5211+] */
189#define AR5K_TXCFG_JUMBO_FRM_MODE 0x00008000 /* Jumbo frame mode [5211+] */ 189#define AR5K_TXCFG_JUMBO_FRM_MODE 0x00008000 /* Jumbo frame mode [5211+] */
190#define AR5K_TXCFG_DCU_DBL_BUF_DIS 0x00008000 /* Disable double buffering on DCU */
190#define AR5K_TXCFG_DCU_CACHING_DIS 0x00010000 /* Disable DCU caching */ 191#define AR5K_TXCFG_DCU_CACHING_DIS 0x00010000 /* Disable DCU caching */
191 192
192/* 193/*
@@ -753,7 +754,7 @@
753 */ 754 */
754#define AR5K_DCU_SEQNUM_BASE 0x1140 755#define AR5K_DCU_SEQNUM_BASE 0x1140
755#define AR5K_DCU_SEQNUM_M 0x00000fff 756#define AR5K_DCU_SEQNUM_M 0x00000fff
756#define AR5K_QUEUE_DFS_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q) 757#define AR5K_QUEUE_DCU_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q)
757 758
758/* 759/*
759 * DCU global IFS SIFS register 760 * DCU global IFS SIFS register
@@ -811,6 +812,8 @@
811 812
812/* 813/*
813 * DCU transmit filter table 0 (32 entries) 814 * DCU transmit filter table 0 (32 entries)
815 * each entry contains a 32bit slice of the
816 * 128bit tx filter for each DCU (4 slices per DCU)
814 */ 817 */
815#define AR5K_DCU_TX_FILTER_0_BASE 0x1038 818#define AR5K_DCU_TX_FILTER_0_BASE 0x1038
816#define AR5K_DCU_TX_FILTER_0(_n) (AR5K_DCU_TX_FILTER_0_BASE + (_n * 64)) 819#define AR5K_DCU_TX_FILTER_0(_n) (AR5K_DCU_TX_FILTER_0_BASE + (_n * 64))
@@ -819,7 +822,7 @@
819 * DCU transmit filter table 1 (16 entries) 822 * DCU transmit filter table 1 (16 entries)
820 */ 823 */
821#define AR5K_DCU_TX_FILTER_1_BASE 0x103c 824#define AR5K_DCU_TX_FILTER_1_BASE 0x103c
822#define AR5K_DCU_TX_FILTER_1(_n) (AR5K_DCU_TX_FILTER_1_BASE + ((_n - 32) * 64)) 825#define AR5K_DCU_TX_FILTER_1(_n) (AR5K_DCU_TX_FILTER_1_BASE + (_n * 64))
823 826
824/* 827/*
825 * DCU clear transmit filter register 828 * DCU clear transmit filter register
@@ -1447,7 +1450,7 @@
1447 AR5K_TSF_U32_5210 : AR5K_TSF_U32_5211) 1450 AR5K_TSF_U32_5210 : AR5K_TSF_U32_5211)
1448 1451
1449/* 1452/*
1450 * Last beacon timestamp register 1453 * Last beacon timestamp register (Read Only)
1451 */ 1454 */
1452#define AR5K_LAST_TSTP 0x8080 1455#define AR5K_LAST_TSTP 0x8080
1453 1456
@@ -1465,7 +1468,7 @@
1465#define AR5K_ADDAC_TEST_TRIG_PTY 0x00020000 /* Trigger polarity */ 1468#define AR5K_ADDAC_TEST_TRIG_PTY 0x00020000 /* Trigger polarity */
1466#define AR5K_ADDAC_TEST_RXCONT 0x00040000 /* Continuous capture */ 1469#define AR5K_ADDAC_TEST_RXCONT 0x00040000 /* Continuous capture */
1467#define AR5K_ADDAC_TEST_CAPTURE 0x00080000 /* Begin capture */ 1470#define AR5K_ADDAC_TEST_CAPTURE 0x00080000 /* Begin capture */
1468#define AR5K_ADDAC_TEST_TST_ARM 0x00100000 /* Test ARM (Adaptive Radio Mode ?) */ 1471#define AR5K_ADDAC_TEST_TST_ARM 0x00100000 /* ARM rx buffer for capture */
1469 1472
1470/* 1473/*
1471 * Default antenna register [5211+] 1474 * Default antenna register [5211+]
@@ -1677,7 +1680,7 @@
1677 * TSF parameter register 1680 * TSF parameter register
1678 */ 1681 */
1679#define AR5K_TSF_PARM 0x8104 /* Register Address */ 1682#define AR5K_TSF_PARM 0x8104 /* Register Address */
1680#define AR5K_TSF_PARM_INC_M 0x000000ff /* Mask for TSF increment */ 1683#define AR5K_TSF_PARM_INC 0x000000ff /* Mask for TSF increment */
1681#define AR5K_TSF_PARM_INC_S 0 1684#define AR5K_TSF_PARM_INC_S 0
1682 1685
1683/* 1686/*
@@ -1689,7 +1692,7 @@
1689#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000070 /* ??? */ 1692#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000070 /* ??? */
1690#define AR5K_QOS_NOACK_BIT_OFFSET_S 4 1693#define AR5K_QOS_NOACK_BIT_OFFSET_S 4
1691#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000180 /* ??? */ 1694#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000180 /* ??? */
1692#define AR5K_QOS_NOACK_BYTE_OFFSET_S 8 1695#define AR5K_QOS_NOACK_BYTE_OFFSET_S 7
1693 1696
1694/* 1697/*
1695 * PHY error filter register 1698 * PHY error filter register
@@ -1848,15 +1851,14 @@
1848 * TST_2 (Misc config parameters) 1851 * TST_2 (Misc config parameters)
1849 */ 1852 */
1850#define AR5K_PHY_TST2 0x9800 /* Register Address */ 1853#define AR5K_PHY_TST2 0x9800 /* Register Address */
1851#define AR5K_PHY_TST2_TRIG_SEL 0x00000001 /* Trigger select (?) (field ?) */ 1854#define AR5K_PHY_TST2_TRIG_SEL 0x00000007 /* Trigger select (?)*/
1852#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) (field ?) */ 1855#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) */
1853#define AR5K_PHY_TST2_CBUS_MODE 0x00000100 /* Cardbus mode (?) */ 1856#define AR5K_PHY_TST2_CBUS_MODE 0x00000060 /* Cardbus mode (?) */
1854/* bit reserved */
1855#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32Khz external) */ 1857#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32Khz external) */
1856#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */ 1858#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */
1857#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?) */ 1859#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?) */
1858#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */ 1860#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */
1859#define AR5K_PHY_TST2_ALT_RFDATA 0x00004000 /* Alternate RFDATA (5-2GHz switch) */ 1861#define AR5K_PHY_TST2_ALT_RFDATA 0x00004000 /* Alternate RFDATA (5-2GHz switch ?) */
1860#define AR5K_PHY_TST2_MINI_OBS_EN 0x00008000 /* Enable mini OBS (?) */ 1862#define AR5K_PHY_TST2_MINI_OBS_EN 0x00008000 /* Enable mini OBS (?) */
1861#define AR5K_PHY_TST2_RX2_IS_RX5_INV 0x00010000 /* 2GHz rx path is the 5GHz path inverted (?) */ 1863#define AR5K_PHY_TST2_RX2_IS_RX5_INV 0x00010000 /* 2GHz rx path is the 5GHz path inverted (?) */
1862#define AR5K_PHY_TST2_SLOW_CLK160 0x00020000 /* Slow CLK160 (?) */ 1864#define AR5K_PHY_TST2_SLOW_CLK160 0x00020000 /* Slow CLK160 (?) */
@@ -1926,8 +1928,8 @@
1926#define AR5K_PHY_RF_CTL2_TXF2TXD_START_S 0 1928#define AR5K_PHY_RF_CTL2_TXF2TXD_START_S 0
1927 1929
1928#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */ 1930#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */
1929#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000000f /* TX end to XLNA on */ 1931#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000ff00 /* TX end to XLNA on */
1930#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON_S 0 1932#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON_S 8
1931 1933
1932#define AR5K_PHY_ADC_CTL 0x982c 1934#define AR5K_PHY_ADC_CTL 0x982c
1933#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF 0x00000003 1935#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF 0x00000003
@@ -1961,7 +1963,7 @@
1961#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */ 1963#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
1962#define AR5K_PHY_SETTLING_AGC_S 0 1964#define AR5K_PHY_SETTLING_AGC_S 0
1963#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */ 1965#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */
1964#define AR5K_PHY_SETTLINK_SWITCH_S 7 1966#define AR5K_PHY_SETTLING_SWITCH_S 7
1965 1967
1966/* 1968/*
1967 * PHY Gain registers 1969 * PHY Gain registers
@@ -2067,14 +2069,14 @@
2067 * PHY sleep registers [5112+] 2069 * PHY sleep registers [5112+]
2068 */ 2070 */
2069#define AR5K_PHY_SCR 0x9870 2071#define AR5K_PHY_SCR 0x9870
2070#define AR5K_PHY_SCR_32MHZ 0x0000001f
2071 2072
2072#define AR5K_PHY_SLMT 0x9874 2073#define AR5K_PHY_SLMT 0x9874
2073#define AR5K_PHY_SLMT_32MHZ 0x0000007f 2074#define AR5K_PHY_SLMT_32MHZ 0x0000007f
2074 2075
2075#define AR5K_PHY_SCAL 0x9878 2076#define AR5K_PHY_SCAL 0x9878
2076#define AR5K_PHY_SCAL_32MHZ 0x0000000e 2077#define AR5K_PHY_SCAL_32MHZ 0x0000000e
2077 2078#define AR5K_PHY_SCAL_32MHZ_2417 0x0000000a
2079#define AR5K_PHY_SCAL_32MHZ_HB63 0x00000032
2078 2080
2079/* 2081/*
2080 * PHY PLL (Phase Locked Loop) control register 2082 * PHY PLL (Phase Locked Loop) control register
@@ -2101,34 +2103,10 @@
2101/* 2103/*
2102 * RF Buffer register 2104 * RF Buffer register
2103 * 2105 *
2104 * There are some special control registers on the RF chip
2105 * that hold various operation settings related mostly to
2106 * the analog parts (channel, gain adjustment etc).
2107 *
2108 * We don't write on those registers directly but
2109 * we send a data packet on the buffer register and
2110 * then write on another special register to notify hw
2111 * to apply the settings. This is done so that control registers
2112 * can be dynamicaly programmed during operation and the settings
2113 * are applied faster on the hw.
2114 *
2115 * We sent such data packets during rf initialization and channel change
2116 * through ath5k_hw_rf*_rfregs and ath5k_hw_rf*_channel functions.
2117 *
2118 * The data packets we send during initializadion are inside ath5k_ini_rf
2119 * struct (see ath5k_hw.h) and each one is related to an "rf register bank".
2120 * We use *rfregs functions to modify them acording to current operation
2121 * mode and eeprom values and pass them all together to the chip.
2122 *
2123 * It's obvious from the code that 0x989c is the buffer register but 2106 * It's obvious from the code that 0x989c is the buffer register but
2124 * for the other special registers that we write to after sending each 2107 * for the other special registers that we write to after sending each
2125 * packet, i have no idea. So i'll name them BUFFER_CONTROL_X registers 2108 * packet, i have no idea. So i'll name them BUFFER_CONTROL_X registers
2126 * for now. It's interesting that they are also used for some other operations. 2109 * for now. It's interesting that they are also used for some other operations.
2127 *
2128 * Also check out hw.h and U.S. Patent 6677779 B1 (about buffer
2129 * registers and control registers):
2130 *
2131 * http://www.google.com/patents?id=qNURAAAAEBAJ
2132 */ 2110 */
2133 2111
2134#define AR5K_RF_BUFFER 0x989c 2112#define AR5K_RF_BUFFER 0x989c
@@ -2178,7 +2156,8 @@
2178#define AR5K_PHY_ANT_CTL_TXRX_EN 0x00000001 /* Enable TX/RX (?) */ 2156#define AR5K_PHY_ANT_CTL_TXRX_EN 0x00000001 /* Enable TX/RX (?) */
2179#define AR5K_PHY_ANT_CTL_SECTORED_ANT 0x00000004 /* Sectored Antenna */ 2157#define AR5K_PHY_ANT_CTL_SECTORED_ANT 0x00000004 /* Sectored Antenna */
2180#define AR5K_PHY_ANT_CTL_HITUNE5 0x00000008 /* Hitune5 (?) */ 2158#define AR5K_PHY_ANT_CTL_HITUNE5 0x00000008 /* Hitune5 (?) */
2181#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE 0x00000010 /* Switch table idle (?) */ 2159#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE 0x000003f0 /* Switch table idle (?) */
2160#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE_S 4
2182 2161
2183/* 2162/*
2184 * PHY receiver delay register [5111+] 2163 * PHY receiver delay register [5111+]
@@ -2218,7 +2197,7 @@
2218#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */ 2197#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */
2219#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */ 2198#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */
2220#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */ 2199#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */
2221#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_S 0 2200#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_S 1
2222#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */ 2201#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */
2223#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */ 2202#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */
2224#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */ 2203#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */
@@ -2243,9 +2222,7 @@
2243#define AR5K_PHY_CTL_LOW_FREQ_SLE_EN 0x00000080 /* Enable low freq sleep */ 2222#define AR5K_PHY_CTL_LOW_FREQ_SLE_EN 0x00000080 /* Enable low freq sleep */
2244 2223
2245/* 2224/*
2246 * PHY PAPD probe register [5111+ (?)] 2225 * PHY PAPD probe register [5111+]
2247 * Is this only present in 5212 ?
2248 * Because it's always 0 in 5211 initialization code
2249 */ 2226 */
2250#define AR5K_PHY_PAPD_PROBE 0x9930 2227#define AR5K_PHY_PAPD_PROBE 0x9930
2251#define AR5K_PHY_PAPD_PROBE_SH_HI_PAR 0x00000001 2228#define AR5K_PHY_PAPD_PROBE_SH_HI_PAR 0x00000001
@@ -2303,6 +2280,15 @@
2303 AR5K_PHY_FRAME_CTL_TIMING_ERR 2280 AR5K_PHY_FRAME_CTL_TIMING_ERR
2304 2281
2305/* 2282/*
2283 * PHY Tx Power adjustment register [5212A+]
2284 */
2285#define AR5K_PHY_TX_PWR_ADJ 0x994c
2286#define AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA 0x00000fc0
2287#define AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA_S 6
2288#define AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX 0x00fc0000
2289#define AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX_S 18
2290
2291/*
2306 * PHY radar detection register [5111+] 2292 * PHY radar detection register [5111+]
2307 */ 2293 */
2308#define AR5K_PHY_RADAR 0x9954 2294#define AR5K_PHY_RADAR 0x9954
@@ -2355,7 +2341,7 @@
2355#define AR5K_PHY_SIGMA_DELTA_FILT2_S 3 2341#define AR5K_PHY_SIGMA_DELTA_FILT2_S 3
2356#define AR5K_PHY_SIGMA_DELTA_FILT1 0x00001f00 2342#define AR5K_PHY_SIGMA_DELTA_FILT1 0x00001f00
2357#define AR5K_PHY_SIGMA_DELTA_FILT1_S 8 2343#define AR5K_PHY_SIGMA_DELTA_FILT1_S 8
2358#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP 0x01ff3000 2344#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP 0x01ffe000
2359#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP_S 13 2345#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP_S 13
2360 2346
2361/* 2347/*
@@ -2387,21 +2373,21 @@
2387#define AR5K_PHY_BIN_MASK2_4_MASK_4 0x00003fff 2373#define AR5K_PHY_BIN_MASK2_4_MASK_4 0x00003fff
2388#define AR5K_PHY_BIN_MASK2_4_MASK_4_S 0 2374#define AR5K_PHY_BIN_MASK2_4_MASK_4_S 0
2389 2375
2390#define AR_PHY_TIMING_9 0x9998 2376#define AR5K_PHY_TIMING_9 0x9998
2391#define AR_PHY_TIMING_10 0x999c 2377#define AR5K_PHY_TIMING_10 0x999c
2392#define AR_PHY_TIMING_10_PILOT_MASK_2 0x000fffff 2378#define AR5K_PHY_TIMING_10_PILOT_MASK_2 0x000fffff
2393#define AR_PHY_TIMING_10_PILOT_MASK_2_S 0 2379#define AR5K_PHY_TIMING_10_PILOT_MASK_2_S 0
2394 2380
2395/* 2381/*
2396 * Spur mitigation control 2382 * Spur mitigation control
2397 */ 2383 */
2398#define AR_PHY_TIMING_11 0x99a0 /* Register address */ 2384#define AR5K_PHY_TIMING_11 0x99a0 /* Register address */
2399#define AR_PHY_TIMING_11_SPUR_DELTA_PHASE 0x000fffff /* Spur delta phase */ 2385#define AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE 0x000fffff /* Spur delta phase */
2400#define AR_PHY_TIMING_11_SPUR_DELTA_PHASE_S 0 2386#define AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE_S 0
2401#define AR_PHY_TIMING_11_SPUR_FREQ_SD 0x3ff00000 /* Freq sigma delta */ 2387#define AR5K_PHY_TIMING_11_SPUR_FREQ_SD 0x3ff00000 /* Freq sigma delta */
2402#define AR_PHY_TIMING_11_SPUR_FREQ_SD_S 20 2388#define AR5K_PHY_TIMING_11_SPUR_FREQ_SD_S 20
2403#define AR_PHY_TIMING_11_USE_SPUR_IN_AGC 0x40000000 /* Spur filter in AGC detector */ 2389#define AR5K_PHY_TIMING_11_USE_SPUR_IN_AGC 0x40000000 /* Spur filter in AGC detector */
2404#define AR_PHY_TIMING_11_USE_SPUR_IN_SELFCOR 0x80000000 /* Spur filter in OFDM self correlator */ 2390#define AR5K_PHY_TIMING_11_USE_SPUR_IN_SELFCOR 0x80000000 /* Spur filter in OFDM self correlator */
2405 2391
2406/* 2392/*
2407 * Gain tables 2393 * Gain tables
@@ -2483,17 +2469,7 @@
2483#define AR5K_PHY_SDELAY 0x99f4 2469#define AR5K_PHY_SDELAY 0x99f4
2484#define AR5K_PHY_SDELAY_32MHZ 0x000000ff 2470#define AR5K_PHY_SDELAY_32MHZ 0x000000ff
2485#define AR5K_PHY_SPENDING 0x99f8 2471#define AR5K_PHY_SPENDING 0x99f8
2486#define AR5K_PHY_SPENDING_14 0x00000014 2472
2487#define AR5K_PHY_SPENDING_18 0x00000018
2488#define AR5K_PHY_SPENDING_RF5111 0x00000018
2489#define AR5K_PHY_SPENDING_RF5112 0x00000014
2490/* #define AR5K_PHY_SPENDING_RF5112A 0x0000000e */
2491/* #define AR5K_PHY_SPENDING_RF5424 0x00000012 */
2492#define AR5K_PHY_SPENDING_RF5413 0x00000018
2493#define AR5K_PHY_SPENDING_RF2413 0x00000018
2494#define AR5K_PHY_SPENDING_RF2316 0x00000018
2495#define AR5K_PHY_SPENDING_RF2317 0x00000018
2496#define AR5K_PHY_SPENDING_RF2425 0x00000014
2497 2473
2498/* 2474/*
2499 * PHY PAPD I (power?) table (?) 2475 * PHY PAPD I (power?) table (?)
@@ -2505,11 +2481,7 @@
2505/* 2481/*
2506 * PHY PCDAC TX power table 2482 * PHY PCDAC TX power table
2507 */ 2483 */
2508#define AR5K_PHY_PCDAC_TXPOWER_BASE_5211 0xa180 2484#define AR5K_PHY_PCDAC_TXPOWER_BASE 0xa180
2509#define AR5K_PHY_PCDAC_TXPOWER_BASE_2413 0xa280
2510#define AR5K_PHY_PCDAC_TXPOWER_BASE (ah->ah_radio >= AR5K_RF2413 ? \
2511 AR5K_PHY_PCDAC_TXPOWER_BASE_2413 :\
2512 AR5K_PHY_PCDAC_TXPOWER_BASE_5211)
2513#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2)) 2485#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2))
2514 2486
2515/* 2487/*
@@ -2590,3 +2562,9 @@
2590#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3_S 16 2562#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3_S 16
2591#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4 0x0FC00000 2563#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4 0x0FC00000
2592#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4_S 22 2564#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4_S 22
2565
2566/*
2567 * PHY PDADC Tx power table
2568 */
2569#define AR5K_PHY_PDADC_TXPOWER_BASE 0xa280
2570#define AR5K_PHY_PDADC_TXPOWER(_n) (AR5K_PHY_PDADC_TXPOWER_BASE + ((_n) << 2))
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index dc2d7d8bdb7..1531ccd3506 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -25,7 +25,8 @@
25 Reset functions and helpers 25 Reset functions and helpers
26\*****************************/ 26\*****************************/
27 27
28#include <linux/pci.h> 28#include <linux/pci.h> /* To determine if a card is pci-e */
29#include <linux/bitops.h> /* For get_bitmask_order */
29#include "ath5k.h" 30#include "ath5k.h"
30#include "reg.h" 31#include "reg.h"
31#include "base.h" 32#include "base.h"
@@ -37,10 +38,14 @@
37 * @ah: the &struct ath5k_hw 38 * @ah: the &struct ath5k_hw
38 * @channel: the currently set channel upon reset 39 * @channel: the currently set channel upon reset
39 * 40 *
40 * Write the OFDM timings for the AR5212 upon reset. This is a helper for 41 * Write the delta slope coefficient (used on pilot tracking ?) for OFDM
41 * ath5k_hw_reset(). This seems to tune the PLL a specified frequency 42 * operation on the AR5212 upon reset. This is a helper for ath5k_hw_reset().
42 * depending on the bandwidth of the channel.
43 * 43 *
 44 * Since delta slope is a floating point value we split it into its
 45 * exponent and mantissa and provide these values to the hw.
 46 *
 47 * For more info, I think this patent is related
48 * http://www.freepatentsonline.com/7184495.html
44 */ 49 */
45static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, 50static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
46 struct ieee80211_channel *channel) 51 struct ieee80211_channel *channel)
@@ -53,23 +58,34 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
53 !(channel->hw_value & CHANNEL_OFDM)) 58 !(channel->hw_value & CHANNEL_OFDM))
54 BUG(); 59 BUG();
55 60
56 /* Seems there are two PLLs, one for baseband sampling and one 61 /* Get coefficient
 57 * for tuning. Tuning basebands are 40 MHz or 80MHz when in	 62	 * ALGO: coef = (5 * clock / carrier_freq) / 2
58 * turbo. */ 63 * we scale coef by shifting clock value by 24 for
59 clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40; 64 * better precision since we use integers */
60 coef_scaled = ((5 * (clock << 24)) / 2) / 65 /* TODO: Half/quarter rate */
61 channel->center_freq; 66 clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO);
62 67
63 for (coef_exp = 31; coef_exp > 0; coef_exp--) 68 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
64 if ((coef_scaled >> coef_exp) & 0x1) 69
65 break; 70 /* Get exponent
71 * ALGO: coef_exp = 14 - highest set bit position */
72 coef_exp = get_bitmask_order(coef_scaled);
66 73
74 /* Doesn't make sense if it's zero*/
67 if (!coef_exp) 75 if (!coef_exp)
68 return -EINVAL; 76 return -EINVAL;
69 77
78 /* Note: we've shifted coef_scaled by 24 */
70 coef_exp = 14 - (coef_exp - 24); 79 coef_exp = 14 - (coef_exp - 24);
80
81
82 /* Get mantissa (significant digits)
 83	 * ALGO: coef_mant = floor(coef_scaled * 2^coef_exp + 0.5) */
71 coef_man = coef_scaled + 84 coef_man = coef_scaled +
72 (1 << (24 - coef_exp - 1)); 85 (1 << (24 - coef_exp - 1));
86
87 /* Calculate delta slope coefficient exponent
88 * and mantissa (remove scaling) and set them on hw */
73 ds_coef_man = coef_man >> (24 - coef_exp); 89 ds_coef_man = coef_man >> (24 - coef_exp);
74 ds_coef_exp = coef_exp - 16; 90 ds_coef_exp = coef_exp - 16;
75 91
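For reference, the delta slope math above reduces to a handful of integer operations. The stand-alone sketch below mirrors it for a hypothetical 5180 MHz channel on the 40 MHz (non-turbo) clock; it is illustrative only, and bitmask_order() merely stands in for the kernel's get_bitmask_order():

#include <stdio.h>

static int bitmask_order(unsigned int x)
{
	int order = 0;

	while (x) {
		order++;
		x >>= 1;
	}
	return order;	/* 1-based position of the highest set bit */
}

int main(void)
{
	unsigned int clock = 40;	/* MHz, non-turbo */
	unsigned int freq = 5180;	/* example channel center, MHz */
	unsigned int coef_scaled, coef_exp, coef_man;

	/* coefficient scaled by 2^24 so the division stays in integers */
	coef_scaled = ((5 * (clock << 24)) / 2) / freq;
	coef_exp = 14 - (bitmask_order(coef_scaled) - 24);
	coef_man = coef_scaled + (1 << (24 - coef_exp - 1));	/* rounding */

	printf("ds_coef_man %u ds_coef_exp %u\n",
	       coef_man >> (24 - coef_exp), coef_exp - 16);
	return 0;
}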
@@ -90,16 +106,23 @@ static int control_rates[] =
90 { 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 }; 106 { 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 };
91 107
92/** 108/**
93 * ath5k_hw_write_rate_duration - set rate duration during hw resets 109 * ath5k_hw_write_rate_duration - fill rate code to duration table
94 * 110 *
95 * @ah: the &struct ath5k_hw 111 * @ah: the &struct ath5k_hw
96 * @mode: one of enum ath5k_driver_mode 112 * @mode: one of enum ath5k_driver_mode
97 * 113 *
98 * Write the rate duration table upon hw reset. This is a helper for 114 * Write the rate code to duration table upon hw reset. This is a helper for
99 * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout for 115 * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout on
100 * the hardware for the current mode for each rate. The rates which are capable 116 * the hardware, based on current mode, for each rate. The rates which are
101 * of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have another 117 * capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have
 102 * register for the short preamble ACK timeout calculation.	 118	 * a different rate code so we write their value twice (one for long preamble
119 * and one for short).
120 *
 121	 * Note: Band doesn't matter here; if we set the values for OFDM it works
122 * on both a and g modes. So all we have to do is set values for all g rates
123 * that include all OFDM and CCK rates. If we operate in turbo or xr/half/
124 * quarter rate mode, we need to use another set of bitrates (that's why we
125 * need the mode parameter) but we don't handle these proprietary modes yet.
103 */ 126 */
104static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah, 127static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
105 unsigned int mode) 128 unsigned int mode)
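To see why the short-preamble entries differ, here is rough, generic 802.11b airtime math for the 14-byte ACK frame the table covers; it assumes the standard 192/96 us long/short PLCP preamble+header times and is not the driver's helper:

	static unsigned int cck_ack_tx_time(unsigned int rate_kbps,
					    int short_preamble)
	{
		unsigned int plcp = short_preamble ? 96 : 192;	/* us */
		unsigned int bits = 14 * 8;			/* ACK frame */

		return plcp + (bits * 1000 + rate_kbps - 1) / rate_kbps;
	}
	/* e.g. at 11 Mbps: 203 us with a long preamble, 107 us with a short one */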
@@ -275,7 +298,8 @@ commit:
275} 298}
276 299
277/* 300/*
278 * Bring up MAC + PHY Chips 301 * Bring up MAC + PHY Chips and program PLL
302 * TODO: Half/Quarter rate support
279 */ 303 */
280int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial) 304int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
281{ 305{
@@ -333,7 +357,11 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
333 } 357 }
334 } else if (flags & CHANNEL_5GHZ) { 358 } else if (flags & CHANNEL_5GHZ) {
335 mode |= AR5K_PHY_MODE_FREQ_5GHZ; 359 mode |= AR5K_PHY_MODE_FREQ_5GHZ;
336 clock |= AR5K_PHY_PLL_40MHZ; 360
361 if (ah->ah_radio == AR5K_RF5413)
362 clock |= AR5K_PHY_PLL_40MHZ_5413;
363 else
364 clock |= AR5K_PHY_PLL_40MHZ;
337 365
338 if (flags & CHANNEL_OFDM) 366 if (flags & CHANNEL_OFDM)
339 mode |= AR5K_PHY_MODE_MOD_OFDM; 367 mode |= AR5K_PHY_MODE_MOD_OFDM;
@@ -391,10 +419,14 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
391 } 419 }
392 420
393 if (ah->ah_version != AR5K_AR5210) { 421 if (ah->ah_version != AR5K_AR5210) {
394 /* ...set the PHY operating mode */
395 ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
396 udelay(300);
397 422
423 /* ...update PLL if needed */
424 if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
425 ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
426 udelay(300);
427 }
428
429 /* ...set the PHY operating mode */
398 ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE); 430 ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
399 ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO); 431 ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
400 } 432 }
@@ -403,22 +435,393 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
403} 435}
404 436
405/* 437/*
438 * If there is an external 32KHz crystal available, use it
439 * as ref. clock instead of 32/40MHz clock and baseband clocks
440 * to save power during sleep or restore normal 32/40MHz
441 * operation.
442 *
443 * XXX: When operating on 32KHz certain PHY registers (27 - 31,
444 * 123 - 127) require delay on access.
445 */
446static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
447{
448 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
449 u32 scal, spending, usec32;
450
451 /* Only set 32KHz settings if we have an external
452 * 32KHz crystal present */
453 if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) ||
454 AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) &&
455 enable) {
456
457 /* 1 usec/cycle */
458 AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1);
459 /* Set up tsf increment on each cycle */
460 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61);
461
462 /* Set baseband sleep control registers
463 * and sleep control rate */
464 ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
465
466 if ((ah->ah_radio == AR5K_RF5112) ||
467 (ah->ah_radio == AR5K_RF5413) ||
468 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
469 spending = 0x14;
470 else
471 spending = 0x18;
472 ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
473
474 if ((ah->ah_radio == AR5K_RF5112) ||
475 (ah->ah_radio == AR5K_RF5413) ||
476 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
477 ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT);
478 ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL);
479 ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK);
480 ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY);
481 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
482 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02);
483 } else {
484 ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT);
485 ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL);
486 ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK);
487 ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY);
488 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
489 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03);
490 }
491
492 /* Enable sleep clock operation */
493 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
494 AR5K_PCICFG_SLEEP_CLOCK_EN);
495
496 } else {
497
498 /* Disable sleep clock operation and
499 * restore default parameters */
500 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
501 AR5K_PCICFG_SLEEP_CLOCK_EN);
502
503 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
504 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0);
505
506 ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
507 ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
508
509 if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
510 scal = AR5K_PHY_SCAL_32MHZ_2417;
511 else if (ath5k_eeprom_is_hb63(ah))
512 scal = AR5K_PHY_SCAL_32MHZ_HB63;
513 else
514 scal = AR5K_PHY_SCAL_32MHZ;
515 ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
516
517 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
518 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
519
520 if ((ah->ah_radio == AR5K_RF5112) ||
521 (ah->ah_radio == AR5K_RF5413) ||
522 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
523 spending = 0x14;
524 else
525 spending = 0x18;
526 ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
527
528 if ((ah->ah_radio == AR5K_RF5112) ||
529 (ah->ah_radio == AR5K_RF5413))
530 usec32 = 39;
531 else
532 usec32 = 31;
533 AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, usec32);
534
535 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
536 }
537 return;
538}
539
540static bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
541 struct ieee80211_channel *channel)
542{
543 u8 refclk_freq;
544
545 if ((ah->ah_radio == AR5K_RF5112) ||
546 (ah->ah_radio == AR5K_RF5413) ||
547 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
548 refclk_freq = 40;
549 else
550 refclk_freq = 32;
551
552 if ((channel->center_freq % refclk_freq != 0) &&
553 ((channel->center_freq % refclk_freq < 10) ||
554 (channel->center_freq % refclk_freq > 22)))
555 return true;
556 else
557 return false;
558}
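A quick worked example of the check above, written as a kernel-style fragment and assuming the 40 MHz reference clock of RF5112/RF5413/AR2417 parts (illustrative only, not part of the patch):

	static bool chan_has_spur(unsigned int freq_mhz, unsigned int refclk_mhz)
	{
		unsigned int m = freq_mhz % refclk_mhz;

		/* spur when a refclk harmonic lands close to the channel */
		return m != 0 && (m < 10 || m > 22);
	}
	/* chan_has_spur(2412, 40) == false: 12 falls in the clean 10..22 window */
	/* chan_has_spur(2437, 40) == true:  37 > 22, so backoff is applied      */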
559
560/* TODO: Half/Quarter rate */
561static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
562 struct ieee80211_channel *channel)
563{
564 if (ah->ah_version == AR5K_AR5212 &&
565 ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
566
567 /* Setup ADC control */
568 ath5k_hw_reg_write(ah,
569 (AR5K_REG_SM(2,
570 AR5K_PHY_ADC_CTL_INBUFGAIN_OFF) |
571 AR5K_REG_SM(2,
572 AR5K_PHY_ADC_CTL_INBUFGAIN_ON) |
573 AR5K_PHY_ADC_CTL_PWD_DAC_OFF |
574 AR5K_PHY_ADC_CTL_PWD_ADC_OFF),
575 AR5K_PHY_ADC_CTL);
576
577
578
579 /* Disable barker RSSI threshold */
580 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_DAG_CCK_CTL,
581 AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR);
582
583 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DAG_CCK_CTL,
584 AR5K_PHY_DAG_CCK_CTL_RSSI_THR, 2);
585
586 /* Set the mute mask */
587 ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
588 }
589
590 /* Clear PHY_BLUETOOTH to allow RX_CLEAR line debug */
591 if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212B)
592 ath5k_hw_reg_write(ah, 0, AR5K_PHY_BLUETOOTH);
593
594 /* Enable DCU double buffering */
595 if (ah->ah_phy_revision > AR5K_SREV_PHY_5212B)
596 AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
597 AR5K_TXCFG_DCU_DBL_BUF_DIS);
598
599 /* Set DAC/ADC delays */
600 if (ah->ah_version == AR5K_AR5212) {
601 u32 scal;
602 if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
603 scal = AR5K_PHY_SCAL_32MHZ_2417;
604 else if (ath5k_eeprom_is_hb63(ah))
605 scal = AR5K_PHY_SCAL_32MHZ_HB63;
606 else
607 scal = AR5K_PHY_SCAL_32MHZ;
608 ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
609 }
610
611 /* Set fast ADC */
612 if ((ah->ah_radio == AR5K_RF5413) ||
613 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
614 u32 fast_adc = true;
615
616 if (channel->center_freq == 2462 ||
617 channel->center_freq == 2467)
618 fast_adc = 0;
619
620 /* Only update if needed */
621 if (ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ADC) != fast_adc)
622 ath5k_hw_reg_write(ah, fast_adc,
623 AR5K_PHY_FAST_ADC);
624 }
625
626 /* Fix for first revision of the RF5112 RF chipset */
627 if (ah->ah_radio == AR5K_RF5112 &&
628 ah->ah_radio_5ghz_revision <
629 AR5K_SREV_RAD_5112A) {
630 u32 data;
631 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
632 AR5K_PHY_CCKTXCTL);
633 if (channel->hw_value & CHANNEL_5GHZ)
634 data = 0xffb81020;
635 else
636 data = 0xffb80d20;
637 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
638 }
639
640 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
641 u32 usec_reg;
642 /* 5311 has different tx/rx latency masks
 643		 * from 5211; since we treat 5311 the same
644 * as 5211 when setting initvals, shift
645 * values here to their proper locations */
646 usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211);
647 ath5k_hw_reg_write(ah, usec_reg & (AR5K_USEC_1 |
648 AR5K_USEC_32 |
649 AR5K_USEC_TX_LATENCY_5211 |
650 AR5K_REG_SM(29,
651 AR5K_USEC_RX_LATENCY_5210)),
652 AR5K_USEC_5211);
653 /* Clear QCU/DCU clock gating register */
654 ath5k_hw_reg_write(ah, 0, AR5K_QCUDCU_CLKGT);
655 /* Set DAC/ADC delays */
656 ath5k_hw_reg_write(ah, 0x08, AR5K_PHY_SCAL);
657 /* Enable PCU FIFO corruption ECO */
658 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
659 AR5K_DIAG_SW_ECO_ENABLE);
660 }
661}
662
663static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
664 struct ieee80211_channel *channel, u8 *ant, u8 ee_mode)
665{
666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
667
668 /* Set CCK to OFDM power delta */
669 if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
670 int16_t cck_ofdm_pwr_delta;
671
672 /* Adjust power delta for channel 14 */
673 if (channel->center_freq == 2484)
674 cck_ofdm_pwr_delta =
675 ((ee->ee_cck_ofdm_power_delta -
676 ee->ee_scaled_cck_delta) * 2) / 10;
677 else
678 cck_ofdm_pwr_delta =
679 (ee->ee_cck_ofdm_power_delta * 2) / 10;
680
681 if (channel->hw_value == CHANNEL_G)
682 ath5k_hw_reg_write(ah,
683 AR5K_REG_SM((ee->ee_cck_ofdm_power_delta * -1),
684 AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) |
685 AR5K_REG_SM((cck_ofdm_pwr_delta * -1),
686 AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX),
687 AR5K_PHY_TX_PWR_ADJ);
688 else
689 ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ);
690 }
691
692 /* Set antenna idle switch table */
693 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
694 AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
695 (ah->ah_antenna[ee_mode][0] |
696 AR5K_PHY_ANT_CTL_TXRX_EN));
697
698 /* Set antenna switch table */
699 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
700 AR5K_PHY_ANT_SWITCH_TABLE_0);
701 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
702 AR5K_PHY_ANT_SWITCH_TABLE_1);
703
704 /* Noise floor threshold */
705 ath5k_hw_reg_write(ah,
706 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
707 AR5K_PHY_NFTHRES);
708
709 if ((channel->hw_value & CHANNEL_TURBO) &&
710 (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0)) {
711 /* Switch settling time (Turbo) */
712 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
713 AR5K_PHY_SETTLING_SWITCH,
714 ee->ee_switch_settling_turbo[ee_mode]);
715
716 /* Tx/Rx attenuation (Turbo) */
717 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN,
718 AR5K_PHY_GAIN_TXRX_ATTEN,
719 ee->ee_atn_tx_rx_turbo[ee_mode]);
720
721 /* ADC/PGA desired size (Turbo) */
722 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
723 AR5K_PHY_DESIRED_SIZE_ADC,
724 ee->ee_adc_desired_size_turbo[ee_mode]);
725
726 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
727 AR5K_PHY_DESIRED_SIZE_PGA,
728 ee->ee_pga_desired_size_turbo[ee_mode]);
729
730 /* Tx/Rx margin (Turbo) */
731 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
732 AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
733 ee->ee_margin_tx_rx_turbo[ee_mode]);
734
735 } else {
736 /* Switch settling time */
737 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
738 AR5K_PHY_SETTLING_SWITCH,
739 ee->ee_switch_settling[ee_mode]);
740
741 /* Tx/Rx attenuation */
742 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN,
743 AR5K_PHY_GAIN_TXRX_ATTEN,
744 ee->ee_atn_tx_rx[ee_mode]);
745
746 /* ADC/PGA desired size */
747 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
748 AR5K_PHY_DESIRED_SIZE_ADC,
749 ee->ee_adc_desired_size[ee_mode]);
750
751 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
752 AR5K_PHY_DESIRED_SIZE_PGA,
753 ee->ee_pga_desired_size[ee_mode]);
754
755 /* Tx/Rx margin */
756 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
757 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
758 AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
759 ee->ee_margin_tx_rx[ee_mode]);
760 }
761
762 /* XPA delays */
763 ath5k_hw_reg_write(ah,
764 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
765 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
766 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
767 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
768
769 /* XLNA delay */
770 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL3,
771 AR5K_PHY_RF_CTL3_TXE2XLNA_ON,
772 ee->ee_tx_end2xlna_enable[ee_mode]);
773
 774	 /* Thresh62 (ANI) */
775 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_NF,
776 AR5K_PHY_NF_THRESH62,
777 ee->ee_thr_62[ee_mode]);
778
779
780 /* False detect backoff for channels
781 * that have spur noise. Write the new
782 * cyclic power RSSI threshold. */
783 if (ath5k_hw_chan_has_spur_noise(ah, channel))
784 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
785 AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1,
786 AR5K_INIT_CYCRSSI_THR1 +
787 ee->ee_false_detect[ee_mode]);
788 else
789 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
790 AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1,
791 AR5K_INIT_CYCRSSI_THR1);
792
793 /* I/Q correction
794 * TODO: Per channel i/q infos ? */
795 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
796 AR5K_PHY_IQ_CORR_ENABLE |
797 (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
798 ee->ee_q_cal[ee_mode]);
799
 800	 /* Heavy clipping - disable for now */
801 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
802 ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
803
804 return;
805}
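The CCK to OFDM power delta scaling at the top of this function boils down to the fragment below. The unit interpretation is an assumption (EEPROM delta stored in 0.1 dB steps, register field taking 0.5 dB steps, hence the *2/10), and the sample numbers are made up:

	static s16 scale_cck_ofdm_delta(s16 ee_delta, s16 scaled_cck_delta,
					bool chan14)
	{
		if (chan14)	/* Japan-only channel 14 (2484 MHz) */
			return ((ee_delta - scaled_cck_delta) * 2) / 10;

		return (ee_delta * 2) / 10;
	}
	/* e.g. ee_delta 15, scaled_cck_delta 5 -> 2 on channel 14, 3 elsewhere */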
806
807/*
406 * Main reset function 808 * Main reset function
407 */ 809 */
408int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 810int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
409 struct ieee80211_channel *channel, bool change_channel) 811 struct ieee80211_channel *channel, bool change_channel)
410{ 812{
411 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 813 u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo;
412 struct pci_dev *pdev = ah->ah_sc->pdev; 814 u32 phy_tst1;
413 u32 data, s_seq, s_ant, s_led[3], dma_size; 815 u8 mode, freq, ee_mode, ant[2];
414 unsigned int i, mode, freq, ee_mode, ant[2]; 816 int i, ret;
415 int ret;
416 817
417 ATH5K_TRACE(ah->ah_sc); 818 ATH5K_TRACE(ah->ah_sc);
418 819
419 s_seq = 0;
420 s_ant = 0; 820 s_ant = 0;
421 ee_mode = 0; 821 ee_mode = 0;
822 staid1_flags = 0;
823 tsf_up = 0;
824 tsf_lo = 0;
422 freq = 0; 825 freq = 0;
423 mode = 0; 826 mode = 0;
424 827
@@ -427,48 +830,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
427 */ 830 */
428 /*DCU/Antenna selection not available on 5210*/ 831 /*DCU/Antenna selection not available on 5210*/
429 if (ah->ah_version != AR5K_AR5210) { 832 if (ah->ah_version != AR5K_AR5210) {
430 if (change_channel) {
431 /* Seq number for queue 0 -do this for all queues ? */
432 s_seq = ath5k_hw_reg_read(ah,
433 AR5K_QUEUE_DFS_SEQNUM(0));
434 /*Default antenna*/
435 s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
436 }
437 }
438
439 /*GPIOs*/
440 s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_LEDSTATE;
441 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
442 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
443
444 if (change_channel && ah->ah_rf_banks != NULL)
445 ath5k_hw_get_rf_gain(ah);
446
447
448 /*Wakeup the device*/
449 ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
450 if (ret)
451 return ret;
452
453 /*
454 * Initialize operating mode
455 */
456 ah->ah_op_mode = op_mode;
457
458 /*
459 * 5111/5112 Settings
460 * 5210 only comes with RF5110
461 */
462 if (ah->ah_version != AR5K_AR5210) {
463 if (ah->ah_radio != AR5K_RF5111 &&
464 ah->ah_radio != AR5K_RF5112 &&
465 ah->ah_radio != AR5K_RF5413 &&
466 ah->ah_radio != AR5K_RF2413 &&
467 ah->ah_radio != AR5K_RF2425) {
468 ATH5K_ERR(ah->ah_sc,
469 "invalid phy radio: %u\n", ah->ah_radio);
470 return -EINVAL;
471 }
472 833
473 switch (channel->hw_value & CHANNEL_MODES) { 834 switch (channel->hw_value & CHANNEL_MODES) {
474 case CHANNEL_A: 835 case CHANNEL_A:
@@ -491,8 +852,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
491 freq = AR5K_INI_RFGAIN_5GHZ; 852 freq = AR5K_INI_RFGAIN_5GHZ;
492 ee_mode = AR5K_EEPROM_MODE_11A; 853 ee_mode = AR5K_EEPROM_MODE_11A;
493 break; 854 break;
494 /*Is this ok on 5211 too ?*/
495 case CHANNEL_TG: 855 case CHANNEL_TG:
856 if (ah->ah_version == AR5K_AR5211) {
857 ATH5K_ERR(ah->ah_sc,
858 "TurboG mode not available on 5211");
859 return -EINVAL;
860 }
496 mode = AR5K_MODE_11G_TURBO; 861 mode = AR5K_MODE_11G_TURBO;
497 freq = AR5K_INI_RFGAIN_2GHZ; 862 freq = AR5K_INI_RFGAIN_2GHZ;
498 ee_mode = AR5K_EEPROM_MODE_11G; 863 ee_mode = AR5K_EEPROM_MODE_11G;
@@ -513,11 +878,93 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
513 return -EINVAL; 878 return -EINVAL;
514 } 879 }
515 880
516 /* PHY access enable */ 881 if (change_channel) {
517 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); 882 /*
883 * Save frame sequence count
884 * For revs. after Oahu, only save
885 * seq num for DCU 0 (Global seq num)
886 */
887 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
888
889 for (i = 0; i < 10; i++)
890 s_seq[i] = ath5k_hw_reg_read(ah,
891 AR5K_QUEUE_DCU_SEQNUM(i));
892
893 } else {
894 s_seq[0] = ath5k_hw_reg_read(ah,
895 AR5K_QUEUE_DCU_SEQNUM(0));
896 }
897
 898		 /* TSF accelerates on AR5211 during reset.
 899		 * As a workaround, save it here and restore
900 * it later so that it's back in time after
901 * reset. This way it'll get re-synced on the
902 * next beacon without breaking ad-hoc.
903 *
904 * On AR5212 TSF is almost preserved across a
905 * reset so it stays back in time anyway and
906 * we don't have to save/restore it.
907 *
908 * XXX: Since this breaks power saving we have
909 * to disable power saving until we receive the
910 * next beacon, so we can resync beacon timers */
911 if (ah->ah_version == AR5K_AR5211) {
912 tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
913 tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
914 }
915 }
916
917 /* Save default antenna */
918 s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
518 919
920 if (ah->ah_version == AR5K_AR5212) {
921 /* Restore normal 32/40MHz clock operation
922 * to avoid register access delay on certain
923 * PHY registers */
924 ath5k_hw_set_sleep_clock(ah, false);
925
926 /* Since we are going to write rf buffer
927 * check if we have any pending gain_F
928 * optimization settings */
929 if (change_channel && ah->ah_rf_banks != NULL)
930 ath5k_hw_gainf_calibrate(ah);
931 }
519 } 932 }
520 933
934 /*GPIOs*/
935 s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) &
936 AR5K_PCICFG_LEDSTATE;
937 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
938 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
939
940 /* AR5K_STA_ID1 flags, only preserve antenna
941 * settings and ack/cts rate mode */
942 staid1_flags = ath5k_hw_reg_read(ah, AR5K_STA_ID1) &
943 (AR5K_STA_ID1_DEFAULT_ANTENNA |
944 AR5K_STA_ID1_DESC_ANTENNA |
945 AR5K_STA_ID1_RTS_DEF_ANTENNA |
946 AR5K_STA_ID1_ACKCTS_6MB |
947 AR5K_STA_ID1_BASE_RATE_11B |
948 AR5K_STA_ID1_SELFGEN_DEF_ANT);
949
950 /* Wakeup the device */
951 ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
952 if (ret)
953 return ret;
954
955 /*
956 * Initialize operating mode
957 */
958 ah->ah_op_mode = op_mode;
959
960 /* PHY access enable */
961 if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
962 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
963 else
964 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ | 0x40,
965 AR5K_PHY(0));
966
967 /* Write initial settings */
521 ret = ath5k_hw_write_initvals(ah, mode, change_channel); 968 ret = ath5k_hw_write_initvals(ah, mode, change_channel);
522 if (ret) 969 if (ret)
523 return ret; 970 return ret;
@@ -526,64 +973,23 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
526 * 5211/5212 Specific 973 * 5211/5212 Specific
527 */ 974 */
528 if (ah->ah_version != AR5K_AR5210) { 975 if (ah->ah_version != AR5K_AR5210) {
976
529 /* 977 /*
530 * Write initial RF gain settings 978 * Write initial RF gain settings
531 * This should work for both 5111/5112 979 * This should work for both 5111/5112
532 */ 980 */
533 ret = ath5k_hw_rfgain(ah, freq); 981 ret = ath5k_hw_rfgain_init(ah, freq);
534 if (ret) 982 if (ret)
535 return ret; 983 return ret;
536 984
537 mdelay(1); 985 mdelay(1);
538 986
539 /* 987 /*
540 * Write some more initial register settings for revised chips 988 * Tweak initval settings for revised
989 * chipsets and add some more config
990 * bits
541 */ 991 */
542 if (ah->ah_version == AR5K_AR5212 && 992 ath5k_hw_tweak_initval_settings(ah, channel);
543 ah->ah_phy_revision > 0x41) {
544 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
545
546 if (channel->hw_value == CHANNEL_G)
547 if (ah->ah_mac_srev < AR5K_SREV_AR2413)
548 ath5k_hw_reg_write(ah, 0x00f80d80,
549 0x994c);
550 else if (ah->ah_mac_srev < AR5K_SREV_AR5424)
551 ath5k_hw_reg_write(ah, 0x00380140,
552 0x994c);
553 else if (ah->ah_mac_srev < AR5K_SREV_AR2425)
554 ath5k_hw_reg_write(ah, 0x00fc0ec0,
555 0x994c);
556 else /* 2425 */
557 ath5k_hw_reg_write(ah, 0x00fc0fc0,
558 0x994c);
559 else
560 ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
561
562 /* Got this from legacy-hal */
563 AR5K_REG_DISABLE_BITS(ah, 0xa228, 0x200);
564
565 AR5K_REG_MASKED_BITS(ah, 0xa228, 0x800, 0xfffe03ff);
566
567 /* Just write 0x9b5 ? */
568 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
569 ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
570 ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
571 ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
572 }
573
574 /* Fix for first revision of the RF5112 RF chipset */
575 if (ah->ah_radio >= AR5K_RF5112 &&
576 ah->ah_radio_5ghz_revision <
577 AR5K_SREV_RAD_5112A) {
578 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
579 AR5K_PHY_CCKTXCTL);
580 if (channel->hw_value & CHANNEL_5GHZ)
581 data = 0xffb81020;
582 else
583 data = 0xffb80d20;
584 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
585 data = 0;
586 }
587 993
588 /* 994 /*
589 * Set TX power (FIXME) 995 * Set TX power (FIXME)
@@ -601,15 +1007,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
601 ath5k_hw_write_rate_duration(ah, mode); 1007 ath5k_hw_write_rate_duration(ah, mode);
602 1008
603 /* 1009 /*
604 * Write RF registers 1010 * Write RF buffer
605 */ 1011 */
606 ret = ath5k_hw_rfregs(ah, channel, mode); 1012 ret = ath5k_hw_rfregs_init(ah, channel, mode);
607 if (ret) 1013 if (ret)
608 return ret; 1014 return ret;
609 1015
610 /*
611 * Configure additional registers
612 */
613 1016
614 /* Write OFDM timings on 5212*/ 1017 /* Write OFDM timings on 5212*/
615 if (ah->ah_version == AR5K_AR5212 && 1018 if (ah->ah_version == AR5K_AR5212 &&
@@ -631,17 +1034,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
631 } 1034 }
632 1035
633 /* 1036 /*
634 * Set channel and calibrate the PHY
635 */
636 ret = ath5k_hw_channel(ah, channel);
637 if (ret)
638 return ret;
639
640 /* Set antenna mode */
641 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
642 ah->ah_antenna[ee_mode][0], 0xfffffc06);
643
644 /*
645 * In case a fixed antenna was set as default 1037 * In case a fixed antenna was set as default
646 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE 1038 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
647 * registers. 1039 * registers.
@@ -656,54 +1048,16 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
656 ant[1] = AR5K_ANT_FIXED_B; 1048 ant[1] = AR5K_ANT_FIXED_B;
657 } 1049 }
658 1050
659 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
660 AR5K_PHY_ANT_SWITCH_TABLE_0);
661 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
662 AR5K_PHY_ANT_SWITCH_TABLE_1);
663
664 /* Commit values from EEPROM */ 1051 /* Commit values from EEPROM */
665 if (ah->ah_radio == AR5K_RF5111) 1052 ath5k_hw_commit_eeprom_settings(ah, channel, ant, ee_mode);
666 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
667 AR5K_PHY_FRAME_CTL_TX_CLIP, ee->ee_tx_clip);
668
669 ath5k_hw_reg_write(ah,
670 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
671 AR5K_PHY_NFTHRES);
672
673 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
674 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
675 0xffffc07f);
676 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
677 (ee->ee_atn_tx_rx[ee_mode] << 12) & 0x3f000,
678 0xfffc0fff);
679 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
680 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
681 ((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
682 0xffff0000);
683
684 ath5k_hw_reg_write(ah,
685 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
686 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
687 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
688 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
689
690 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
691 ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
692 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
693 (ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
694 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
695
696 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
697 AR5K_PHY_IQ_CORR_ENABLE |
698 (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
699 ee->ee_q_cal[ee_mode]);
700
701 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
702 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
703 AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
704 ee->ee_margin_tx_rx[ee_mode]);
705 1053
706 } else { 1054 } else {
1055 /*
1056 * For 5210 we do all initialization using
1057 * initvals, so we don't have to modify
1058 * any settings (5210 also only supports
1059 * a/aturbo modes)
1060 */
707 mdelay(1); 1061 mdelay(1);
708 /* Disable phy and wait */ 1062 /* Disable phy and wait */
709 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); 1063 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
@@ -713,100 +1067,154 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
713 /* 1067 /*
714 * Restore saved values 1068 * Restore saved values
715 */ 1069 */
1070
716 /*DCU/Antenna selection not available on 5210*/ 1071 /*DCU/Antenna selection not available on 5210*/
717 if (ah->ah_version != AR5K_AR5210) { 1072 if (ah->ah_version != AR5K_AR5210) {
718 ath5k_hw_reg_write(ah, s_seq, AR5K_QUEUE_DFS_SEQNUM(0)); 1073
1074 if (change_channel) {
1075 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
1076 for (i = 0; i < 10; i++)
1077 ath5k_hw_reg_write(ah, s_seq[i],
1078 AR5K_QUEUE_DCU_SEQNUM(i));
1079 } else {
1080 ath5k_hw_reg_write(ah, s_seq[0],
1081 AR5K_QUEUE_DCU_SEQNUM(0));
1082 }
1083
1084
1085 if (ah->ah_version == AR5K_AR5211) {
1086 ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32);
1087 ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
1088 }
1089 }
1090
719 ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA); 1091 ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
720 } 1092 }
1093
1094 /* Ledstate */
721 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]); 1095 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
1096
1097 /* Gpio settings */
722 ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR); 1098 ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
723 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO); 1099 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
724 1100
1101 /* Restore sta_id flags and preserve our mac address*/
1102 ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_sta_id),
1103 AR5K_STA_ID0);
1104 ath5k_hw_reg_write(ah, staid1_flags | AR5K_HIGH_ID(ah->ah_sta_id),
1105 AR5K_STA_ID1);
1106
1107
725 /* 1108 /*
726 * Misc 1109 * Configure PCU
727 */ 1110 */
1111
1112 /* Restore bssid and bssid mask */
728 /* XXX: add ah->aid once mac80211 gives this to us */ 1113 /* XXX: add ah->aid once mac80211 gives this to us */
729 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 1114 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
730 1115
1116 /* Set PCU config */
731 ath5k_hw_set_opmode(ah); 1117 ath5k_hw_set_opmode(ah);
732 /*PISR/SISR Not available on 5210*/ 1118
733 if (ah->ah_version != AR5K_AR5210) { 1119 /* Clear any pending interrupts
1120 * PISR/SISR Not available on 5210 */
1121 if (ah->ah_version != AR5K_AR5210)
734 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR); 1122 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
735 /* If we later allow tuning for this, store into sc structure */ 1123
736 data = AR5K_TUNE_RSSI_THRES | 1124 /* Set RSSI/BRSSI thresholds
737 AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S; 1125 *
738 ath5k_hw_reg_write(ah, data, AR5K_RSSI_THR); 1126 * Note: If we decide to set this value
1127 * dynamicaly, have in mind that when AR5K_RSSI_THR
1128 * register is read it might return 0x40 if we haven't
1129 * wrote anything to it plus BMISS RSSI threshold is zeroed.
1130 * So doing a save/restore procedure here isn't the right
1131 * choice. Instead store it on ath5k_hw */
1132 ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
1133 AR5K_TUNE_BMISS_THRES <<
1134 AR5K_RSSI_THR_BMISS_S),
1135 AR5K_RSSI_THR);
1136
1137 /* MIC QoS support */
1138 if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
1139 ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
1140 ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
739 } 1141 }
740 1142
1143 /* QoS NOACK Policy */
1144 if (ah->ah_version == AR5K_AR5212) {
1145 ath5k_hw_reg_write(ah,
1146 AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
1147 AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
1148 AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
1149 AR5K_QOS_NOACK);
1150 }
1151
1152
741 /* 1153 /*
742 * Set Rx/Tx DMA Configuration 1154 * Configure PHY
743 *
744 * Set maximum DMA size (512) except for PCI-E cards since
745 * it causes rx overruns and tx errors (tested on 5424 but since
746 * rx overruns also occur on 5416/5418 with madwifi we set 128
747 * for all PCI-E cards to be safe).
748 *
749 * In dumps this is 128 for allchips.
750 *
751 * XXX: need to check 5210 for this
 752          * TODO: Check out tx trigger level, it's always 64 on dumps but I
753 * guess we can tweak it and see how it goes ;-)
754 */ 1155 */
755 dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B; 1156
756 if (ah->ah_version != AR5K_AR5210) { 1157 /* Set channel on PHY */
757 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, 1158 ret = ath5k_hw_channel(ah, channel);
758 AR5K_TXCFG_SDMAMR, dma_size); 1159 if (ret)
759 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG, 1160 return ret;
760 AR5K_RXCFG_SDMAMW, dma_size);
761 }
762 1161
763 /* 1162 /*
764 * Enable the PHY and wait until completion 1163 * Enable the PHY and wait until completion
1164 * This includes BaseBand and Synthesizer
1165 * activation.
765 */ 1166 */
766 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); 1167 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
767 1168
768 /* 1169 /*
769 * On 5211+ read activation -> rx delay 1170 * On 5211+ read activation -> rx delay
770 * and use it. 1171 * and use it.
1172 *
1173 * TODO: Half/quarter rate support
771 */ 1174 */
772 if (ah->ah_version != AR5K_AR5210) { 1175 if (ah->ah_version != AR5K_AR5210) {
773 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & 1176 u32 delay;
1177 delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
774 AR5K_PHY_RX_DELAY_M; 1178 AR5K_PHY_RX_DELAY_M;
775 data = (channel->hw_value & CHANNEL_CCK) ? 1179 delay = (channel->hw_value & CHANNEL_CCK) ?
776 ((data << 2) / 22) : (data / 10); 1180 ((delay << 2) / 22) : (delay / 10);
777 1181
778 udelay(100 + (2 * data)); 1182 udelay(100 + (2 * delay));
779 data = 0;
780 } else { 1183 } else {
781 mdelay(1); 1184 mdelay(1);
782 } 1185 }
783 1186
784 /* 1187 /*
785 * Perform ADC test (?) 1188 * Perform ADC test to see if baseband is ready
1189 * Set tx hold and check adc test register
786 */ 1190 */
787 data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); 1191 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
788 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); 1192 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
789 for (i = 0; i <= 20; i++) { 1193 for (i = 0; i <= 20; i++) {
790 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) 1194 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
791 break; 1195 break;
792 udelay(200); 1196 udelay(200);
793 } 1197 }
794 ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1); 1198 ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
795 data = 0;
796 1199
797 /* 1200 /*
798 * Start automatic gain calibration 1201 * Start automatic gain control calibration
799 * 1202 *
800 * During AGC calibration RX path is re-routed to 1203 * During AGC calibration RX path is re-routed to
801 * a signal detector so we don't receive anything. 1204 * a power detector so we don't receive anything.
802 * 1205 *
803 * This method is used to calibrate some static offsets 1206 * This method is used to calibrate some static offsets
804 * used together with on-the fly I/Q calibration (the 1207 * used together with on-the fly I/Q calibration (the
805 * one performed via ath5k_hw_phy_calibrate), that doesn't 1208 * one performed via ath5k_hw_phy_calibrate), that doesn't
806 * interrupt rx path. 1209 * interrupt rx path.
807 * 1210 *
1211 * While rx path is re-routed to the power detector we also
1212 * start a noise floor calibration, to measure the
1213 * card's noise floor (the noise we measure when we are not
                                 1214          * transmitting or receiving anything).
1215 *
808 * If we are in a noisy environment AGC calibration may time 1216 * If we are in a noisy environment AGC calibration may time
 809          * out.                           1217          * out and/or noise floor calibration might time out.
810 */ 1218 */
811 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1219 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
812 AR5K_PHY_AGCCTL_CAL); 1220 AR5K_PHY_AGCCTL_CAL);
@@ -828,30 +1236,37 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
828 AR5K_PHY_AGCCTL_CAL, 0, false)) { 1236 AR5K_PHY_AGCCTL_CAL, 0, false)) {
829 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n", 1237 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
830 channel->center_freq); 1238 channel->center_freq);
831 return -EAGAIN;
832 } 1239 }
833 1240
834 /* 1241 /*
835 * Start noise floor calibration
836 *
837 * If we run NF calibration before AGC, it always times out. 1242 * If we run NF calibration before AGC, it always times out.
838 * Binary HAL starts NF and AGC calibration at the same time 1243 * Binary HAL starts NF and AGC calibration at the same time
839 * and only waits for AGC to finish. I believe that's wrong because 1244 * and only waits for AGC to finish. Also if AGC or NF cal.
840 * during NF calibration, rx path is also routed to a detector, so if 1245 * times out, reset doesn't fail on binary HAL. I believe
 841          * it doesn't finish we won't have RX.   1246          * that's wrong because, since rx path is routed to a detector,
842 * 1247 * if cal. doesn't finish we won't have RX. Sam's HAL for AR5210/5211
843 * XXX: Find an interval that's OK for all cards... 1248 * enables noise floor calibration after offset calibration and if noise
1249 * floor calibration fails, reset fails. I believe that's
                                 1250          * a better approach; we just need to find a polling interval
                                 1251          * that suits best. Even if reset continues, we need to make
1252 * sure that rx path is ready.
844 */ 1253 */
845 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1254 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
846 1255
1256
1257 /*
1258 * Configure QCUs/DCUs
1259 */
1260
1261 /* TODO: HW Compression support for data queues */
1262 /* TODO: Burst prefetch for data queues */
1263
847 /* 1264 /*
848 * Reset queues and start beacon timers at the end of the reset routine 1265 * Reset queues and start beacon timers at the end of the reset routine
1266 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
1267 * Note: If we want we can assign multiple qcus on one dcu.
849 */ 1268 */
850 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) { 1269 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
851 /*No QCU on 5210*/
852 if (ah->ah_version != AR5K_AR5210)
853 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(i), i);
854
855 ret = ath5k_hw_reset_tx_queue(ah, i); 1270 ret = ath5k_hw_reset_tx_queue(ah, i);
856 if (ret) { 1271 if (ret) {
857 ATH5K_ERR(ah->ah_sc, 1272 ATH5K_ERR(ah->ah_sc,
@@ -860,14 +1275,40 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
860 } 1275 }
861 } 1276 }
862 1277
1278
1279 /*
1280 * Configure DMA/Interrupts
1281 */
1282
1283 /*
1284 * Set Rx/Tx DMA Configuration
1285 *
1286 * Set standard DMA size (128). Note that
1287 * a DMA size of 512 causes rx overruns and tx errors
1288 * on pci-e cards (tested on 5424 but since rx overruns
1289 * also occur on 5416/5418 with madwifi we set 128
1290 * for all PCI-E cards to be safe).
1291 *
1292 * XXX: need to check 5210 for this
                                 1294          * TODO: Check out tx trigger level, it's always 64 on dumps but I
1294 * guess we can tweak it and see how it goes ;-)
1295 */
1296 if (ah->ah_version != AR5K_AR5210) {
1297 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
1298 AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
1299 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
1300 AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
1301 }
1302
863 /* Pre-enable interrupts on 5211/5212*/ 1303 /* Pre-enable interrupts on 5211/5212*/
864 if (ah->ah_version != AR5K_AR5210) 1304 if (ah->ah_version != AR5K_AR5210)
865 ath5k_hw_set_imr(ah, ah->ah_imr); 1305 ath5k_hw_set_imr(ah, ah->ah_imr);
866 1306
867 /* 1307 /*
868 * Set RF kill flags if supported by the device (read from the EEPROM) 1308 * Setup RFKill interrupt if rfkill flag is set on eeprom.
 869          * Disable gpio_intr for now since it results in a system hang.   1309          * TODO: Use gpio pin and polarity info from eeprom
870 * TODO: Handle this in ath5k_intr 1310 * TODO: Handle this in ath5k_intr because it'll result
                                 1311          * in a nasty interrupt storm.
871 */ 1312 */
872#if 0 1313#if 0
873 if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) { 1314 if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
@@ -880,33 +1321,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
880 } 1321 }
881#endif 1322#endif
882 1323
883 /* 1324 /* Enable 32KHz clock function for AR5212+ chips
884 * Set the 32MHz reference clock on 5212 phy clock sleep register 1325 * Set clocks to 32KHz operation and use an
885 * 1326 * external 32KHz crystal when sleeping if one
886 * TODO: Find out how to switch to external 32Khz clock to save power 1327 * exists */
887 */ 1328 if (ah->ah_version == AR5K_AR5212)
888 if (ah->ah_version == AR5K_AR5212) { 1329 ath5k_hw_set_sleep_clock(ah, true);
889 ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
890 ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
891 ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
892 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
893 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
894 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
895
896 data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f ;
897 data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
898 0x00000f80 : 0x00001380 ;
899 ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
900 data = 0;
901 }
902
903 if (ah->ah_version == AR5K_AR5212) {
904 ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
905 ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
906 ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
907 if (ah->ah_mac_srev >= AR5K_SREV_AR2413)
908 ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
909 }
910 1330
911 /* 1331 /*
912 * Disable beacons and reset the register 1332 * Disable beacons and reset the register
diff --git a/drivers/net/wireless/ath5k/rfbuffer.h b/drivers/net/wireless/ath5k/rfbuffer.h
new file mode 100644
index 00000000000..e50baff6617
--- /dev/null
+++ b/drivers/net/wireless/ath5k/rfbuffer.h
@@ -0,0 +1,1181 @@
1/*
2 * RF Buffer handling functions
3 *
4 * Copyright (c) 2009 Nick Kossifidis <mickflemm@gmail.com>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 */
19
20
21/*
22 * There are some special registers on the RF chip
23 * that control various operation settings related mostly to
24 * the analog parts (channel, gain adjustment etc).
25 *
 26 * We don't write to those registers directly; instead
 27 * we send a data packet to the chip, using a special register
 28 * that holds all the settings we need. After we've sent the
 29 * data packet, we write to another special register to notify the hw
 30 * to apply the settings. This is done so that control registers
 31 * can be dynamically programmed during operation and the settings
 32 * are applied to the hw faster.
33 *
34 * We call each data packet an "RF Bank" and all the data we write
35 * (all RF Banks) "RF Buffer". This file holds initial RF Buffer
36 * data for the different RF chips, and various info to match RF
37 * Buffer offsets with specific RF registers so that we can access
 38 * them. We tweak these settings in the rfregs_init function.
39 *
40 * Also check out reg.h and U.S. Patent 6677779 B1 (about buffer
41 * registers and control registers):
42 *
43 * http://www.google.com/patents?id=qNURAAAAEBAJ
44 */
45
46
47/*
48 * Struct to hold default mode specific RF
49 * register values (RF Banks)
50 */
51struct ath5k_ini_rfbuffer {
52 u8 rfb_bank; /* RF Bank number */
53 u16 rfb_ctrl_register; /* RF Buffer control register */
54 u32 rfb_mode_data[5]; /* RF Buffer data for each mode */
55};
56
57/*
58 * Struct to hold RF Buffer field
59 * infos used to access certain RF
60 * analog registers
61 */
62struct ath5k_rfb_field {
63 u8 len; /* Field length */
64 u16 pos; /* Offset on the raw packet */
 65        u8      col;            /* Column - used for shifting */
66};
67
68/*
69 * RF analog register definition
70 */
71struct ath5k_rf_reg {
72 u8 bank; /* RF Buffer Bank number */
73 u8 index; /* Register's index on rf_regs_idx */
74 struct ath5k_rfb_field field; /* RF Buffer field for this register */
75};
76
77/* Map RF registers to indexes
78 * We do this to handle common bits and make our
79 * life easier by using an index for each register
80 * instead of a full rfb_field */
81enum ath5k_rf_regs_idx {
82 /* BANK 6 */
83 AR5K_RF_OB_2GHZ = 0,
84 AR5K_RF_OB_5GHZ,
85 AR5K_RF_DB_2GHZ,
86 AR5K_RF_DB_5GHZ,
87 AR5K_RF_FIXED_BIAS_A,
88 AR5K_RF_FIXED_BIAS_B,
89 AR5K_RF_PWD_XPD,
90 AR5K_RF_XPD_SEL,
91 AR5K_RF_XPD_GAIN,
92 AR5K_RF_PD_GAIN_LO,
93 AR5K_RF_PD_GAIN_HI,
94 AR5K_RF_HIGH_VC_CP,
95 AR5K_RF_MID_VC_CP,
96 AR5K_RF_LOW_VC_CP,
97 AR5K_RF_PUSH_UP,
98 AR5K_RF_PAD2GND,
99 AR5K_RF_XB2_LVL,
100 AR5K_RF_XB5_LVL,
101 AR5K_RF_PWD_ICLOBUF_2G,
102 AR5K_RF_PWD_84,
103 AR5K_RF_PWD_90,
104 AR5K_RF_PWD_130,
105 AR5K_RF_PWD_131,
106 AR5K_RF_PWD_132,
107 AR5K_RF_PWD_136,
108 AR5K_RF_PWD_137,
109 AR5K_RF_PWD_138,
110 AR5K_RF_PWD_166,
111 AR5K_RF_PWD_167,
112 AR5K_RF_DERBY_CHAN_SEL_MODE,
113 /* BANK 7 */
114 AR5K_RF_GAIN_I,
115 AR5K_RF_PLO_SEL,
116 AR5K_RF_RFGAIN_SEL,
117 AR5K_RF_RFGAIN_STEP,
118 AR5K_RF_WAIT_S,
119 AR5K_RF_WAIT_I,
120 AR5K_RF_MAX_TIME,
121 AR5K_RF_MIXVGA_OVR,
122 AR5K_RF_MIXGAIN_OVR,
123 AR5K_RF_MIXGAIN_STEP,
124 AR5K_RF_PD_DELAY_A,
125 AR5K_RF_PD_DELAY_B,
126 AR5K_RF_PD_DELAY_XR,
127 AR5K_RF_PD_PERIOD_A,
128 AR5K_RF_PD_PERIOD_B,
129 AR5K_RF_PD_PERIOD_XR,
130};
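
To make the len/pos/col descriptors above concrete, here is a minimal sketch of pulling one such field out of a raw bank image. It is an illustration only, not the driver's actual accessor: the assumption that the field's absolute bit offset is simply pos + col * 8, the LSB-first word layout and the example_ function name are all made up for this sketch.

/*
 * Illustrative helper (not part of the driver): read an RF Buffer
 * field described by struct ath5k_rfb_field from a raw bank image.
 * Assumes, for the example only, that the absolute bit offset of the
 * field is pos + col * 8 and that bank words are packed LSB-first.
 */
static inline u32 example_rfb_field_get(const u32 *bank,
					unsigned int bank_words,
					const struct ath5k_rfb_field *f)
{
	unsigned int first = f->pos + f->col * 8; /* assumed bit offset */
	unsigned int word = first / 32;
	unsigned int bit = first % 32;
	u64 raw = bank[word];

	/* A field may straddle a 32bit word boundary */
	if (bit + f->len > 32 && word + 1 < bank_words)
		raw |= (u64)bank[word + 1] << 32;

	return (u32)((raw >> bit) & (((u64)1 << f->len) - 1));
}

A matching setter would clear the same bits and OR in the new value before the modified bank is pushed to the chip (presumably via the rfb_ctrl_register write described in the header comment above).
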
131
132
133/*******************\
134* RF5111 (Sombrero) *
135\*******************/
136
137/* BANK 6 len pos col */
138#define AR5K_RF5111_OB_2GHZ { 3, 119, 0 }
139#define AR5K_RF5111_DB_2GHZ { 3, 122, 0 }
140
141#define AR5K_RF5111_OB_5GHZ { 3, 104, 0 }
142#define AR5K_RF5111_DB_5GHZ { 3, 107, 0 }
143
144#define AR5K_RF5111_PWD_XPD { 1, 95, 0 }
145#define AR5K_RF5111_XPD_GAIN { 4, 96, 0 }
146
147/* Access to PWD registers */
148#define AR5K_RF5111_PWD(_n) { 1, (135 - _n), 3 }
149
150/* BANK 7 len pos col */
151#define AR5K_RF5111_GAIN_I { 6, 29, 0 }
152#define AR5K_RF5111_PLO_SEL { 1, 4, 0 }
153#define AR5K_RF5111_RFGAIN_SEL { 1, 36, 0 }
154#define AR5K_RF5111_RFGAIN_STEP { 6, 37, 0 }
155/* Only on AR5212 BaseBand and up */
156#define AR5K_RF5111_WAIT_S { 5, 19, 0 }
157#define AR5K_RF5111_WAIT_I { 5, 24, 0 }
158#define AR5K_RF5111_MAX_TIME { 2, 49, 0 }
159
160static const struct ath5k_rf_reg rf_regs_5111[] = {
161 {6, AR5K_RF_OB_2GHZ, AR5K_RF5111_OB_2GHZ},
162 {6, AR5K_RF_DB_2GHZ, AR5K_RF5111_DB_2GHZ},
163 {6, AR5K_RF_OB_5GHZ, AR5K_RF5111_OB_5GHZ},
164 {6, AR5K_RF_DB_5GHZ, AR5K_RF5111_DB_5GHZ},
165 {6, AR5K_RF_PWD_XPD, AR5K_RF5111_PWD_XPD},
166 {6, AR5K_RF_XPD_GAIN, AR5K_RF5111_XPD_GAIN},
167 {6, AR5K_RF_PWD_84, AR5K_RF5111_PWD(84)},
168 {6, AR5K_RF_PWD_90, AR5K_RF5111_PWD(90)},
169 {7, AR5K_RF_GAIN_I, AR5K_RF5111_GAIN_I},
170 {7, AR5K_RF_PLO_SEL, AR5K_RF5111_PLO_SEL},
171 {7, AR5K_RF_RFGAIN_SEL, AR5K_RF5111_RFGAIN_SEL},
172 {7, AR5K_RF_RFGAIN_STEP, AR5K_RF5111_RFGAIN_STEP},
173 {7, AR5K_RF_WAIT_S, AR5K_RF5111_WAIT_S},
174 {7, AR5K_RF_WAIT_I, AR5K_RF5111_WAIT_I},
175 {7, AR5K_RF_MAX_TIME, AR5K_RF5111_MAX_TIME}
176};
177
178/* Default mode specific settings */
179static const struct ath5k_ini_rfbuffer rfb_5111[] = {
180 { 0, 0x989c,
181 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
182 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
183 { 0, 0x989c,
184 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
185 { 0, 0x989c,
186 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
187 { 0, 0x989c,
188 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
189 { 0, 0x989c,
190 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
191 { 0, 0x989c,
192 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
193 { 0, 0x989c,
194 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
195 { 0, 0x989c,
196 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
197 { 0, 0x989c,
198 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
199 { 0, 0x989c,
200 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
201 { 0, 0x989c,
202 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
203 { 0, 0x989c,
204 { 0x00380000, 0x00380000, 0x00380000, 0x00380000, 0x00380000 } },
205 { 0, 0x989c,
206 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
207 { 0, 0x989c,
208 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
209 { 0, 0x989c,
210 { 0x00000000, 0x00000000, 0x000000c0, 0x00000080, 0x00000080 } },
211 { 0, 0x989c,
212 { 0x000400f9, 0x000400f9, 0x000400ff, 0x000400fd, 0x000400fd } },
213 { 0, 0x98d4,
214 { 0x00000000, 0x00000000, 0x00000004, 0x00000004, 0x00000004 } },
215 { 1, 0x98d4,
216 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
217 { 2, 0x98d4,
218 { 0x00000010, 0x00000014, 0x00000010, 0x00000010, 0x00000014 } },
219 { 3, 0x98d8,
220 { 0x00601068, 0x00601068, 0x00601068, 0x00601068, 0x00601068 } },
221 { 6, 0x989c,
222 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
223 { 6, 0x989c,
224 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
225 { 6, 0x989c,
226 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
227 { 6, 0x989c,
228 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
229 { 6, 0x989c,
230 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
231 { 6, 0x989c,
232 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
233 { 6, 0x989c,
234 { 0x04000000, 0x04000000, 0x04000000, 0x04000000, 0x04000000 } },
235 { 6, 0x989c,
236 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
237 { 6, 0x989c,
238 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
239 { 6, 0x989c,
240 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
241 { 6, 0x989c,
242 { 0x00000000, 0x00000000, 0x0a000000, 0x00000000, 0x00000000 } },
243 { 6, 0x989c,
244 { 0x003800c0, 0x00380080, 0x023800c0, 0x003800c0, 0x003800c0 } },
245 { 6, 0x989c,
246 { 0x00020006, 0x00020006, 0x00000006, 0x00020006, 0x00020006 } },
247 { 6, 0x989c,
248 { 0x00000089, 0x00000089, 0x00000089, 0x00000089, 0x00000089 } },
249 { 6, 0x989c,
250 { 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0 } },
251 { 6, 0x989c,
252 { 0x00040007, 0x00040007, 0x00040007, 0x00040007, 0x00040007 } },
253 { 6, 0x98d4,
254 { 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a } },
255 { 7, 0x989c,
256 { 0x00000040, 0x00000048, 0x00000040, 0x00000040, 0x00000040 } },
257 { 7, 0x989c,
258 { 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 } },
259 { 7, 0x989c,
260 { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } },
261 { 7, 0x989c,
262 { 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f } },
263 { 7, 0x989c,
264 { 0x000000f1, 0x000000f1, 0x00000061, 0x000000f1, 0x000000f1 } },
265 { 7, 0x989c,
266 { 0x0000904f, 0x0000904f, 0x0000904c, 0x0000904f, 0x0000904f } },
267 { 7, 0x989c,
268 { 0x0000125a, 0x0000125a, 0x0000129a, 0x0000125a, 0x0000125a } },
269 { 7, 0x98cc,
270 { 0x0000000e, 0x0000000e, 0x0000000f, 0x0000000e, 0x0000000e } },
271};
272
273
274
275/***********************\
276* RF5112/RF2112 (Derby) *
277\***********************/
278
279/* BANK 7 (Common) len pos col */
280#define AR5K_RF5112X_GAIN_I { 6, 14, 0 }
281#define AR5K_RF5112X_MIXVGA_OVR { 1, 36, 0 }
282#define AR5K_RF5112X_MIXGAIN_OVR { 2, 37, 0 }
283#define AR5K_RF5112X_MIXGAIN_STEP { 4, 32, 0 }
284#define AR5K_RF5112X_PD_DELAY_A { 4, 58, 0 }
285#define AR5K_RF5112X_PD_DELAY_B { 4, 62, 0 }
286#define AR5K_RF5112X_PD_DELAY_XR { 4, 66, 0 }
287#define AR5K_RF5112X_PD_PERIOD_A { 4, 70, 0 }
288#define AR5K_RF5112X_PD_PERIOD_B { 4, 74, 0 }
289#define AR5K_RF5112X_PD_PERIOD_XR { 4, 78, 0 }
290
291/* RFX112 (Derby 1) */
292
293/* BANK 6 len pos col */
294#define AR5K_RF5112_OB_2GHZ { 3, 269, 0 }
295#define AR5K_RF5112_DB_2GHZ { 3, 272, 0 }
296
297#define AR5K_RF5112_OB_5GHZ { 3, 261, 0 }
298#define AR5K_RF5112_DB_5GHZ { 3, 264, 0 }
299
300#define AR5K_RF5112_FIXED_BIAS_A { 1, 260, 0 }
301#define AR5K_RF5112_FIXED_BIAS_B { 1, 259, 0 }
302
303#define AR5K_RF5112_XPD_SEL { 1, 284, 0 }
304#define AR5K_RF5112_XPD_GAIN { 2, 252, 0 }
305
306/* Access to PWD registers */
307#define AR5K_RF5112_PWD(_n) { 1, (302 - _n), 3 }
308
309static const struct ath5k_rf_reg rf_regs_5112[] = {
310 {6, AR5K_RF_OB_2GHZ, AR5K_RF5112_OB_2GHZ},
311 {6, AR5K_RF_DB_2GHZ, AR5K_RF5112_DB_2GHZ},
312 {6, AR5K_RF_OB_5GHZ, AR5K_RF5112_OB_5GHZ},
313 {6, AR5K_RF_DB_5GHZ, AR5K_RF5112_DB_5GHZ},
314 {6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112_FIXED_BIAS_A},
315 {6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112_FIXED_BIAS_B},
316 {6, AR5K_RF_XPD_SEL, AR5K_RF5112_XPD_SEL},
317 {6, AR5K_RF_XPD_GAIN, AR5K_RF5112_XPD_GAIN},
318 {6, AR5K_RF_PWD_130, AR5K_RF5112_PWD(130)},
319 {6, AR5K_RF_PWD_131, AR5K_RF5112_PWD(131)},
320 {6, AR5K_RF_PWD_132, AR5K_RF5112_PWD(132)},
321 {6, AR5K_RF_PWD_136, AR5K_RF5112_PWD(136)},
322 {6, AR5K_RF_PWD_137, AR5K_RF5112_PWD(137)},
323 {6, AR5K_RF_PWD_138, AR5K_RF5112_PWD(138)},
324 {7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I},
325 {7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR},
326 {7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR},
327 {7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP},
328 {7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A},
329 {7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B},
330 {7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR},
331 {7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A},
332 {7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B},
333 {7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR},
334};
335
336/* Default mode specific settings */
337static const struct ath5k_ini_rfbuffer rfb_5112[] = {
338 { 1, 0x98d4,
339 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
340 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
341 { 2, 0x98d0,
342 { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } },
343 { 3, 0x98dc,
344 { 0x00a0c0c0, 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
345 { 6, 0x989c,
346 { 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000 } },
347 { 6, 0x989c,
348 { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } },
349 { 6, 0x989c,
350 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
351 { 6, 0x989c,
352 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
353 { 6, 0x989c,
354 { 0x00660000, 0x00660000, 0x00660000, 0x00660000, 0x00660000 } },
355 { 6, 0x989c,
356 { 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000 } },
357 { 6, 0x989c,
358 { 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000 } },
359 { 6, 0x989c,
360 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
361 { 6, 0x989c,
362 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
363 { 6, 0x989c,
364 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
365 { 6, 0x989c,
366 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
367 { 6, 0x989c,
368 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
369 { 6, 0x989c,
370 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
371 { 6, 0x989c,
372 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
373 { 6, 0x989c,
374 { 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000 } },
375 { 6, 0x989c,
376 { 0x00600000, 0x00600000, 0x00600000, 0x00600000, 0x00600000 } },
377 { 6, 0x989c,
378 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
379 { 6, 0x989c,
380 { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } },
381 { 6, 0x989c,
382 { 0x00640000, 0x00640000, 0x00640000, 0x00640000, 0x00640000 } },
383 { 6, 0x989c,
384 { 0x00200000, 0x00200000, 0x00200000, 0x00200000, 0x00200000 } },
385 { 6, 0x989c,
386 { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } },
387 { 6, 0x989c,
388 { 0x00250000, 0x00250000, 0x00250000, 0x00250000, 0x00250000 } },
389 { 6, 0x989c,
390 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
391 { 6, 0x989c,
392 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
393 { 6, 0x989c,
394 { 0x00510000, 0x00510000, 0x00510000, 0x00510000, 0x00510000 } },
395 { 6, 0x989c,
396 { 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000 } },
397 { 6, 0x989c,
398 { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } },
399 { 6, 0x989c,
400 { 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000 } },
401 { 6, 0x989c,
402 { 0x00400000, 0x00400000, 0x00400000, 0x00400000, 0x00400000 } },
403 { 6, 0x989c,
404 { 0x03090000, 0x03090000, 0x03090000, 0x03090000, 0x03090000 } },
405 { 6, 0x989c,
406 { 0x06000000, 0x06000000, 0x06000000, 0x06000000, 0x06000000 } },
407 { 6, 0x989c,
408 { 0x000000b0, 0x000000b0, 0x000000a8, 0x000000a8, 0x000000a8 } },
409 { 6, 0x989c,
410 { 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e } },
411 { 6, 0x989c,
412 { 0x006c4a41, 0x006c4a41, 0x006c4af1, 0x006c4a61, 0x006c4a61 } },
413 { 6, 0x989c,
414 { 0x0050892a, 0x0050892a, 0x0050892b, 0x0050892b, 0x0050892b } },
415 { 6, 0x989c,
416 { 0x00842400, 0x00842400, 0x00842400, 0x00842400, 0x00842400 } },
417 { 6, 0x989c,
418 { 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200 } },
419 { 6, 0x98d0,
420 { 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c } },
421 { 7, 0x989c,
422 { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
423 { 7, 0x989c,
424 { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
425 { 7, 0x989c,
426 { 0x0000000a, 0x0000000a, 0x00000012, 0x00000012, 0x00000012 } },
427 { 7, 0x989c,
428 { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
429 { 7, 0x989c,
430 { 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1 } },
431 { 7, 0x989c,
432 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
433 { 7, 0x989c,
434 { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
435 { 7, 0x989c,
436 { 0x00000022, 0x00000022, 0x00000022, 0x00000022, 0x00000022 } },
437 { 7, 0x989c,
438 { 0x00000092, 0x00000092, 0x00000092, 0x00000092, 0x00000092 } },
439 { 7, 0x989c,
440 { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
441 { 7, 0x989c,
442 { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
443 { 7, 0x989c,
444 { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
445 { 7, 0x98c4,
446 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
447};
448
449/* RFX112A (Derby 2) */
450
451/* BANK 6 len pos col */
452#define AR5K_RF5112A_OB_2GHZ { 3, 287, 0 }
453#define AR5K_RF5112A_DB_2GHZ { 3, 290, 0 }
454
455#define AR5K_RF5112A_OB_5GHZ { 3, 279, 0 }
456#define AR5K_RF5112A_DB_5GHZ { 3, 282, 0 }
457
458#define AR5K_RF5112A_FIXED_BIAS_A { 1, 278, 0 }
459#define AR5K_RF5112A_FIXED_BIAS_B { 1, 277, 0 }
460
461#define AR5K_RF5112A_XPD_SEL { 1, 302, 0 }
462#define AR5K_RF5112A_PDGAINLO { 2, 270, 0 }
463#define AR5K_RF5112A_PDGAINHI { 2, 257, 0 }
464
465/* Access to PWD registers */
466#define AR5K_RF5112A_PWD(_n) { 1, (306 - _n), 3 }
467
468/* Voltage regulators */
469#define AR5K_RF5112A_HIGH_VC_CP { 2, 90, 2 }
470#define AR5K_RF5112A_MID_VC_CP { 2, 92, 2 }
471#define AR5K_RF5112A_LOW_VC_CP { 2, 94, 2 }
472#define AR5K_RF5112A_PUSH_UP { 1, 254, 2 }
473
474/* Power consumption */
475#define AR5K_RF5112A_PAD2GND { 1, 281, 1 }
476#define AR5K_RF5112A_XB2_LVL { 2, 1, 3 }
477#define AR5K_RF5112A_XB5_LVL { 2, 3, 3 }
478
479static const struct ath5k_rf_reg rf_regs_5112a[] = {
480 {6, AR5K_RF_OB_2GHZ, AR5K_RF5112A_OB_2GHZ},
481 {6, AR5K_RF_DB_2GHZ, AR5K_RF5112A_DB_2GHZ},
482 {6, AR5K_RF_OB_5GHZ, AR5K_RF5112A_OB_5GHZ},
483 {6, AR5K_RF_DB_5GHZ, AR5K_RF5112A_DB_5GHZ},
484 {6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112A_FIXED_BIAS_A},
485 {6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112A_FIXED_BIAS_B},
486 {6, AR5K_RF_XPD_SEL, AR5K_RF5112A_XPD_SEL},
487 {6, AR5K_RF_PD_GAIN_LO, AR5K_RF5112A_PDGAINLO},
488 {6, AR5K_RF_PD_GAIN_HI, AR5K_RF5112A_PDGAINHI},
489 {6, AR5K_RF_PWD_130, AR5K_RF5112A_PWD(130)},
490 {6, AR5K_RF_PWD_131, AR5K_RF5112A_PWD(131)},
491 {6, AR5K_RF_PWD_132, AR5K_RF5112A_PWD(132)},
492 {6, AR5K_RF_PWD_136, AR5K_RF5112A_PWD(136)},
493 {6, AR5K_RF_PWD_137, AR5K_RF5112A_PWD(137)},
494 {6, AR5K_RF_PWD_138, AR5K_RF5112A_PWD(138)},
495 {6, AR5K_RF_PWD_166, AR5K_RF5112A_PWD(166)},
496 {6, AR5K_RF_PWD_167, AR5K_RF5112A_PWD(167)},
497 {6, AR5K_RF_HIGH_VC_CP, AR5K_RF5112A_HIGH_VC_CP},
498 {6, AR5K_RF_MID_VC_CP, AR5K_RF5112A_MID_VC_CP},
499 {6, AR5K_RF_LOW_VC_CP, AR5K_RF5112A_LOW_VC_CP},
500 {6, AR5K_RF_PUSH_UP, AR5K_RF5112A_PUSH_UP},
501 {6, AR5K_RF_PAD2GND, AR5K_RF5112A_PAD2GND},
502 {6, AR5K_RF_XB2_LVL, AR5K_RF5112A_XB2_LVL},
503 {6, AR5K_RF_XB5_LVL, AR5K_RF5112A_XB5_LVL},
504 {7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I},
505 {7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR},
506 {7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR},
507 {7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP},
508 {7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A},
509 {7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B},
510 {7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR},
511 {7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A},
512 {7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B},
513 {7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR},
514};
515
516/* Default mode specific settings */
517static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
518 { 1, 0x98d4,
519 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
520 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
521 { 2, 0x98d0,
522 { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } },
523 { 3, 0x98dc,
524 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
525 { 6, 0x989c,
526 { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } },
527 { 6, 0x989c,
528 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
529 { 6, 0x989c,
530 { 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000 } },
531 { 6, 0x989c,
532 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
533 { 6, 0x989c,
534 { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } },
535 { 6, 0x989c,
536 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
537 { 6, 0x989c,
538 { 0x00180000, 0x00180000, 0x00180000, 0x00180000, 0x00180000 } },
539 { 6, 0x989c,
540 { 0x00600000, 0x00600000, 0x006e0000, 0x006e0000, 0x006e0000 } },
541 { 6, 0x989c,
542 { 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000 } },
543 { 6, 0x989c,
544 { 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000 } },
545 { 6, 0x989c,
546 { 0x04480000, 0x04480000, 0x04480000, 0x04480000, 0x04480000 } },
547 { 6, 0x989c,
548 { 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000 } },
549 { 6, 0x989c,
550 { 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000 } },
551 { 6, 0x989c,
552 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
553 { 6, 0x989c,
554 { 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
555 { 6, 0x989c,
556 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
557 { 6, 0x989c,
558 { 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000 } },
559 { 6, 0x989c,
560 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
561 { 6, 0x989c,
562 { 0x02190000, 0x02190000, 0x02190000, 0x02190000, 0x02190000 } },
563 { 6, 0x989c,
564 { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } },
565 { 6, 0x989c,
566 { 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000 } },
567 { 6, 0x989c,
568 { 0x00990000, 0x00990000, 0x00990000, 0x00990000, 0x00990000 } },
569 { 6, 0x989c,
570 { 0x00500000, 0x00500000, 0x00500000, 0x00500000, 0x00500000 } },
571 { 6, 0x989c,
572 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
573 { 6, 0x989c,
574 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
575 { 6, 0x989c,
576 { 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000 } },
577 { 6, 0x989c,
578 { 0x01740000, 0x01740000, 0x01740000, 0x01740000, 0x01740000 } },
579 { 6, 0x989c,
580 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
581 { 6, 0x989c,
582 { 0x86280000, 0x86280000, 0x86280000, 0x86280000, 0x86280000 } },
583 { 6, 0x989c,
584 { 0x31840000, 0x31840000, 0x31840000, 0x31840000, 0x31840000 } },
585 { 6, 0x989c,
586 { 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080 } },
587 { 6, 0x989c,
588 { 0x00270019, 0x00270019, 0x00270019, 0x00270019, 0x00270019 } },
589 { 6, 0x989c,
590 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
591 { 6, 0x989c,
592 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
593 { 6, 0x989c,
594 { 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2 } },
595 { 6, 0x989c,
596 { 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084 } },
597 { 6, 0x989c,
598 { 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4 } },
599 { 6, 0x989c,
600 { 0x00119220, 0x00119220, 0x00119220, 0x00119220, 0x00119220 } },
601 { 6, 0x989c,
602 { 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800 } },
603 { 6, 0x98d8,
604 { 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230 } },
605 { 7, 0x989c,
606 { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
607 { 7, 0x989c,
608 { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
609 { 7, 0x989c,
610 { 0x00000012, 0x00000012, 0x00000012, 0x00000012, 0x00000012 } },
611 { 7, 0x989c,
612 { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
613 { 7, 0x989c,
614 { 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9 } },
615 { 7, 0x989c,
616 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
617 { 7, 0x989c,
618 { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
619 { 7, 0x989c,
620 { 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2 } },
621 { 7, 0x989c,
622 { 0x00000052, 0x00000052, 0x00000052, 0x00000052, 0x00000052 } },
623 { 7, 0x989c,
624 { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
625 { 7, 0x989c,
626 { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
627 { 7, 0x989c,
628 { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
629 { 7, 0x98c4,
630 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
631};
632
633
634
635/******************\
636* RF2413 (Griffin) *
637\******************/
638
639/* BANK 6 len pos col */
640#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 }
641#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 }
642
643static const struct ath5k_rf_reg rf_regs_2413[] = {
644 {6, AR5K_RF_OB_2GHZ, AR5K_RF2413_OB_2GHZ},
645 {6, AR5K_RF_DB_2GHZ, AR5K_RF2413_DB_2GHZ},
646};
647
648/* Default mode specific settings
649 * XXX: a/aTurbo ???
650 */
651static const struct ath5k_ini_rfbuffer rfb_2413[] = {
652 { 1, 0x98d4,
653 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
654 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
655 { 2, 0x98d0,
656 { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } },
657 { 3, 0x98dc,
658 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
659 { 6, 0x989c,
660 { 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000 } },
661 { 6, 0x989c,
662 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
663 { 6, 0x989c,
664 { 0x03000000, 0x03000000, 0x03000000, 0x03000000, 0x03000000 } },
665 { 6, 0x989c,
666 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
667 { 6, 0x989c,
668 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
669 { 6, 0x989c,
670 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
671 { 6, 0x989c,
672 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
673 { 6, 0x989c,
674 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
675 { 6, 0x989c,
676 { 0x40400000, 0x40400000, 0x40400000, 0x40400000, 0x40400000 } },
677 { 6, 0x989c,
678 { 0x65050000, 0x65050000, 0x65050000, 0x65050000, 0x65050000 } },
679 { 6, 0x989c,
680 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
681 { 6, 0x989c,
682 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
683 { 6, 0x989c,
684 { 0x00420000, 0x00420000, 0x00420000, 0x00420000, 0x00420000 } },
685 { 6, 0x989c,
686 { 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000 } },
687 { 6, 0x989c,
688 { 0x00030000, 0x00030000, 0x00030000, 0x00030000, 0x00030000 } },
689 { 6, 0x989c,
690 { 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000 } },
691 { 6, 0x989c,
692 { 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000 } },
693 { 6, 0x989c,
694 { 0x00220000, 0x00220000, 0x00220000, 0x00220000, 0x00220000 } },
695 { 6, 0x989c,
696 { 0x04220000, 0x04220000, 0x04220000, 0x04220000, 0x04220000 } },
697 { 6, 0x989c,
698 { 0x00230018, 0x00230018, 0x00230018, 0x00230018, 0x00230018 } },
699 { 6, 0x989c,
700 { 0x00280000, 0x00280000, 0x00280060, 0x00280060, 0x00280060 } },
701 { 6, 0x989c,
702 { 0x005000c0, 0x005000c0, 0x005000c3, 0x005000c3, 0x005000c3 } },
703 { 6, 0x989c,
704 { 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f } },
705 { 6, 0x989c,
706 { 0x00000458, 0x00000458, 0x00000458, 0x00000458, 0x00000458 } },
707 { 6, 0x989c,
708 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
709 { 6, 0x989c,
710 { 0x0000c000, 0x0000c000, 0x0000c000, 0x0000c000, 0x0000c000 } },
711 { 6, 0x98d8,
712 { 0x00400230, 0x00400230, 0x00400230, 0x00400230, 0x00400230 } },
713 { 7, 0x989c,
714 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
715 { 7, 0x989c,
716 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
717 { 7, 0x98cc,
718 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
719};
720
721
722
723/***************************\
724* RF2315/RF2316 (Cobra SoC) *
725\***************************/
726
727/* BANK 6 len pos col */
728#define AR5K_RF2316_OB_2GHZ { 3, 178, 0 }
729#define AR5K_RF2316_DB_2GHZ { 3, 175, 0 }
730
731static const struct ath5k_rf_reg rf_regs_2316[] = {
732 {6, AR5K_RF_OB_2GHZ, AR5K_RF2316_OB_2GHZ},
733 {6, AR5K_RF_DB_2GHZ, AR5K_RF2316_DB_2GHZ},
734};
735
736/* Default mode specific settings */
737static const struct ath5k_ini_rfbuffer rfb_2316[] = {
738 { 1, 0x98d4,
739 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
740 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
741 { 2, 0x98d0,
742 { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } },
743 { 3, 0x98dc,
744 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
745 { 6, 0x989c,
746 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
747 { 6, 0x989c,
748 { 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000 } },
749 { 6, 0x989c,
750 { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } },
751 { 6, 0x989c,
752 { 0x02000000, 0x02000000, 0x02000000, 0x02000000, 0x02000000 } },
753 { 6, 0x989c,
754 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
755 { 6, 0x989c,
756 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
757 { 6, 0x989c,
758 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
759 { 6, 0x989c,
760 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
761 { 6, 0x989c,
762 { 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000 } },
763 { 6, 0x989c,
764 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
765 { 6, 0x989c,
766 { 0x95150000, 0x95150000, 0x95150000, 0x95150000, 0x95150000 } },
767 { 6, 0x989c,
768 { 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000 } },
769 { 6, 0x989c,
770 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
771 { 6, 0x989c,
772 { 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000 } },
773 { 6, 0x989c,
774 { 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000 } },
775 { 6, 0x989c,
776 { 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000 } },
777 { 6, 0x989c,
778 { 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000 } },
779 { 6, 0x989c,
780 { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } },
781 { 6, 0x989c,
782 { 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000 } },
783 { 6, 0x989c,
784 { 0x10880000, 0x10880000, 0x10880000, 0x10880000, 0x10880000 } },
785 { 6, 0x989c,
786 { 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060 } },
787 { 6, 0x989c,
788 { 0x00a00000, 0x00a00000, 0x00a00080, 0x00a00080, 0x00a00080 } },
789 { 6, 0x989c,
790 { 0x00400000, 0x00400000, 0x0040000d, 0x0040000d, 0x0040000d } },
791 { 6, 0x989c,
792 { 0x00110400, 0x00110400, 0x00110400, 0x00110400, 0x00110400 } },
793 { 6, 0x989c,
794 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
795 { 6, 0x989c,
796 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
797 { 6, 0x989c,
798 { 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00 } },
799 { 6, 0x989c,
800 { 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8 } },
801 { 6, 0x98c0,
802 { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } },
803 { 7, 0x989c,
804 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
805 { 7, 0x989c,
806 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
807 { 7, 0x98cc,
808 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
809};
810
811
812
813/******************************\
814* RF5413/RF5424 (Eagle/Condor) *
815\******************************/
816
817/* BANK 6 len pos col */
818#define AR5K_RF5413_OB_2GHZ { 3, 241, 0 }
819#define AR5K_RF5413_DB_2GHZ { 3, 238, 0 }
820
821#define AR5K_RF5413_OB_5GHZ { 3, 247, 0 }
822#define AR5K_RF5413_DB_5GHZ { 3, 244, 0 }
823
824#define AR5K_RF5413_PWD_ICLOBUF2G { 3, 131, 3 }
825#define AR5K_RF5413_DERBY_CHAN_SEL_MODE { 1, 291, 2 }
826
827static const struct ath5k_rf_reg rf_regs_5413[] = {
828 {6, AR5K_RF_OB_2GHZ, AR5K_RF5413_OB_2GHZ},
829 {6, AR5K_RF_DB_2GHZ, AR5K_RF5413_DB_2GHZ},
830 {6, AR5K_RF_OB_5GHZ, AR5K_RF5413_OB_5GHZ},
831 {6, AR5K_RF_DB_5GHZ, AR5K_RF5413_DB_5GHZ},
832 {6, AR5K_RF_PWD_ICLOBUF_2G, AR5K_RF5413_PWD_ICLOBUF2G},
833 {6, AR5K_RF_DERBY_CHAN_SEL_MODE, AR5K_RF5413_DERBY_CHAN_SEL_MODE},
834};
835
836/* Default mode specific settings */
837static const struct ath5k_ini_rfbuffer rfb_5413[] = {
838 { 1, 0x98d4,
839 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
840 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
841 { 2, 0x98d0,
842 { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } },
843 { 3, 0x98dc,
844 { 0x00a000c0, 0x00a000c0, 0x00e000c0, 0x00e000c0, 0x00e000c0 } },
845 { 6, 0x989c,
846 { 0x33000000, 0x33000000, 0x33000000, 0x33000000, 0x33000000 } },
847 { 6, 0x989c,
848 { 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000 } },
849 { 6, 0x989c,
850 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
851 { 6, 0x989c,
852 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
853 { 6, 0x989c,
854 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
855 { 6, 0x989c,
856 { 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000 } },
857 { 6, 0x989c,
858 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
859 { 6, 0x989c,
860 { 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000 } },
861 { 6, 0x989c,
862 { 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000 } },
863 { 6, 0x989c,
864 { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } },
865 { 6, 0x989c,
866 { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } },
867 { 6, 0x989c,
868 { 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000 } },
869 { 6, 0x989c,
870 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
871 { 6, 0x989c,
872 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
873 { 6, 0x989c,
874 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
875 { 6, 0x989c,
876 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
877 { 6, 0x989c,
878 { 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000 } },
879 { 6, 0x989c,
880 { 0x00610000, 0x00610000, 0x00610000, 0x00610000, 0x00610000 } },
881 { 6, 0x989c,
882 { 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
883 { 6, 0x989c,
884 { 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000 } },
885 { 6, 0x989c,
886 { 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000 } },
887 { 6, 0x989c,
888 { 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000 } },
889 { 6, 0x989c,
890 { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } },
891 { 6, 0x989c,
892 { 0x00440000, 0x00440000, 0x00440000, 0x00440000, 0x00440000 } },
893 { 6, 0x989c,
894 { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } },
895 { 6, 0x989c,
896 { 0x00100080, 0x00100080, 0x00100080, 0x00100080, 0x00100080 } },
897 { 6, 0x989c,
898 { 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034 } },
899 { 6, 0x989c,
900 { 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0 } },
901 { 6, 0x989c,
902 { 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f } },
903 { 6, 0x989c,
904 { 0x00510040, 0x00510040, 0x00510040, 0x00510040, 0x00510040 } },
905 { 6, 0x989c,
906 { 0x005000da, 0x005000da, 0x005000da, 0x005000da, 0x005000da } },
907 { 6, 0x989c,
908 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
909 { 6, 0x989c,
910 { 0x00004044, 0x00004044, 0x00004044, 0x00004044, 0x00004044 } },
911 { 6, 0x989c,
912 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
913 { 6, 0x989c,
914 { 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0 } },
915 { 6, 0x989c,
916 { 0x00002c00, 0x00002c00, 0x00003600, 0x00003600, 0x00002c00 } },
917 { 6, 0x98c8,
918 { 0x00000403, 0x00000403, 0x00040403, 0x00040403, 0x00040403 } },
919 { 7, 0x989c,
920 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
921 { 7, 0x989c,
922 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
923 { 7, 0x98cc,
924 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
925};
926
927
928
929/***************************\
930* RF2425/RF2417 (Swan/Nala) *
931* AR2317 (Spider SoC) *
932\***************************/
933
934/* BANK 6 len pos col */
935#define AR5K_RF2425_OB_2GHZ { 3, 193, 0 }
936#define AR5K_RF2425_DB_2GHZ { 3, 190, 0 }
937
938static const struct ath5k_rf_reg rf_regs_2425[] = {
939 {6, AR5K_RF_OB_2GHZ, AR5K_RF2425_OB_2GHZ},
940 {6, AR5K_RF_DB_2GHZ, AR5K_RF2425_DB_2GHZ},
941};
942
943/* Default mode specific settings
944 * XXX: a/aTurbo ?
945 */
946static const struct ath5k_ini_rfbuffer rfb_2425[] = {
947 { 1, 0x98d4,
948 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
949 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
950 { 2, 0x98d0,
951 { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } },
952 { 3, 0x98dc,
953 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
954 { 6, 0x989c,
955 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
956 { 6, 0x989c,
957 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
958 { 6, 0x989c,
959 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
960 { 6, 0x989c,
961 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
962 { 6, 0x989c,
963 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
964 { 6, 0x989c,
965 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
966 { 6, 0x989c,
967 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
968 { 6, 0x989c,
969 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
970 { 6, 0x989c,
971 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
972 { 6, 0x989c,
973 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
974 { 6, 0x989c,
975 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
976 { 6, 0x989c,
977 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
978 { 6, 0x989c,
979 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
980 { 6, 0x989c,
981 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
982 { 6, 0x989c,
983 { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } },
984 { 6, 0x989c,
985 { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
986 { 6, 0x989c,
987 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
988 { 6, 0x989c,
989 { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
990 { 6, 0x989c,
991 { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000 } },
992 { 6, 0x989c,
993 { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } },
994 { 6, 0x989c,
995 { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
996 { 6, 0x989c,
997 { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } },
998 { 6, 0x989c,
999 { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
1000 { 6, 0x989c,
1001 { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
1002 { 6, 0x989c,
1003 { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
1004 { 6, 0x989c,
1005 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1006 { 6, 0x989c,
1007 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1008 { 6, 0x989c,
1009 { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
1010 { 6, 0x989c,
1011 { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } },
1012 { 6, 0x98c4,
1013 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
1014 { 7, 0x989c,
1015 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
1016 { 7, 0x989c,
1017 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
1018 { 7, 0x98cc,
1019 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
1020};
1021
1022/*
1023 * TODO: Handle the few differences with swan during
1024 * bank modification and get rid of this
1025 */
1026static const struct ath5k_ini_rfbuffer rfb_2317[] = {
1027 { 1, 0x98d4,
1028 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
1029 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
1030 { 2, 0x98d0,
1031 { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } },
1032 { 3, 0x98dc,
1033 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
1034 { 6, 0x989c,
1035 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
1036 { 6, 0x989c,
1037 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1038 { 6, 0x989c,
1039 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1040 { 6, 0x989c,
1041 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1042 { 6, 0x989c,
1043 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1044 { 6, 0x989c,
1045 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1046 { 6, 0x989c,
1047 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1048 { 6, 0x989c,
1049 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1050 { 6, 0x989c,
1051 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1052 { 6, 0x989c,
1053 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1054 { 6, 0x989c,
1055 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1056 { 6, 0x989c,
1057 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
1058 { 6, 0x989c,
1059 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1060 { 6, 0x989c,
1061 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1062 { 6, 0x989c,
1063 { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } },
1064 { 6, 0x989c,
1065 { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
1066 { 6, 0x989c,
1067 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
1068 { 6, 0x989c,
1069 { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
1070 { 6, 0x989c,
1071 { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000 } },
1072 { 6, 0x989c,
1073 { 0x00140100, 0x00140100, 0x00140100, 0x00140100, 0x00140100 } },
1074 { 6, 0x989c,
1075 { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
1076 { 6, 0x989c,
1077 { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } },
1078 { 6, 0x989c,
1079 { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
1080 { 6, 0x989c,
1081 { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
1082 { 6, 0x989c,
1083 { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
1084 { 6, 0x989c,
1085 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1086 { 6, 0x989c,
1087 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1088 { 6, 0x989c,
1089 { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
1090 { 6, 0x989c,
1091 { 0x00009688, 0x00009688, 0x00009688, 0x00009688, 0x00009688 } },
1092 { 6, 0x98c4,
1093 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
1094 { 7, 0x989c,
1095 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
1096 { 7, 0x989c,
1097 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
1098 { 7, 0x98cc,
1099 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
1100};
1101
1102/*
1103 * TODO: Handle the few differences with swan during
1104 * bank modification and get rid of this
1105 * XXX: a/aTurbo ?
1106 */
1107static const struct ath5k_ini_rfbuffer rfb_2417[] = {
1108 { 1, 0x98d4,
1109 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
1110 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
1111 { 2, 0x98d0,
1112 { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } },
1113 { 3, 0x98dc,
1114 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
1115 { 6, 0x989c,
1116 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
1117 { 6, 0x989c,
1118 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1119 { 6, 0x989c,
1120 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1121 { 6, 0x989c,
1122 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1123 { 6, 0x989c,
1124 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1125 { 6, 0x989c,
1126 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1127 { 6, 0x989c,
1128 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1129 { 6, 0x989c,
1130 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1131 { 6, 0x989c,
1132 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1133 { 6, 0x989c,
1134 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1135 { 6, 0x989c,
1136 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1137 { 6, 0x989c,
1138 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
1139 { 6, 0x989c,
1140 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1141 { 6, 0x989c,
1142 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1143 { 6, 0x989c,
1144 { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } },
1145 { 6, 0x989c,
1146 { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
1147 { 6, 0x989c,
1148 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
1149 { 6, 0x989c,
1150 { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
1151 { 6, 0x989c,
1152 { 0x00e70000, 0x00e70000, 0x80e70000, 0x80e70000, 0x00e70000 } },
1153 { 6, 0x989c,
1154 { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } },
1155 { 6, 0x989c,
1156 { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
1157 { 6, 0x989c,
1158 { 0x0007001a, 0x0007001a, 0x0207001a, 0x0207001a, 0x0007001a } },
1159 { 6, 0x989c,
1160 { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
1161 { 6, 0x989c,
1162 { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
1163 { 6, 0x989c,
1164 { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
1165 { 6, 0x989c,
1166 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1167 { 6, 0x989c,
1168 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1169 { 6, 0x989c,
1170 { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
1171 { 6, 0x989c,
1172 { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } },
1173 { 6, 0x98c4,
1174 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
1175 { 7, 0x989c,
1176 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
1177 { 7, 0x989c,
1178 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
1179 { 7, 0x98cc,
1180 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
1181};
diff --git a/drivers/net/wireless/ath5k/rfgain.h b/drivers/net/wireless/ath5k/rfgain.h
new file mode 100644
index 00000000000..1354d8c392c
--- /dev/null
+++ b/drivers/net/wireless/ath5k/rfgain.h
@@ -0,0 +1,516 @@
1/*
2 * RF Gain optimization
3 *
4 * Copyright (c) 2004-2009 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 *
19 */
20
21/*
22 * Mode-specific RF Gain table (64 bytes) for RF5111/5112
23 * (RF5110 only comes with AR5210 and only supports a/aTurbo modes, so initial
24 * RF Gain values are included in AR5K_AR5210_INI)
25 */
26struct ath5k_ini_rfgain {
27 u16 rfg_register; /* RF Gain register address */
28 u32 rfg_value[2]; /* [freq (see below)] */
29};
30
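/*
 * A minimal sketch of how tables of this shape (below) are consumed; the
 * helper names (ath5k_apply_ini_rfgain, ath5k_hw_reg_write) and the band
 * indexing are assumptions for the sketch, not part of this header: each
 * entry's rfg_value[band] is written to rfg_register, where band selects
 * the 5GHz (index 0) or 2GHz (index 1) column.
 */
static inline void ath5k_apply_ini_rfgain(struct ath5k_hw *ah,
					  const struct ath5k_ini_rfgain *rfg,
					  unsigned int size,
					  unsigned int band)
{
	unsigned int i;

	/* Walk the table and program every RF gain register for the band */
	for (i = 0; i < size; i++)
		ath5k_hw_reg_write(ah, rfg[i].rfg_value[band],
				   rfg[i].rfg_register);
}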
31/* Initial RF Gain settings for RF5111 */
32static const struct ath5k_ini_rfgain rfgain_5111[] = {
33 /* 5GHz 2GHz */
34 { AR5K_RF_GAIN(0), { 0x000001a9, 0x00000000 } },
35 { AR5K_RF_GAIN(1), { 0x000001e9, 0x00000040 } },
36 { AR5K_RF_GAIN(2), { 0x00000029, 0x00000080 } },
37 { AR5K_RF_GAIN(3), { 0x00000069, 0x00000150 } },
38 { AR5K_RF_GAIN(4), { 0x00000199, 0x00000190 } },
39 { AR5K_RF_GAIN(5), { 0x000001d9, 0x000001d0 } },
40 { AR5K_RF_GAIN(6), { 0x00000019, 0x00000010 } },
41 { AR5K_RF_GAIN(7), { 0x00000059, 0x00000044 } },
42 { AR5K_RF_GAIN(8), { 0x00000099, 0x00000084 } },
43 { AR5K_RF_GAIN(9), { 0x000001a5, 0x00000148 } },
44 { AR5K_RF_GAIN(10), { 0x000001e5, 0x00000188 } },
45 { AR5K_RF_GAIN(11), { 0x00000025, 0x000001c8 } },
46 { AR5K_RF_GAIN(12), { 0x000001c8, 0x00000014 } },
47 { AR5K_RF_GAIN(13), { 0x00000008, 0x00000042 } },
48 { AR5K_RF_GAIN(14), { 0x00000048, 0x00000082 } },
49 { AR5K_RF_GAIN(15), { 0x00000088, 0x00000178 } },
50 { AR5K_RF_GAIN(16), { 0x00000198, 0x000001b8 } },
51 { AR5K_RF_GAIN(17), { 0x000001d8, 0x000001f8 } },
52 { AR5K_RF_GAIN(18), { 0x00000018, 0x00000012 } },
53 { AR5K_RF_GAIN(19), { 0x00000058, 0x00000052 } },
54 { AR5K_RF_GAIN(20), { 0x00000098, 0x00000092 } },
55 { AR5K_RF_GAIN(21), { 0x000001a4, 0x0000017c } },
56 { AR5K_RF_GAIN(22), { 0x000001e4, 0x000001bc } },
57 { AR5K_RF_GAIN(23), { 0x00000024, 0x000001fc } },
58 { AR5K_RF_GAIN(24), { 0x00000064, 0x0000000a } },
59 { AR5K_RF_GAIN(25), { 0x000000a4, 0x0000004a } },
60 { AR5K_RF_GAIN(26), { 0x000000e4, 0x0000008a } },
61 { AR5K_RF_GAIN(27), { 0x0000010a, 0x0000015a } },
62 { AR5K_RF_GAIN(28), { 0x0000014a, 0x0000019a } },
63 { AR5K_RF_GAIN(29), { 0x0000018a, 0x000001da } },
64 { AR5K_RF_GAIN(30), { 0x000001ca, 0x0000000e } },
65 { AR5K_RF_GAIN(31), { 0x0000000a, 0x0000004e } },
66 { AR5K_RF_GAIN(32), { 0x0000004a, 0x0000008e } },
67 { AR5K_RF_GAIN(33), { 0x0000008a, 0x0000015e } },
68 { AR5K_RF_GAIN(34), { 0x000001ba, 0x0000019e } },
69 { AR5K_RF_GAIN(35), { 0x000001fa, 0x000001de } },
70 { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000009 } },
71 { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000049 } },
72 { AR5K_RF_GAIN(38), { 0x00000186, 0x00000089 } },
73 { AR5K_RF_GAIN(39), { 0x000001c6, 0x00000179 } },
74 { AR5K_RF_GAIN(40), { 0x00000006, 0x000001b9 } },
75 { AR5K_RF_GAIN(41), { 0x00000046, 0x000001f9 } },
76 { AR5K_RF_GAIN(42), { 0x00000086, 0x00000039 } },
77 { AR5K_RF_GAIN(43), { 0x000000c6, 0x00000079 } },
78 { AR5K_RF_GAIN(44), { 0x000000c6, 0x000000b9 } },
79 { AR5K_RF_GAIN(45), { 0x000000c6, 0x000001bd } },
80 { AR5K_RF_GAIN(46), { 0x000000c6, 0x000001fd } },
81 { AR5K_RF_GAIN(47), { 0x000000c6, 0x0000003d } },
82 { AR5K_RF_GAIN(48), { 0x000000c6, 0x0000007d } },
83 { AR5K_RF_GAIN(49), { 0x000000c6, 0x000000bd } },
84 { AR5K_RF_GAIN(50), { 0x000000c6, 0x000000fd } },
85 { AR5K_RF_GAIN(51), { 0x000000c6, 0x000000fd } },
86 { AR5K_RF_GAIN(52), { 0x000000c6, 0x000000fd } },
87 { AR5K_RF_GAIN(53), { 0x000000c6, 0x000000fd } },
88 { AR5K_RF_GAIN(54), { 0x000000c6, 0x000000fd } },
89 { AR5K_RF_GAIN(55), { 0x000000c6, 0x000000fd } },
90 { AR5K_RF_GAIN(56), { 0x000000c6, 0x000000fd } },
91 { AR5K_RF_GAIN(57), { 0x000000c6, 0x000000fd } },
92 { AR5K_RF_GAIN(58), { 0x000000c6, 0x000000fd } },
93 { AR5K_RF_GAIN(59), { 0x000000c6, 0x000000fd } },
94 { AR5K_RF_GAIN(60), { 0x000000c6, 0x000000fd } },
95 { AR5K_RF_GAIN(61), { 0x000000c6, 0x000000fd } },
96 { AR5K_RF_GAIN(62), { 0x000000c6, 0x000000fd } },
97 { AR5K_RF_GAIN(63), { 0x000000c6, 0x000000fd } },
98};
99
100/* Initial RF Gain settings for RF5112 */
101static const struct ath5k_ini_rfgain rfgain_5112[] = {
102 /* 5GHz 2GHz */
103 { AR5K_RF_GAIN(0), { 0x00000007, 0x00000007 } },
104 { AR5K_RF_GAIN(1), { 0x00000047, 0x00000047 } },
105 { AR5K_RF_GAIN(2), { 0x00000087, 0x00000087 } },
106 { AR5K_RF_GAIN(3), { 0x000001a0, 0x000001a0 } },
107 { AR5K_RF_GAIN(4), { 0x000001e0, 0x000001e0 } },
108 { AR5K_RF_GAIN(5), { 0x00000020, 0x00000020 } },
109 { AR5K_RF_GAIN(6), { 0x00000060, 0x00000060 } },
110 { AR5K_RF_GAIN(7), { 0x000001a1, 0x000001a1 } },
111 { AR5K_RF_GAIN(8), { 0x000001e1, 0x000001e1 } },
112 { AR5K_RF_GAIN(9), { 0x00000021, 0x00000021 } },
113 { AR5K_RF_GAIN(10), { 0x00000061, 0x00000061 } },
114 { AR5K_RF_GAIN(11), { 0x00000162, 0x00000162 } },
115 { AR5K_RF_GAIN(12), { 0x000001a2, 0x000001a2 } },
116 { AR5K_RF_GAIN(13), { 0x000001e2, 0x000001e2 } },
117 { AR5K_RF_GAIN(14), { 0x00000022, 0x00000022 } },
118 { AR5K_RF_GAIN(15), { 0x00000062, 0x00000062 } },
119 { AR5K_RF_GAIN(16), { 0x00000163, 0x00000163 } },
120 { AR5K_RF_GAIN(17), { 0x000001a3, 0x000001a3 } },
121 { AR5K_RF_GAIN(18), { 0x000001e3, 0x000001e3 } },
122 { AR5K_RF_GAIN(19), { 0x00000023, 0x00000023 } },
123 { AR5K_RF_GAIN(20), { 0x00000063, 0x00000063 } },
124 { AR5K_RF_GAIN(21), { 0x00000184, 0x00000184 } },
125 { AR5K_RF_GAIN(22), { 0x000001c4, 0x000001c4 } },
126 { AR5K_RF_GAIN(23), { 0x00000004, 0x00000004 } },
127 { AR5K_RF_GAIN(24), { 0x000001ea, 0x0000000b } },
128 { AR5K_RF_GAIN(25), { 0x0000002a, 0x0000004b } },
129 { AR5K_RF_GAIN(26), { 0x0000006a, 0x0000008b } },
130 { AR5K_RF_GAIN(27), { 0x000000aa, 0x000001ac } },
131 { AR5K_RF_GAIN(28), { 0x000001ab, 0x000001ec } },
132 { AR5K_RF_GAIN(29), { 0x000001eb, 0x0000002c } },
133 { AR5K_RF_GAIN(30), { 0x0000002b, 0x00000012 } },
134 { AR5K_RF_GAIN(31), { 0x0000006b, 0x00000052 } },
135 { AR5K_RF_GAIN(32), { 0x000000ab, 0x00000092 } },
136 { AR5K_RF_GAIN(33), { 0x000001ac, 0x00000193 } },
137 { AR5K_RF_GAIN(34), { 0x000001ec, 0x000001d3 } },
138 { AR5K_RF_GAIN(35), { 0x0000002c, 0x00000013 } },
139 { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000053 } },
140 { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000093 } },
141 { AR5K_RF_GAIN(38), { 0x000000ba, 0x00000194 } },
142 { AR5K_RF_GAIN(39), { 0x000001bb, 0x000001d4 } },
143 { AR5K_RF_GAIN(40), { 0x000001fb, 0x00000014 } },
144 { AR5K_RF_GAIN(41), { 0x0000003b, 0x0000003a } },
145 { AR5K_RF_GAIN(42), { 0x0000007b, 0x0000007a } },
146 { AR5K_RF_GAIN(43), { 0x000000bb, 0x000000ba } },
147 { AR5K_RF_GAIN(44), { 0x000001bc, 0x000001bb } },
148 { AR5K_RF_GAIN(45), { 0x000001fc, 0x000001fb } },
149 { AR5K_RF_GAIN(46), { 0x0000003c, 0x0000003b } },
150 { AR5K_RF_GAIN(47), { 0x0000007c, 0x0000007b } },
151 { AR5K_RF_GAIN(48), { 0x000000bc, 0x000000bb } },
152 { AR5K_RF_GAIN(49), { 0x000000fc, 0x000001bc } },
153 { AR5K_RF_GAIN(50), { 0x000000fc, 0x000001fc } },
154 { AR5K_RF_GAIN(51), { 0x000000fc, 0x0000003c } },
155 { AR5K_RF_GAIN(52), { 0x000000fc, 0x0000007c } },
156 { AR5K_RF_GAIN(53), { 0x000000fc, 0x000000bc } },
157 { AR5K_RF_GAIN(54), { 0x000000fc, 0x000000fc } },
158 { AR5K_RF_GAIN(55), { 0x000000fc, 0x000000fc } },
159 { AR5K_RF_GAIN(56), { 0x000000fc, 0x000000fc } },
160 { AR5K_RF_GAIN(57), { 0x000000fc, 0x000000fc } },
161 { AR5K_RF_GAIN(58), { 0x000000fc, 0x000000fc } },
162 { AR5K_RF_GAIN(59), { 0x000000fc, 0x000000fc } },
163 { AR5K_RF_GAIN(60), { 0x000000fc, 0x000000fc } },
164 { AR5K_RF_GAIN(61), { 0x000000fc, 0x000000fc } },
165 { AR5K_RF_GAIN(62), { 0x000000fc, 0x000000fc } },
166 { AR5K_RF_GAIN(63), { 0x000000fc, 0x000000fc } },
167};
168
169/* Initial RF Gain settings for RF2413 */
170static const struct ath5k_ini_rfgain rfgain_2413[] = {
171 { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
172 { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
173 { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
174 { AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } },
175 { AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } },
176 { AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } },
177 { AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } },
178 { AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } },
179 { AR5K_RF_GAIN(8), { 0x00000000, 0x00000168 } },
180 { AR5K_RF_GAIN(9), { 0x00000000, 0x000001a8 } },
181 { AR5K_RF_GAIN(10), { 0x00000000, 0x000001e8 } },
182 { AR5K_RF_GAIN(11), { 0x00000000, 0x00000028 } },
183 { AR5K_RF_GAIN(12), { 0x00000000, 0x00000068 } },
184 { AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } },
185 { AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } },
186 { AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } },
187 { AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } },
188 { AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } },
189 { AR5K_RF_GAIN(18), { 0x00000000, 0x00000190 } },
190 { AR5K_RF_GAIN(19), { 0x00000000, 0x000001d0 } },
191 { AR5K_RF_GAIN(20), { 0x00000000, 0x00000010 } },
192 { AR5K_RF_GAIN(21), { 0x00000000, 0x00000050 } },
193 { AR5K_RF_GAIN(22), { 0x00000000, 0x00000090 } },
194 { AR5K_RF_GAIN(23), { 0x00000000, 0x00000191 } },
195 { AR5K_RF_GAIN(24), { 0x00000000, 0x000001d1 } },
196 { AR5K_RF_GAIN(25), { 0x00000000, 0x00000011 } },
197 { AR5K_RF_GAIN(26), { 0x00000000, 0x00000051 } },
198 { AR5K_RF_GAIN(27), { 0x00000000, 0x00000091 } },
199 { AR5K_RF_GAIN(28), { 0x00000000, 0x00000178 } },
200 { AR5K_RF_GAIN(29), { 0x00000000, 0x000001b8 } },
201 { AR5K_RF_GAIN(30), { 0x00000000, 0x000001f8 } },
202 { AR5K_RF_GAIN(31), { 0x00000000, 0x00000038 } },
203 { AR5K_RF_GAIN(32), { 0x00000000, 0x00000078 } },
204 { AR5K_RF_GAIN(33), { 0x00000000, 0x00000199 } },
205 { AR5K_RF_GAIN(34), { 0x00000000, 0x000001d9 } },
206 { AR5K_RF_GAIN(35), { 0x00000000, 0x00000019 } },
207 { AR5K_RF_GAIN(36), { 0x00000000, 0x00000059 } },
208 { AR5K_RF_GAIN(37), { 0x00000000, 0x00000099 } },
209 { AR5K_RF_GAIN(38), { 0x00000000, 0x000000d9 } },
210 { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } },
211 { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f9 } },
212 { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } },
213 { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } },
214 { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } },
215 { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } },
216 { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } },
217 { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } },
218 { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } },
219 { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } },
220 { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } },
221 { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } },
222 { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } },
223 { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } },
224 { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } },
225 { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } },
226 { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } },
227 { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } },
228 { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } },
229 { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } },
230 { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } },
231 { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } },
232 { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } },
233 { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } },
234 { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } },
235};
236
237/* Initial RF Gain settings for AR2316 */
238static const struct ath5k_ini_rfgain rfgain_2316[] = {
239 { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
240 { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
241 { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
242 { AR5K_RF_GAIN(3), { 0x00000000, 0x000000c0 } },
243 { AR5K_RF_GAIN(4), { 0x00000000, 0x000000e0 } },
244 { AR5K_RF_GAIN(5), { 0x00000000, 0x000000e0 } },
245 { AR5K_RF_GAIN(6), { 0x00000000, 0x00000128 } },
246 { AR5K_RF_GAIN(7), { 0x00000000, 0x00000128 } },
247 { AR5K_RF_GAIN(8), { 0x00000000, 0x00000128 } },
248 { AR5K_RF_GAIN(9), { 0x00000000, 0x00000168 } },
249 { AR5K_RF_GAIN(10), { 0x00000000, 0x000001a8 } },
250 { AR5K_RF_GAIN(11), { 0x00000000, 0x000001e8 } },
251 { AR5K_RF_GAIN(12), { 0x00000000, 0x00000028 } },
252 { AR5K_RF_GAIN(13), { 0x00000000, 0x00000068 } },
253 { AR5K_RF_GAIN(14), { 0x00000000, 0x000000a8 } },
254 { AR5K_RF_GAIN(15), { 0x00000000, 0x000000e8 } },
255 { AR5K_RF_GAIN(16), { 0x00000000, 0x000000e8 } },
256 { AR5K_RF_GAIN(17), { 0x00000000, 0x00000130 } },
257 { AR5K_RF_GAIN(18), { 0x00000000, 0x00000130 } },
258 { AR5K_RF_GAIN(19), { 0x00000000, 0x00000170 } },
259 { AR5K_RF_GAIN(20), { 0x00000000, 0x000001b0 } },
260 { AR5K_RF_GAIN(21), { 0x00000000, 0x000001f0 } },
261 { AR5K_RF_GAIN(22), { 0x00000000, 0x00000030 } },
262 { AR5K_RF_GAIN(23), { 0x00000000, 0x00000070 } },
263 { AR5K_RF_GAIN(24), { 0x00000000, 0x000000b0 } },
264 { AR5K_RF_GAIN(25), { 0x00000000, 0x000000f0 } },
265 { AR5K_RF_GAIN(26), { 0x00000000, 0x000000f0 } },
266 { AR5K_RF_GAIN(27), { 0x00000000, 0x000000f0 } },
267 { AR5K_RF_GAIN(28), { 0x00000000, 0x000000f0 } },
268 { AR5K_RF_GAIN(29), { 0x00000000, 0x000000f0 } },
269 { AR5K_RF_GAIN(30), { 0x00000000, 0x000000f0 } },
270 { AR5K_RF_GAIN(31), { 0x00000000, 0x000000f0 } },
271 { AR5K_RF_GAIN(32), { 0x00000000, 0x000000f0 } },
272 { AR5K_RF_GAIN(33), { 0x00000000, 0x000000f0 } },
273 { AR5K_RF_GAIN(34), { 0x00000000, 0x000000f0 } },
274 { AR5K_RF_GAIN(35), { 0x00000000, 0x000000f0 } },
275 { AR5K_RF_GAIN(36), { 0x00000000, 0x000000f0 } },
276 { AR5K_RF_GAIN(37), { 0x00000000, 0x000000f0 } },
277 { AR5K_RF_GAIN(38), { 0x00000000, 0x000000f0 } },
278 { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f0 } },
279 { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f0 } },
280 { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f0 } },
281 { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f0 } },
282 { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f0 } },
283 { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f0 } },
284 { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f0 } },
285 { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f0 } },
286 { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f0 } },
287 { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f0 } },
288 { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f0 } },
289 { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f0 } },
290 { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f0 } },
291 { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f0 } },
292 { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f0 } },
293 { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f0 } },
294 { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f0 } },
295 { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f0 } },
296 { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f0 } },
297 { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f0 } },
298 { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f0 } },
299 { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f0 } },
300 { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f0 } },
301 { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f0 } },
302 { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f0 } },
303};
304
305
306/* Initial RF Gain settings for RF5413 */
307static const struct ath5k_ini_rfgain rfgain_5413[] = {
308 /* 5GHz 2GHz */
309 { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
310 { AR5K_RF_GAIN(1), { 0x00000040, 0x00000040 } },
311 { AR5K_RF_GAIN(2), { 0x00000080, 0x00000080 } },
312 { AR5K_RF_GAIN(3), { 0x000001a1, 0x00000161 } },
313 { AR5K_RF_GAIN(4), { 0x000001e1, 0x000001a1 } },
314 { AR5K_RF_GAIN(5), { 0x00000021, 0x000001e1 } },
315 { AR5K_RF_GAIN(6), { 0x00000061, 0x00000021 } },
316 { AR5K_RF_GAIN(7), { 0x00000188, 0x00000061 } },
317 { AR5K_RF_GAIN(8), { 0x000001c8, 0x00000188 } },
318 { AR5K_RF_GAIN(9), { 0x00000008, 0x000001c8 } },
319 { AR5K_RF_GAIN(10), { 0x00000048, 0x00000008 } },
320 { AR5K_RF_GAIN(11), { 0x00000088, 0x00000048 } },
321 { AR5K_RF_GAIN(12), { 0x000001a9, 0x00000088 } },
322 { AR5K_RF_GAIN(13), { 0x000001e9, 0x00000169 } },
323 { AR5K_RF_GAIN(14), { 0x00000029, 0x000001a9 } },
324 { AR5K_RF_GAIN(15), { 0x00000069, 0x000001e9 } },
325 { AR5K_RF_GAIN(16), { 0x000001d0, 0x00000029 } },
326 { AR5K_RF_GAIN(17), { 0x00000010, 0x00000069 } },
327 { AR5K_RF_GAIN(18), { 0x00000050, 0x00000190 } },
328 { AR5K_RF_GAIN(19), { 0x00000090, 0x000001d0 } },
329 { AR5K_RF_GAIN(20), { 0x000001b1, 0x00000010 } },
330 { AR5K_RF_GAIN(21), { 0x000001f1, 0x00000050 } },
331 { AR5K_RF_GAIN(22), { 0x00000031, 0x00000090 } },
332 { AR5K_RF_GAIN(23), { 0x00000071, 0x00000171 } },
333 { AR5K_RF_GAIN(24), { 0x000001b8, 0x000001b1 } },
334 { AR5K_RF_GAIN(25), { 0x000001f8, 0x000001f1 } },
335 { AR5K_RF_GAIN(26), { 0x00000038, 0x00000031 } },
336 { AR5K_RF_GAIN(27), { 0x00000078, 0x00000071 } },
337 { AR5K_RF_GAIN(28), { 0x00000199, 0x00000198 } },
338 { AR5K_RF_GAIN(29), { 0x000001d9, 0x000001d8 } },
339 { AR5K_RF_GAIN(30), { 0x00000019, 0x00000018 } },
340 { AR5K_RF_GAIN(31), { 0x00000059, 0x00000058 } },
341 { AR5K_RF_GAIN(32), { 0x00000099, 0x00000098 } },
342 { AR5K_RF_GAIN(33), { 0x000000d9, 0x00000179 } },
343 { AR5K_RF_GAIN(34), { 0x000000f9, 0x000001b9 } },
344 { AR5K_RF_GAIN(35), { 0x000000f9, 0x000001f9 } },
345 { AR5K_RF_GAIN(36), { 0x000000f9, 0x00000039 } },
346 { AR5K_RF_GAIN(37), { 0x000000f9, 0x00000079 } },
347 { AR5K_RF_GAIN(38), { 0x000000f9, 0x000000b9 } },
348 { AR5K_RF_GAIN(39), { 0x000000f9, 0x000000f9 } },
349 { AR5K_RF_GAIN(40), { 0x000000f9, 0x000000f9 } },
350 { AR5K_RF_GAIN(41), { 0x000000f9, 0x000000f9 } },
351 { AR5K_RF_GAIN(42), { 0x000000f9, 0x000000f9 } },
352 { AR5K_RF_GAIN(43), { 0x000000f9, 0x000000f9 } },
353 { AR5K_RF_GAIN(44), { 0x000000f9, 0x000000f9 } },
354 { AR5K_RF_GAIN(45), { 0x000000f9, 0x000000f9 } },
355 { AR5K_RF_GAIN(46), { 0x000000f9, 0x000000f9 } },
356 { AR5K_RF_GAIN(47), { 0x000000f9, 0x000000f9 } },
357 { AR5K_RF_GAIN(48), { 0x000000f9, 0x000000f9 } },
358 { AR5K_RF_GAIN(49), { 0x000000f9, 0x000000f9 } },
359 { AR5K_RF_GAIN(50), { 0x000000f9, 0x000000f9 } },
360 { AR5K_RF_GAIN(51), { 0x000000f9, 0x000000f9 } },
361 { AR5K_RF_GAIN(52), { 0x000000f9, 0x000000f9 } },
362 { AR5K_RF_GAIN(53), { 0x000000f9, 0x000000f9 } },
363 { AR5K_RF_GAIN(54), { 0x000000f9, 0x000000f9 } },
364 { AR5K_RF_GAIN(55), { 0x000000f9, 0x000000f9 } },
365 { AR5K_RF_GAIN(56), { 0x000000f9, 0x000000f9 } },
366 { AR5K_RF_GAIN(57), { 0x000000f9, 0x000000f9 } },
367 { AR5K_RF_GAIN(58), { 0x000000f9, 0x000000f9 } },
368 { AR5K_RF_GAIN(59), { 0x000000f9, 0x000000f9 } },
369 { AR5K_RF_GAIN(60), { 0x000000f9, 0x000000f9 } },
370 { AR5K_RF_GAIN(61), { 0x000000f9, 0x000000f9 } },
371 { AR5K_RF_GAIN(62), { 0x000000f9, 0x000000f9 } },
372 { AR5K_RF_GAIN(63), { 0x000000f9, 0x000000f9 } },
373};
374
375
376/* Initial RF Gain settings for RF2425 */
377static const struct ath5k_ini_rfgain rfgain_2425[] = {
378 { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
379 { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
380 { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
381 { AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } },
382 { AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } },
383 { AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } },
384 { AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } },
385 { AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } },
386 { AR5K_RF_GAIN(8), { 0x00000000, 0x00000188 } },
387 { AR5K_RF_GAIN(9), { 0x00000000, 0x000001c8 } },
388 { AR5K_RF_GAIN(10), { 0x00000000, 0x00000008 } },
389 { AR5K_RF_GAIN(11), { 0x00000000, 0x00000048 } },
390 { AR5K_RF_GAIN(12), { 0x00000000, 0x00000088 } },
391 { AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } },
392 { AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } },
393 { AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } },
394 { AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } },
395 { AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } },
396 { AR5K_RF_GAIN(18), { 0x00000000, 0x000001b0 } },
397 { AR5K_RF_GAIN(19), { 0x00000000, 0x000001f0 } },
398 { AR5K_RF_GAIN(20), { 0x00000000, 0x00000030 } },
399 { AR5K_RF_GAIN(21), { 0x00000000, 0x00000070 } },
400 { AR5K_RF_GAIN(22), { 0x00000000, 0x00000171 } },
401 { AR5K_RF_GAIN(23), { 0x00000000, 0x000001b1 } },
402 { AR5K_RF_GAIN(24), { 0x00000000, 0x000001f1 } },
403 { AR5K_RF_GAIN(25), { 0x00000000, 0x00000031 } },
404 { AR5K_RF_GAIN(26), { 0x00000000, 0x00000071 } },
405 { AR5K_RF_GAIN(27), { 0x00000000, 0x000001b8 } },
406 { AR5K_RF_GAIN(28), { 0x00000000, 0x000001f8 } },
407 { AR5K_RF_GAIN(29), { 0x00000000, 0x00000038 } },
408 { AR5K_RF_GAIN(30), { 0x00000000, 0x00000078 } },
409 { AR5K_RF_GAIN(31), { 0x00000000, 0x000000b8 } },
410 { AR5K_RF_GAIN(32), { 0x00000000, 0x000001b9 } },
411 { AR5K_RF_GAIN(33), { 0x00000000, 0x000001f9 } },
412 { AR5K_RF_GAIN(34), { 0x00000000, 0x00000039 } },
413 { AR5K_RF_GAIN(35), { 0x00000000, 0x00000079 } },
414 { AR5K_RF_GAIN(36), { 0x00000000, 0x000000b9 } },
415 { AR5K_RF_GAIN(37), { 0x00000000, 0x000000f9 } },
416 { AR5K_RF_GAIN(38), { 0x00000000, 0x000000f9 } },
417 { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } },
418 { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f9 } },
419 { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } },
420 { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } },
421 { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } },
422 { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } },
423 { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } },
424 { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } },
425 { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } },
426 { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } },
427 { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } },
428 { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } },
429 { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } },
430 { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } },
431 { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } },
432 { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } },
433 { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } },
434 { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } },
435 { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } },
436 { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } },
437 { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } },
438 { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } },
439 { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } },
440 { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } },
441 { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } },
442};
443
444#define AR5K_GAIN_CRN_FIX_BITS_5111 4
445#define AR5K_GAIN_CRN_FIX_BITS_5112 7
446#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
447#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
448#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
449#define AR5K_GAIN_CCK_PROBE_CORR 5
450#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
451#define AR5K_GAIN_STEP_COUNT 10
452
453/* Check if our current measurement is inside our
454 * current variable attenuation window */
455#define AR5K_GAIN_CHECK_ADJUST(_g) \
456 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
457
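/*
 * Usage sketch for the check above, assuming a struct ath5k_gain state
 * object that carries the g_current/g_low/g_high fields the macro
 * dereferences, and a hypothetical adjustment helper:
 */
static inline void ath5k_gain_poll_sketch(struct ath5k_gain *g)
{
	/* Measurement drifted outside the current variable attenuation
	 * window, so a new gain-optimization step has to be selected. */
	if (AR5K_GAIN_CHECK_ADJUST(g))
		ath5k_hw_gain_adjust(g);	/* hypothetical helper */
}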
458struct ath5k_gain_opt_step {
459 s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
460 s8 gos_gain;
461};
462
463struct ath5k_gain_opt {
464 u8 go_default;
465 u8 go_steps_count;
466 const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
467};
468
469/*
470 * Parameters on gos_param:
471 * 1) Tx clip PHY register
472 * 2) PWD 90 RF register
473 * 3) PWD 84 RF register
474 * 4) RFGainSel RF register
475 */
476static const struct ath5k_gain_opt rfgain_opt_5111 = {
477 4,
478 9,
479 {
480 { { 4, 1, 1, 1 }, 6 },
481 { { 4, 0, 1, 1 }, 4 },
482 { { 3, 1, 1, 1 }, 3 },
483 { { 4, 0, 0, 1 }, 1 },
484 { { 4, 1, 1, 0 }, 0 },
485 { { 4, 0, 1, 0 }, -2 },
486 { { 3, 1, 1, 0 }, -3 },
487 { { 4, 0, 0, 0 }, -4 },
488 { { 2, 1, 1, 0 }, -6 }
489 }
490};
491
492/*
493 * Parameters on gos_param:
494 * 1) Mixgain ovr RF register
495 * 2) PWD 138 RF register
496 * 3) PWD 137 RF register
497 * 4) PWD 136 RF register
498 * 5) PWD 132 RF register
499 * 6) PWD 131 RF register
500 * 7) PWD 130 RF register
501 */
502static const struct ath5k_gain_opt rfgain_opt_5112 = {
503 1,
504 8,
505 {
506 { { 3, 0, 0, 0, 0, 0, 0 }, 6 },
507 { { 2, 0, 0, 0, 0, 0, 0 }, 0 },
508 { { 1, 0, 0, 0, 0, 0, 0 }, -3 },
509 { { 0, 0, 0, 0, 0, 0, 0 }, -6 },
510 { { 0, 1, 1, 0, 0, 0, 0 }, -8 },
511 { { 0, 1, 1, 0, 1, 1, 0 }, -10 },
512 { { 0, 1, 0, 1, 1, 1, 0 }, -13 },
513 { { 0, 1, 0, 1, 1, 0, 1 }, -16 },
514 }
515};
516
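/*
 * The gain-optimization tables above are meant to be consumed step-wise:
 * starting from go_step[go_default], a step is chosen whose gos_gain
 * compensates the measured deviation, and that step's gos_param values
 * are programmed into the RF registers listed in the comments.  A minimal
 * selection sketch built only from the fields above; the helper name and
 * the selection policy are assumptions:
 */
static inline int ath5k_pick_gain_step(const struct ath5k_gain_opt *go,
				       s8 wanted_gain)
{
	int i, best = go->go_default;

	/* Choose the step whose gain correction is closest to the target */
	for (i = 0; i < go->go_steps_count; i++)
		if (abs(go->go_step[i].gos_gain - wanted_gain) <
		    abs(go->go_step[best].gos_gain - wanted_gain))
			best = i;

	return best;
}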
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile
index 1209d14613a..00629587b79 100644
--- a/drivers/net/wireless/ath9k/Makefile
+++ b/drivers/net/wireless/ath9k/Makefile
@@ -11,6 +11,8 @@ ath9k-y += hw.o \
11 xmit.o \ 11 xmit.o \
12 rc.o 12 rc.o
13 13
14ath9k-$(CONFIG_PCI) += pci.o
15ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o
14ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o 16ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o
15 17
16obj-$(CONFIG_ATH9K) += ath9k.o 18obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath9k/ahb.c b/drivers/net/wireless/ath9k/ahb.c
new file mode 100644
index 00000000000..391c9fd3b64
--- /dev/null
+++ b/drivers/net/wireless/ath9k/ahb.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <linux/nl80211.h>
20#include <linux/platform_device.h>
21#include <linux/ath9k_platform.h>
22#include "ath9k.h"
23
24/* return bus cachesize in 4B word units */
25static void ath_ahb_read_cachesize(struct ath_softc *sc, int *csz)
26{
27 *csz = L1_CACHE_BYTES >> 2;
28}
29
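/*
 * Worked example for the conversion above: with L1_CACHE_BYTES == 32,
 * *csz becomes 32 >> 2 == 8, i.e. eight 4-byte words are reported back
 * to the caller (64-byte cache lines would give 16).
 */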
30static void ath_ahb_cleanup(struct ath_softc *sc)
31{
32 iounmap(sc->mem);
33}
34
35static bool ath_ahb_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
36{
37 struct ath_softc *sc = ah->ah_sc;
38 struct platform_device *pdev = to_platform_device(sc->dev);
39 struct ath9k_platform_data *pdata;
40
41 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
42 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
43 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
44 "%s: flash read failed, offset %08x is out of range\n",
45 __func__, off);
46 return false;
47 }
48
49 *data = pdata->eeprom_data[off];
50 return true;
51}
52
53static struct ath_bus_ops ath_ahb_bus_ops = {
54 .read_cachesize = ath_ahb_read_cachesize,
55 .cleanup = ath_ahb_cleanup,
56
57 .eeprom_read = ath_ahb_eeprom_read,
58};
59
60static int ath_ahb_probe(struct platform_device *pdev)
61{
62 void __iomem *mem;
63 struct ath_softc *sc;
64 struct ieee80211_hw *hw;
65 struct resource *res;
66 int irq;
67 int ret = 0;
68 struct ath_hw *ah;
69
70 if (!pdev->dev.platform_data) {
71 dev_err(&pdev->dev, "no platform data specified\n");
72 ret = -EINVAL;
73 goto err_out;
74 }
75
76 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
77 if (res == NULL) {
78 dev_err(&pdev->dev, "no memory resource found\n");
79 ret = -ENXIO;
80 goto err_out;
81 }
82
83 mem = ioremap_nocache(res->start, res->end - res->start + 1);
84 if (mem == NULL) {
85 dev_err(&pdev->dev, "ioremap failed\n");
86 ret = -ENOMEM;
87 goto err_out;
88 }
89
90 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
91 if (res == NULL) {
92 dev_err(&pdev->dev, "no IRQ resource found\n");
93 ret = -ENXIO;
94 goto err_iounmap;
95 }
96
97 irq = res->start;
98
99 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
100 if (hw == NULL) {
101 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
102 ret = -ENOMEM;
103 goto err_iounmap;
104 }
105
106 SET_IEEE80211_DEV(hw, &pdev->dev);
107 platform_set_drvdata(pdev, hw);
108
109 sc = hw->priv;
110 sc->hw = hw;
111 sc->dev = &pdev->dev;
112 sc->mem = mem;
113 sc->bus_ops = &ath_ahb_bus_ops;
114 sc->irq = irq;
115
116 ret = ath_attach(AR5416_AR9100_DEVID, sc);
117 if (ret != 0) {
118 dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
119 ret = -ENODEV;
120 goto err_free_hw;
121 }
122
123 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
124 if (ret) {
125 dev_err(&pdev->dev, "request_irq failed, err=%d\n", ret);
126 ret = -EIO;
127 goto err_detach;
128 }
129
130 ah = sc->sc_ah;
131 printk(KERN_INFO
132 "%s: Atheros AR%s MAC/BB Rev:%x, "
133 "AR%s RF Rev:%x, mem=0x%lx, irq=%d\n",
134 wiphy_name(hw->wiphy),
135 ath_mac_bb_name(ah->hw_version.macVersion),
136 ah->hw_version.macRev,
137 ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
138 ah->hw_version.phyRev,
139 (unsigned long)mem, irq);
140
141 return 0;
142
143 err_detach:
144 ath_detach(sc);
145 err_free_hw:
146 ieee80211_free_hw(hw);
147 platform_set_drvdata(pdev, NULL);
148 err_iounmap:
149 iounmap(mem);
150 err_out:
151 return ret;
152}
153
154static int ath_ahb_remove(struct platform_device *pdev)
155{
156 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
157
158 if (hw) {
159 struct ath_softc *sc = hw->priv;
160
161 ath_cleanup(sc);
162 platform_set_drvdata(pdev, NULL);
163 }
164
165 return 0;
166}
167
168static struct platform_driver ath_ahb_driver = {
169 .probe = ath_ahb_probe,
170 .remove = ath_ahb_remove,
171 .driver = {
172 .name = "ath9k",
173 .owner = THIS_MODULE,
174 },
175};
176
177int ath_ahb_init(void)
178{
179 return platform_driver_register(&ath_ahb_driver);
180}
181
182void ath_ahb_exit(void)
183{
184 platform_driver_unregister(&ath_ahb_driver);
185}
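/*
 * For context, ath_ahb_probe() above expects board code to register a
 * platform device named "ath9k" that carries struct ath9k_platform_data
 * (its eeprom_data[] feeds ath_ahb_eeprom_read()) plus one MEM and one
 * IRQ resource.  A minimal board-side sketch; the base address, size and
 * IRQ number below are placeholders, not taken from this patch:
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>

static struct ath9k_platform_data wmac_data;	/* eeprom_data filled from flash by board code */

static struct resource wmac_res[] = {
	{
		.start	= 0x180c0000,		/* placeholder WMAC base */
		.end	= 0x180c0000 + 0x1ffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 2,			/* placeholder WMAC IRQ */
		.end	= 2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device wmac_device = {
	.name		= "ath9k",
	.id		= -1,
	.resource	= wmac_res,
	.num_resources	= ARRAY_SIZE(wmac_res),
	.dev		= {
		.platform_data = &wmac_data,
	},
};

/* Registered from board init code, e.g. platform_device_register(&wmac_device); */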
diff --git a/drivers/net/wireless/ath9k/ani.c b/drivers/net/wireless/ath9k/ani.c
index 251e2d9a7a4..d4df7e611df 100644
--- a/drivers/net/wireless/ath9k/ani.c
+++ b/drivers/net/wireless/ath9k/ani.c
@@ -14,23 +14,19 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21 18
22static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah, 19static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
23 struct ath9k_channel *chan) 20 struct ath9k_channel *chan)
24{ 21{
25 struct ath_hal_5416 *ahp = AH5416(ah);
26 int i; 22 int i;
27 23
28 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) { 24 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
29 if (ahp->ah_ani[i].c.channel == chan->channel) 25 if (ah->ani[i].c &&
26 ah->ani[i].c->channel == chan->channel)
30 return i; 27 return i;
31 if (ahp->ah_ani[i].c.channel == 0) { 28 if (ah->ani[i].c == NULL) {
32 ahp->ah_ani[i].c.channel = chan->channel; 29 ah->ani[i].c = chan;
33 ahp->ah_ani[i].c.channelFlags = chan->channelFlags;
34 return i; 30 return i;
35 } 31 }
36 } 32 }
@@ -41,41 +37,40 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah,
41 return 0; 37 return 0;
42} 38}
43 39
44static bool ath9k_hw_ani_control(struct ath_hal *ah, 40static bool ath9k_hw_ani_control(struct ath_hw *ah,
45 enum ath9k_ani_cmd cmd, int param) 41 enum ath9k_ani_cmd cmd, int param)
46{ 42{
47 struct ath_hal_5416 *ahp = AH5416(ah); 43 struct ar5416AniState *aniState = ah->curani;
48 struct ar5416AniState *aniState = ahp->ah_curani;
49 44
50 switch (cmd & ahp->ah_ani_function) { 45 switch (cmd & ah->ani_function) {
51 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{ 46 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
52 u32 level = param; 47 u32 level = param;
53 48
54 if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) { 49 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
55 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 50 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
56 "level out of range (%u > %u)\n", 51 "level out of range (%u > %u)\n",
57 level, 52 level,
58 (unsigned)ARRAY_SIZE(ahp->ah_totalSizeDesired)); 53 (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
59 return false; 54 return false;
60 } 55 }
61 56
62 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, 57 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
63 AR_PHY_DESIRED_SZ_TOT_DES, 58 AR_PHY_DESIRED_SZ_TOT_DES,
64 ahp->ah_totalSizeDesired[level]); 59 ah->totalSizeDesired[level]);
65 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, 60 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
66 AR_PHY_AGC_CTL1_COARSE_LOW, 61 AR_PHY_AGC_CTL1_COARSE_LOW,
67 ahp->ah_coarseLow[level]); 62 ah->coarse_low[level]);
68 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, 63 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
69 AR_PHY_AGC_CTL1_COARSE_HIGH, 64 AR_PHY_AGC_CTL1_COARSE_HIGH,
70 ahp->ah_coarseHigh[level]); 65 ah->coarse_high[level]);
71 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 66 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
72 AR_PHY_FIND_SIG_FIRPWR, 67 AR_PHY_FIND_SIG_FIRPWR,
73 ahp->ah_firpwr[level]); 68 ah->firpwr[level]);
74 69
75 if (level > aniState->noiseImmunityLevel) 70 if (level > aniState->noiseImmunityLevel)
76 ahp->ah_stats.ast_ani_niup++; 71 ah->stats.ast_ani_niup++;
77 else if (level < aniState->noiseImmunityLevel) 72 else if (level < aniState->noiseImmunityLevel)
78 ahp->ah_stats.ast_ani_nidown++; 73 ah->stats.ast_ani_nidown++;
79 aniState->noiseImmunityLevel = level; 74 aniState->noiseImmunityLevel = level;
80 break; 75 break;
81 } 76 }
@@ -129,9 +124,9 @@ static bool ath9k_hw_ani_control(struct ath_hal *ah,
129 124
130 if (!on != aniState->ofdmWeakSigDetectOff) { 125 if (!on != aniState->ofdmWeakSigDetectOff) {
131 if (on) 126 if (on)
132 ahp->ah_stats.ast_ani_ofdmon++; 127 ah->stats.ast_ani_ofdmon++;
133 else 128 else
134 ahp->ah_stats.ast_ani_ofdmoff++; 129 ah->stats.ast_ani_ofdmoff++;
135 aniState->ofdmWeakSigDetectOff = !on; 130 aniState->ofdmWeakSigDetectOff = !on;
136 } 131 }
137 break; 132 break;
@@ -145,9 +140,9 @@ static bool ath9k_hw_ani_control(struct ath_hal *ah,
145 weakSigThrCck[high]); 140 weakSigThrCck[high]);
146 if (high != aniState->cckWeakSigThreshold) { 141 if (high != aniState->cckWeakSigThreshold) {
147 if (high) 142 if (high)
148 ahp->ah_stats.ast_ani_cckhigh++; 143 ah->stats.ast_ani_cckhigh++;
149 else 144 else
150 ahp->ah_stats.ast_ani_ccklow++; 145 ah->stats.ast_ani_ccklow++;
151 aniState->cckWeakSigThreshold = high; 146 aniState->cckWeakSigThreshold = high;
152 } 147 }
153 break; 148 break;
@@ -167,9 +162,9 @@ static bool ath9k_hw_ani_control(struct ath_hal *ah,
167 AR_PHY_FIND_SIG_FIRSTEP, 162 AR_PHY_FIND_SIG_FIRSTEP,
168 firstep[level]); 163 firstep[level]);
169 if (level > aniState->firstepLevel) 164 if (level > aniState->firstepLevel)
170 ahp->ah_stats.ast_ani_stepup++; 165 ah->stats.ast_ani_stepup++;
171 else if (level < aniState->firstepLevel) 166 else if (level < aniState->firstepLevel)
172 ahp->ah_stats.ast_ani_stepdown++; 167 ah->stats.ast_ani_stepdown++;
173 aniState->firstepLevel = level; 168 aniState->firstepLevel = level;
174 break; 169 break;
175 } 170 }
@@ -190,9 +185,9 @@ static bool ath9k_hw_ani_control(struct ath_hal *ah,
190 AR_PHY_TIMING5_CYCPWR_THR1, 185 AR_PHY_TIMING5_CYCPWR_THR1,
191 cycpwrThr1[level]); 186 cycpwrThr1[level]);
192 if (level > aniState->spurImmunityLevel) 187 if (level > aniState->spurImmunityLevel)
193 ahp->ah_stats.ast_ani_spurup++; 188 ah->stats.ast_ani_spurup++;
194 else if (level < aniState->spurImmunityLevel) 189 else if (level < aniState->spurImmunityLevel)
195 ahp->ah_stats.ast_ani_spurdown++; 190 ah->stats.ast_ani_spurdown++;
196 aniState->spurImmunityLevel = level; 191 aniState->spurImmunityLevel = level;
197 break; 192 break;
198 } 193 }
@@ -223,7 +218,7 @@ static bool ath9k_hw_ani_control(struct ath_hal *ah,
223 return true; 218 return true;
224} 219}
225 220
226static void ath9k_hw_update_mibstats(struct ath_hal *ah, 221static void ath9k_hw_update_mibstats(struct ath_hw *ah,
227 struct ath9k_mib_stats *stats) 222 struct ath9k_mib_stats *stats)
228{ 223{
229 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL); 224 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
@@ -233,18 +228,17 @@ static void ath9k_hw_update_mibstats(struct ath_hal *ah,
233 stats->beacons += REG_READ(ah, AR_BEACON_CNT); 228 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
234} 229}
235 230
236static void ath9k_ani_restart(struct ath_hal *ah) 231static void ath9k_ani_restart(struct ath_hw *ah)
237{ 232{
238 struct ath_hal_5416 *ahp = AH5416(ah);
239 struct ar5416AniState *aniState; 233 struct ar5416AniState *aniState;
240 234
241 if (!DO_ANI(ah)) 235 if (!DO_ANI(ah))
242 return; 236 return;
243 237
244 aniState = ahp->ah_curani; 238 aniState = ah->curani;
245 239
246 aniState->listenTime = 0; 240 aniState->listenTime = 0;
247 if (ahp->ah_hasHwPhyCounters) { 241 if (ah->has_hw_phycounters) {
248 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) { 242 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
249 aniState->ofdmPhyErrBase = 0; 243 aniState->ofdmPhyErrBase = 0;
250 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 244 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
@@ -270,24 +264,22 @@ static void ath9k_ani_restart(struct ath_hal *ah)
270 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 264 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
271 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 265 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
272 266
273 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); 267 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
274 } 268 }
275 aniState->ofdmPhyErrCount = 0; 269 aniState->ofdmPhyErrCount = 0;
276 aniState->cckPhyErrCount = 0; 270 aniState->cckPhyErrCount = 0;
277} 271}
278 272
279static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah) 273static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
280{ 274{
281 struct ath_hal_5416 *ahp = AH5416(ah); 275 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
282 struct ath9k_channel *chan = ah->ah_curchan;
283 struct ar5416AniState *aniState; 276 struct ar5416AniState *aniState;
284 enum wireless_mode mode;
285 int32_t rssi; 277 int32_t rssi;
286 278
287 if (!DO_ANI(ah)) 279 if (!DO_ANI(ah))
288 return; 280 return;
289 281
290 aniState = ahp->ah_curani; 282 aniState = ah->curani;
291 283
292 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { 284 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
293 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 285 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
@@ -303,14 +295,14 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
303 } 295 }
304 } 296 }
305 297
306 if (ah->ah_opmode == NL80211_IFTYPE_AP) { 298 if (ah->opmode == NL80211_IFTYPE_AP) {
307 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { 299 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
308 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 300 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
309 aniState->firstepLevel + 1); 301 aniState->firstepLevel + 1);
310 } 302 }
311 return; 303 return;
312 } 304 }
313 rssi = BEACON_RSSI(ahp); 305 rssi = BEACON_RSSI(ah);
314 if (rssi > aniState->rssiThrHigh) { 306 if (rssi > aniState->rssiThrHigh) {
315 if (!aniState->ofdmWeakSigDetectOff) { 307 if (!aniState->ofdmWeakSigDetectOff) {
316 if (ath9k_hw_ani_control(ah, 308 if (ath9k_hw_ani_control(ah,
@@ -336,8 +328,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
336 aniState->firstepLevel + 1); 328 aniState->firstepLevel + 1);
337 return; 329 return;
338 } else { 330 } else {
339 mode = ath9k_hw_chan2wmode(ah, chan); 331 if (conf->channel->band == IEEE80211_BAND_2GHZ) {
340 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
341 if (!aniState->ofdmWeakSigDetectOff) 332 if (!aniState->ofdmWeakSigDetectOff)
342 ath9k_hw_ani_control(ah, 333 ath9k_hw_ani_control(ah,
343 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, 334 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
@@ -350,39 +341,36 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
350 } 341 }
351} 342}
352 343
353static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah) 344static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
354{ 345{
355 struct ath_hal_5416 *ahp = AH5416(ah); 346 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
356 struct ath9k_channel *chan = ah->ah_curchan;
357 struct ar5416AniState *aniState; 347 struct ar5416AniState *aniState;
358 enum wireless_mode mode;
359 int32_t rssi; 348 int32_t rssi;
360 349
361 if (!DO_ANI(ah)) 350 if (!DO_ANI(ah))
362 return; 351 return;
363 352
364 aniState = ahp->ah_curani; 353 aniState = ah->curani;
365 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { 354 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
366 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 355 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
367 aniState->noiseImmunityLevel + 1)) { 356 aniState->noiseImmunityLevel + 1)) {
368 return; 357 return;
369 } 358 }
370 } 359 }
371 if (ah->ah_opmode == NL80211_IFTYPE_AP) { 360 if (ah->opmode == NL80211_IFTYPE_AP) {
372 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { 361 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
373 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 362 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
374 aniState->firstepLevel + 1); 363 aniState->firstepLevel + 1);
375 } 364 }
376 return; 365 return;
377 } 366 }
378 rssi = BEACON_RSSI(ahp); 367 rssi = BEACON_RSSI(ah);
379 if (rssi > aniState->rssiThrLow) { 368 if (rssi > aniState->rssiThrLow) {
380 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) 369 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
381 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 370 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
382 aniState->firstepLevel + 1); 371 aniState->firstepLevel + 1);
383 } else { 372 } else {
384 mode = ath9k_hw_chan2wmode(ah, chan); 373 if (conf->channel->band == IEEE80211_BAND_2GHZ) {
385 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
386 if (aniState->firstepLevel > 0) 374 if (aniState->firstepLevel > 0)
387 ath9k_hw_ani_control(ah, 375 ath9k_hw_ani_control(ah,
388 ATH9K_ANI_FIRSTEP_LEVEL, 0); 376 ATH9K_ANI_FIRSTEP_LEVEL, 0);
@@ -390,22 +378,21 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah)
390 } 378 }
391} 379}
392 380
393static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah) 381static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
394{ 382{
395 struct ath_hal_5416 *ahp = AH5416(ah);
396 struct ar5416AniState *aniState; 383 struct ar5416AniState *aniState;
397 int32_t rssi; 384 int32_t rssi;
398 385
399 aniState = ahp->ah_curani; 386 aniState = ah->curani;
400 387
401 if (ah->ah_opmode == NL80211_IFTYPE_AP) { 388 if (ah->opmode == NL80211_IFTYPE_AP) {
402 if (aniState->firstepLevel > 0) { 389 if (aniState->firstepLevel > 0) {
403 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 390 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
404 aniState->firstepLevel - 1)) 391 aniState->firstepLevel - 1))
405 return; 392 return;
406 } 393 }
407 } else { 394 } else {
408 rssi = BEACON_RSSI(ahp); 395 rssi = BEACON_RSSI(ah);
409 if (rssi > aniState->rssiThrHigh) { 396 if (rssi > aniState->rssiThrHigh) {
410 /* XXX: Handle me */ 397 /* XXX: Handle me */
411 } else if (rssi > aniState->rssiThrLow) { 398 } else if (rssi > aniState->rssiThrLow) {
@@ -444,9 +431,8 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah)
444 } 431 }
445} 432}
446 433
447static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah) 434static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
448{ 435{
449 struct ath_hal_5416 *ahp = AH5416(ah);
450 struct ar5416AniState *aniState; 436 struct ar5416AniState *aniState;
451 u32 txFrameCount, rxFrameCount, cycleCount; 437 u32 txFrameCount, rxFrameCount, cycleCount;
452 int32_t listenTime; 438 int32_t listenTime;
@@ -455,11 +441,11 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
455 rxFrameCount = REG_READ(ah, AR_RFCNT); 441 rxFrameCount = REG_READ(ah, AR_RFCNT);
456 cycleCount = REG_READ(ah, AR_CCCNT); 442 cycleCount = REG_READ(ah, AR_CCCNT);
457 443
458 aniState = ahp->ah_curani; 444 aniState = ah->curani;
459 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) { 445 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
460 446
461 listenTime = 0; 447 listenTime = 0;
462 ahp->ah_stats.ast_ani_lzero++; 448 ah->stats.ast_ani_lzero++;
463 } else { 449 } else {
464 int32_t ccdelta = cycleCount - aniState->cycleCount; 450 int32_t ccdelta = cycleCount - aniState->cycleCount;
465 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount; 451 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
@@ -473,25 +459,24 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
473 return listenTime; 459 return listenTime;
474} 460}
475 461
476void ath9k_ani_reset(struct ath_hal *ah) 462void ath9k_ani_reset(struct ath_hw *ah)
477{ 463{
478 struct ath_hal_5416 *ahp = AH5416(ah);
479 struct ar5416AniState *aniState; 464 struct ar5416AniState *aniState;
480 struct ath9k_channel *chan = ah->ah_curchan; 465 struct ath9k_channel *chan = ah->curchan;
481 int index; 466 int index;
482 467
483 if (!DO_ANI(ah)) 468 if (!DO_ANI(ah))
484 return; 469 return;
485 470
486 index = ath9k_hw_get_ani_channel_idx(ah, chan); 471 index = ath9k_hw_get_ani_channel_idx(ah, chan);
487 aniState = &ahp->ah_ani[index]; 472 aniState = &ah->ani[index];
488 ahp->ah_curani = aniState; 473 ah->curani = aniState;
489 474
490 if (DO_ANI(ah) && ah->ah_opmode != NL80211_IFTYPE_STATION 475 if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION
491 && ah->ah_opmode != NL80211_IFTYPE_ADHOC) { 476 && ah->opmode != NL80211_IFTYPE_ADHOC) {
492 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 477 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
493 "Reset ANI state opmode %u\n", ah->ah_opmode); 478 "Reset ANI state opmode %u\n", ah->opmode);
494 ahp->ah_stats.ast_ani_reset++; 479 ah->stats.ast_ani_reset++;
495 480
496 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); 481 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
497 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); 482 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
@@ -504,15 +489,15 @@ void ath9k_ani_reset(struct ath_hal *ah)
504 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) | 489 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) |
505 ATH9K_RX_FILTER_PHYERR); 490 ATH9K_RX_FILTER_PHYERR);
506 491
507 if (ah->ah_opmode == NL80211_IFTYPE_AP) { 492 if (ah->opmode == NL80211_IFTYPE_AP) {
508 ahp->ah_curani->ofdmTrigHigh = 493 ah->curani->ofdmTrigHigh =
509 ah->ah_config.ofdm_trig_high; 494 ah->config.ofdm_trig_high;
510 ahp->ah_curani->ofdmTrigLow = 495 ah->curani->ofdmTrigLow =
511 ah->ah_config.ofdm_trig_low; 496 ah->config.ofdm_trig_low;
512 ahp->ah_curani->cckTrigHigh = 497 ah->curani->cckTrigHigh =
513 ah->ah_config.cck_trig_high; 498 ah->config.cck_trig_high;
514 ahp->ah_curani->cckTrigLow = 499 ah->curani->cckTrigLow =
515 ah->ah_config.cck_trig_low; 500 ah->config.cck_trig_low;
516 } 501 }
517 ath9k_ani_restart(ah); 502 ath9k_ani_restart(ah);
518 return; 503 return;
@@ -533,7 +518,7 @@ void ath9k_ani_reset(struct ath_hal *ah)
533 if (aniState->firstepLevel != 0) 518 if (aniState->firstepLevel != 0)
534 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 519 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
535 aniState->firstepLevel); 520 aniState->firstepLevel);
536 if (ahp->ah_hasHwPhyCounters) { 521 if (ah->has_hw_phycounters) {
537 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) & 522 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
538 ~ATH9K_RX_FILTER_PHYERR); 523 ~ATH9K_RX_FILTER_PHYERR);
539 ath9k_ani_restart(ah); 524 ath9k_ani_restart(ah);
@@ -547,31 +532,33 @@ void ath9k_ani_reset(struct ath_hal *ah)
547 } 532 }
548} 533}
549 534
550void ath9k_hw_ani_monitor(struct ath_hal *ah, 535void ath9k_hw_ani_monitor(struct ath_hw *ah,
551 const struct ath9k_node_stats *stats, 536 const struct ath9k_node_stats *stats,
552 struct ath9k_channel *chan) 537 struct ath9k_channel *chan)
553{ 538{
554 struct ath_hal_5416 *ahp = AH5416(ah);
555 struct ar5416AniState *aniState; 539 struct ar5416AniState *aniState;
556 int32_t listenTime; 540 int32_t listenTime;
557 541
558 aniState = ahp->ah_curani; 542 if (!DO_ANI(ah))
559 ahp->ah_stats.ast_nodestats = *stats; 543 return;
544
545 aniState = ah->curani;
546 ah->stats.ast_nodestats = *stats;
560 547
561 listenTime = ath9k_hw_ani_get_listen_time(ah); 548 listenTime = ath9k_hw_ani_get_listen_time(ah);
562 if (listenTime < 0) { 549 if (listenTime < 0) {
563 ahp->ah_stats.ast_ani_lneg++; 550 ah->stats.ast_ani_lneg++;
564 ath9k_ani_restart(ah); 551 ath9k_ani_restart(ah);
565 return; 552 return;
566 } 553 }
567 554
568 aniState->listenTime += listenTime; 555 aniState->listenTime += listenTime;
569 556
570 if (ahp->ah_hasHwPhyCounters) { 557 if (ah->has_hw_phycounters) {
571 u32 phyCnt1, phyCnt2; 558 u32 phyCnt1, phyCnt2;
572 u32 ofdmPhyErrCnt, cckPhyErrCnt; 559 u32 ofdmPhyErrCnt, cckPhyErrCnt;
573 560
574 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); 561 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
575 562
576 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); 563 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
577 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); 564 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
@@ -604,27 +591,24 @@ void ath9k_hw_ani_monitor(struct ath_hal *ah,
604 } 591 }
605 592
606 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; 593 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
607 ahp->ah_stats.ast_ani_ofdmerrs += 594 ah->stats.ast_ani_ofdmerrs +=
608 ofdmPhyErrCnt - aniState->ofdmPhyErrCount; 595 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
609 aniState->ofdmPhyErrCount = ofdmPhyErrCnt; 596 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
610 597
611 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; 598 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
612 ahp->ah_stats.ast_ani_cckerrs += 599 ah->stats.ast_ani_cckerrs +=
613 cckPhyErrCnt - aniState->cckPhyErrCount; 600 cckPhyErrCnt - aniState->cckPhyErrCount;
614 aniState->cckPhyErrCount = cckPhyErrCnt; 601 aniState->cckPhyErrCount = cckPhyErrCnt;
615 } 602 }
616 603
617 if (!DO_ANI(ah)) 604 if (aniState->listenTime > 5 * ah->aniperiod) {
618 return;
619
620 if (aniState->listenTime > 5 * ahp->ah_aniPeriod) {
621 if (aniState->ofdmPhyErrCount <= aniState->listenTime * 605 if (aniState->ofdmPhyErrCount <= aniState->listenTime *
622 aniState->ofdmTrigLow / 1000 && 606 aniState->ofdmTrigLow / 1000 &&
623 aniState->cckPhyErrCount <= aniState->listenTime * 607 aniState->cckPhyErrCount <= aniState->listenTime *
624 aniState->cckTrigLow / 1000) 608 aniState->cckTrigLow / 1000)
625 ath9k_hw_ani_lower_immunity(ah); 609 ath9k_hw_ani_lower_immunity(ah);
626 ath9k_ani_restart(ah); 610 ath9k_ani_restart(ah);
627 } else if (aniState->listenTime > ahp->ah_aniPeriod) { 611 } else if (aniState->listenTime > ah->aniperiod) {
628 if (aniState->ofdmPhyErrCount > aniState->listenTime * 612 if (aniState->ofdmPhyErrCount > aniState->listenTime *
629 aniState->ofdmTrigHigh / 1000) { 613 aniState->ofdmTrigHigh / 1000) {
630 ath9k_hw_ani_ofdm_err_trigger(ah); 614 ath9k_hw_ani_ofdm_err_trigger(ah);
@@ -638,20 +622,16 @@ void ath9k_hw_ani_monitor(struct ath_hal *ah,
638 } 622 }
639} 623}
640 624
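/*
 * Worked example of the thresholds applied in ath9k_hw_ani_monitor()
 * above (the period value is an assumption for illustration): with
 * ah->aniperiod == 100, immunity is lowered only once the accumulated
 * listenTime exceeds 500 and both ofdmPhyErrCount and cckPhyErrCount
 * stay at or below listenTime * ofdmTrigLow / 1000 and
 * listenTime * cckTrigLow / 1000 respectively; while listenTime is
 * between 100 and 500, the OFDM error trigger raises immunity as soon
 * as ofdmPhyErrCount exceeds listenTime * ofdmTrigHigh / 1000 (the CCK
 * path is analogous).
 */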
641bool ath9k_hw_phycounters(struct ath_hal *ah) 625bool ath9k_hw_phycounters(struct ath_hw *ah)
642{ 626{
643 struct ath_hal_5416 *ahp = AH5416(ah); 627 return ah->has_hw_phycounters ? true : false;
644
645 return ahp->ah_hasHwPhyCounters ? true : false;
646} 628}
647 629
648void ath9k_enable_mib_counters(struct ath_hal *ah) 630void ath9k_enable_mib_counters(struct ath_hw *ah)
649{ 631{
650 struct ath_hal_5416 *ahp = AH5416(ah);
651
652 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable MIB counters\n"); 632 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable MIB counters\n");
653 633
654 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); 634 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
655 635
656 REG_WRITE(ah, AR_FILT_OFDM, 0); 636 REG_WRITE(ah, AR_FILT_OFDM, 0);
657 REG_WRITE(ah, AR_FILT_CCK, 0); 637 REG_WRITE(ah, AR_FILT_CCK, 0);
@@ -662,21 +642,19 @@ void ath9k_enable_mib_counters(struct ath_hal *ah)
662 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 642 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
663} 643}
664 644
665void ath9k_hw_disable_mib_counters(struct ath_hal *ah) 645void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
666{ 646{
667 struct ath_hal_5416 *ahp = AH5416(ah);
668
669 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disable MIB counters\n"); 647 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disable MIB counters\n");
670 648
671 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC); 649 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC);
672 650
673 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); 651 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
674 652
675 REG_WRITE(ah, AR_FILT_OFDM, 0); 653 REG_WRITE(ah, AR_FILT_OFDM, 0);
676 REG_WRITE(ah, AR_FILT_CCK, 0); 654 REG_WRITE(ah, AR_FILT_CCK, 0);
677} 655}
678 656
679u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah, 657u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
680 u32 *rxc_pcnt, 658 u32 *rxc_pcnt,
681 u32 *rxf_pcnt, 659 u32 *rxf_pcnt,
682 u32 *txf_pcnt) 660 u32 *txf_pcnt)
@@ -721,10 +699,9 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
721 * any of the MIB counters overflow/trigger so don't assume we're 699 * any of the MIB counters overflow/trigger so don't assume we're
722 * here because a PHY error counter triggered. 700 * here because a PHY error counter triggered.
723 */ 701 */
724void ath9k_hw_procmibevent(struct ath_hal *ah, 702void ath9k_hw_procmibevent(struct ath_hw *ah,
725 const struct ath9k_node_stats *stats) 703 const struct ath9k_node_stats *stats)
726{ 704{
727 struct ath_hal_5416 *ahp = AH5416(ah);
728 u32 phyCnt1, phyCnt2; 705 u32 phyCnt1, phyCnt2;
729 706
730 /* Reset these counters regardless */ 707 /* Reset these counters regardless */
@@ -734,8 +711,8 @@ void ath9k_hw_procmibevent(struct ath_hal *ah,
734 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR); 711 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
735 712
736 /* Clear the mib counters and save them in the stats */ 713 /* Clear the mib counters and save them in the stats */
737 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); 714 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
738 ahp->ah_stats.ast_nodestats = *stats; 715 ah->stats.ast_nodestats = *stats;
739 716
740 if (!DO_ANI(ah)) 717 if (!DO_ANI(ah))
741 return; 718 return;
@@ -745,17 +722,17 @@ void ath9k_hw_procmibevent(struct ath_hal *ah,
745 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); 722 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
746 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) || 723 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
747 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) { 724 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
748 struct ar5416AniState *aniState = ahp->ah_curani; 725 struct ar5416AniState *aniState = ah->curani;
749 u32 ofdmPhyErrCnt, cckPhyErrCnt; 726 u32 ofdmPhyErrCnt, cckPhyErrCnt;
750 727
751 /* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */ 728 /* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */
752 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; 729 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
753 ahp->ah_stats.ast_ani_ofdmerrs += 730 ah->stats.ast_ani_ofdmerrs +=
754 ofdmPhyErrCnt - aniState->ofdmPhyErrCount; 731 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
755 aniState->ofdmPhyErrCount = ofdmPhyErrCnt; 732 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
756 733
757 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; 734 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
758 ahp->ah_stats.ast_ani_cckerrs += 735 ah->stats.ast_ani_cckerrs +=
759 cckPhyErrCnt - aniState->cckPhyErrCount; 736 cckPhyErrCnt - aniState->cckPhyErrCount;
760 aniState->cckPhyErrCount = cckPhyErrCnt; 737 aniState->cckPhyErrCount = cckPhyErrCnt;
761 738
@@ -774,9 +751,8 @@ void ath9k_hw_procmibevent(struct ath_hal *ah,
774 } 751 }
775} 752}
776 753
777void ath9k_hw_ani_setup(struct ath_hal *ah) 754void ath9k_hw_ani_setup(struct ath_hw *ah)
778{ 755{
779 struct ath_hal_5416 *ahp = AH5416(ah);
780 int i; 756 int i;
781 757
782 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; 758 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
@@ -785,66 +761,63 @@ void ath9k_hw_ani_setup(struct ath_hal *ah)
785 const int firpwr[] = { -78, -78, -78, -78, -80 }; 761 const int firpwr[] = { -78, -78, -78, -78, -80 };
786 762
787 for (i = 0; i < 5; i++) { 763 for (i = 0; i < 5; i++) {
788 ahp->ah_totalSizeDesired[i] = totalSizeDesired[i]; 764 ah->totalSizeDesired[i] = totalSizeDesired[i];
789 ahp->ah_coarseHigh[i] = coarseHigh[i]; 765 ah->coarse_high[i] = coarseHigh[i];
790 ahp->ah_coarseLow[i] = coarseLow[i]; 766 ah->coarse_low[i] = coarseLow[i];
791 ahp->ah_firpwr[i] = firpwr[i]; 767 ah->firpwr[i] = firpwr[i];
792 } 768 }
793} 769}
794 770
795void ath9k_hw_ani_attach(struct ath_hal *ah) 771void ath9k_hw_ani_attach(struct ath_hw *ah)
796{ 772{
797 struct ath_hal_5416 *ahp = AH5416(ah);
798 int i; 773 int i;
799 774
800 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Attach ANI\n"); 775 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Attach ANI\n");
801 776
802 ahp->ah_hasHwPhyCounters = 1; 777 ah->has_hw_phycounters = 1;
803 778
804 memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani)); 779 memset(ah->ani, 0, sizeof(ah->ani));
805 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) { 780 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
806 ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH; 781 ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
807 ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW; 782 ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
808 ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH; 783 ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
809 ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW; 784 ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
810 ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; 785 ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
811 ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; 786 ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
812 ahp->ah_ani[i].ofdmWeakSigDetectOff = 787 ah->ani[i].ofdmWeakSigDetectOff =
813 !ATH9K_ANI_USE_OFDM_WEAK_SIG; 788 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
814 ahp->ah_ani[i].cckWeakSigThreshold = 789 ah->ani[i].cckWeakSigThreshold =
815 ATH9K_ANI_CCK_WEAK_SIG_THR; 790 ATH9K_ANI_CCK_WEAK_SIG_THR;
816 ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; 791 ah->ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
817 ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL; 792 ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
818 if (ahp->ah_hasHwPhyCounters) { 793 if (ah->has_hw_phycounters) {
819 ahp->ah_ani[i].ofdmPhyErrBase = 794 ah->ani[i].ofdmPhyErrBase =
820 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH; 795 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
821 ahp->ah_ani[i].cckPhyErrBase = 796 ah->ani[i].cckPhyErrBase =
822 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH; 797 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
823 } 798 }
824 } 799 }
825 if (ahp->ah_hasHwPhyCounters) { 800 if (ah->has_hw_phycounters) {
826 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 801 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
827 "Setting OfdmErrBase = 0x%08x\n", 802 "Setting OfdmErrBase = 0x%08x\n",
828 ahp->ah_ani[0].ofdmPhyErrBase); 803 ah->ani[0].ofdmPhyErrBase);
829 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n", 804 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
830 ahp->ah_ani[0].cckPhyErrBase); 805 ah->ani[0].cckPhyErrBase);
831 806
832 REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase); 807 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
833 REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase); 808 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
834 ath9k_enable_mib_counters(ah); 809 ath9k_enable_mib_counters(ah);
835 } 810 }
836 ahp->ah_aniPeriod = ATH9K_ANI_PERIOD; 811 ah->aniperiod = ATH9K_ANI_PERIOD;
837 if (ah->ah_config.enable_ani) 812 if (ah->config.enable_ani)
838 ahp->ah_procPhyErr |= HAL_PROCESS_ANI; 813 ah->proc_phyerr |= HAL_PROCESS_ANI;
839} 814}
840 815
841void ath9k_hw_ani_detach(struct ath_hal *ah) 816void ath9k_hw_ani_detach(struct ath_hw *ah)
842{ 817{
843 struct ath_hal_5416 *ahp = AH5416(ah);
844
845 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detach ANI\n"); 818 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detach ANI\n");
846 819
847 if (ahp->ah_hasHwPhyCounters) { 820 if (ah->has_hw_phycounters) {
848 ath9k_hw_disable_mib_counters(ah); 821 ath9k_hw_disable_mib_counters(ah);
849 REG_WRITE(ah, AR_PHY_ERR_1, 0); 822 REG_WRITE(ah, AR_PHY_ERR_1, 0);
850 REG_WRITE(ah, AR_PHY_ERR_2, 0); 823 REG_WRITE(ah, AR_PHY_ERR_2, 0);
diff --git a/drivers/net/wireless/ath9k/ani.h b/drivers/net/wireless/ath9k/ani.h
new file mode 100644
index 00000000000..7315761f6d7
--- /dev/null
+++ b/drivers/net/wireless/ath9k/ani.h
@@ -0,0 +1,138 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ANI_H
18#define ANI_H
19
20#define HAL_PROCESS_ANI 0x00000001
21#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)
22
23#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI))
24
25#define HAL_EP_RND(x, mul) \
26 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
27#define BEACON_RSSI(ahp) \
28 HAL_EP_RND(ahp->stats.ast_nodestats.ns_avgbrssi, \
29 ATH9K_RSSI_EP_MULTIPLIER)
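HAL_EP_RND() converts an EP-scaled (fixed-point) running average such as ns_avgbrssi back to whole units, rounding remainders of half the multiplier or more upward instead of truncating. A minimal, self-contained sketch of the same arithmetic (the sample value below is made up for illustration; the macro is re-defined here only so the sketch compiles on its own):

	#include <stdio.h>

	/* Same rounding rule as HAL_EP_RND above. */
	#define HAL_EP_RND(x, mul) \
		((((x) % (mul)) >= ((mul) / 2)) ? ((x) + ((mul) - 1)) / (mul) : (x) / (mul))

	int main(void)
	{
		unsigned int mul = 1 << 7;		/* ATH9K_RSSI_EP_MULTIPLIER */
		unsigned int avg = 25 * mul + 64;	/* EP-scaled average of exactly 25.5 */

		/* Truncating division yields 25; HAL_EP_RND rounds the .5 case up to 26. */
		printf("truncated=%u rounded=%u\n", avg / mul, HAL_EP_RND(avg, mul));
		return 0;
	}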
30
31#define ATH9K_ANI_OFDM_TRIG_HIGH 500
32#define ATH9K_ANI_OFDM_TRIG_LOW 200
33#define ATH9K_ANI_CCK_TRIG_HIGH 200
34#define ATH9K_ANI_CCK_TRIG_LOW 100
35#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
36#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
37#define ATH9K_ANI_CCK_WEAK_SIG_THR false
38#define ATH9K_ANI_SPUR_IMMUNE_LVL 7
39#define ATH9K_ANI_FIRSTEP_LVL 0
40#define ATH9K_ANI_RSSI_THR_HIGH 40
41#define ATH9K_ANI_RSSI_THR_LOW 7
42#define ATH9K_ANI_PERIOD 100
43
44#define HAL_NOISE_IMMUNE_MAX 4
45#define HAL_SPUR_IMMUNE_MAX 7
46#define HAL_FIRST_STEP_MAX 2
47
48enum ath9k_ani_cmd {
49 ATH9K_ANI_PRESENT = 0x1,
50 ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
51 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
52 ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
53 ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
54 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
55 ATH9K_ANI_MODE = 0x40,
56 ATH9K_ANI_PHYERR_RESET = 0x80,
57 ATH9K_ANI_ALL = 0xff
58};
59
60struct ath9k_mib_stats {
61 u32 ackrcv_bad;
62 u32 rts_bad;
63 u32 rts_good;
64 u32 fcs_bad;
65 u32 beacons;
66};
67
68struct ath9k_node_stats {
69 u32 ns_avgbrssi;
70 u32 ns_avgrssi;
71 u32 ns_avgtxrssi;
72 u32 ns_avgtxrate;
73};
74
75struct ar5416AniState {
76 struct ath9k_channel *c;
77 u8 noiseImmunityLevel;
78 u8 spurImmunityLevel;
79 u8 firstepLevel;
80 u8 ofdmWeakSigDetectOff;
81 u8 cckWeakSigThreshold;
82 u32 listenTime;
83 u32 ofdmTrigHigh;
84 u32 ofdmTrigLow;
85 int32_t cckTrigHigh;
86 int32_t cckTrigLow;
87 int32_t rssiThrLow;
88 int32_t rssiThrHigh;
89 u32 noiseFloor;
90 u32 txFrameCount;
91 u32 rxFrameCount;
92 u32 cycleCount;
93 u32 ofdmPhyErrCount;
94 u32 cckPhyErrCount;
95 u32 ofdmPhyErrBase;
96 u32 cckPhyErrBase;
97 int16_t pktRssi[2];
98 int16_t ofdmErrRssi[2];
99 int16_t cckErrRssi[2];
100};
101
102struct ar5416Stats {
103 u32 ast_ani_niup;
104 u32 ast_ani_nidown;
105 u32 ast_ani_spurup;
106 u32 ast_ani_spurdown;
107 u32 ast_ani_ofdmon;
108 u32 ast_ani_ofdmoff;
109 u32 ast_ani_cckhigh;
110 u32 ast_ani_ccklow;
111 u32 ast_ani_stepup;
112 u32 ast_ani_stepdown;
113 u32 ast_ani_ofdmerrs;
114 u32 ast_ani_cckerrs;
115 u32 ast_ani_reset;
116 u32 ast_ani_lzero;
117 u32 ast_ani_lneg;
118 struct ath9k_mib_stats ast_mibstats;
119 struct ath9k_node_stats ast_nodestats;
120};
121#define ah_mibStats stats.ast_mibstats
122
123void ath9k_ani_reset(struct ath_hw *ah);
124void ath9k_hw_ani_monitor(struct ath_hw *ah,
125 const struct ath9k_node_stats *stats,
126 struct ath9k_channel *chan);
127bool ath9k_hw_phycounters(struct ath_hw *ah);
128void ath9k_enable_mib_counters(struct ath_hw *ah);
129void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
130u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
131 u32 *rxf_pcnt, u32 *txf_pcnt);
132void ath9k_hw_procmibevent(struct ath_hw *ah,
133 const struct ath9k_node_stats *stats);
134void ath9k_hw_ani_setup(struct ath_hw *ah);
135void ath9k_hw_ani_attach(struct ath_hw *ah);
136void ath9k_hw_ani_detach(struct ath_hw *ah);
137
138#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index d2781350295..0b0f82c83ff 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -17,1041 +17,670 @@
17#ifndef ATH9K_H 17#ifndef ATH9K_H
18#define ATH9K_H 18#define ATH9K_H
19 19
20#include <linux/io.h> 20#include <linux/etherdevice.h>
21 21#include <linux/device.h>
22#define ATHEROS_VENDOR_ID 0x168c 22#include <net/mac80211.h>
23 23#include <linux/leds.h>
24#define AR5416_DEVID_PCI 0x0023 24#include <linux/rfkill.h>
25#define AR5416_DEVID_PCIE 0x0024 25
26#define AR9160_DEVID_PCI 0x0027 26#include "hw.h"
27#define AR9280_DEVID_PCI 0x0029 27#include "rc.h"
28#define AR9280_DEVID_PCIE 0x002a 28#include "debug.h"
29#define AR9285_DEVID_PCIE 0x002b 29
30 30struct ath_node;
31#define AR5416_AR9100_DEVID 0x000b 31
32 32/* Macro to expand scalars to 64-bit objects */
33#define AR_SUBVENDOR_ID_NOG 0x0e11 33
34#define AR_SUBVENDOR_ID_NEW_A 0x7065 34#define ito64(x) (sizeof(x) == 8) ? \
35 35 (((unsigned long long int)(x)) & (0xff)) : \
36#define ATH9K_TXERR_XRETRY 0x01 36 (sizeof(x) == 16) ? \
37#define ATH9K_TXERR_FILT 0x02 37 (((unsigned long long int)(x)) & 0xffff) : \
38#define ATH9K_TXERR_FIFO 0x04 38 ((sizeof(x) == 32) ? \
39#define ATH9K_TXERR_XTXOP 0x08 39 (((unsigned long long int)(x)) & 0xffffffff) : \
40#define ATH9K_TXERR_TIMER_EXPIRED 0x10 40 (unsigned long long int)(x))
41 41
42#define ATH9K_TX_BA 0x01 42/* increment with wrap-around */
43#define ATH9K_TX_PWRMGMT 0x02 43#define INCR(_l, _sz) do { \
44#define ATH9K_TX_DESC_CFG_ERR 0x04 44 (_l)++; \
45#define ATH9K_TX_DATA_UNDERRUN 0x08 45 (_l) &= ((_sz) - 1); \
46#define ATH9K_TX_DELIM_UNDERRUN 0x10 46 } while (0)
47#define ATH9K_TX_SW_ABORTED 0x40 47
48#define ATH9K_TX_SW_FILTERED 0x80 48/* decrement with wrap-around */
49 49#define DECR(_l, _sz) do { \
50#define NBBY 8 50 (_l)--; \
51 51 (_l) &= ((_sz) - 1); \
52struct ath_tx_status { 52 } while (0)
53 u32 ts_tstamp; 53
54 u16 ts_seqnum; 54#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
55 u8 ts_status; 55
56 u8 ts_ratecode; 56#define ASSERT(exp) do { \
57 u8 ts_rateindex; 57 if (unlikely(!(exp))) { \
58 int8_t ts_rssi; 58 BUG(); \
59 u8 ts_shortretry; 59 } \
60 u8 ts_longretry; 60 } while (0)
61 u8 ts_virtcol; 61
62 u8 ts_antenna; 62#define TSF_TO_TU(_h,_l) \
63 u8 ts_flags; 63 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
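TSF_TO_TU() reduces the 64-bit TSF (a microsecond counter), supplied as two 32-bit halves, to beacon time units of 1024 microseconds: dividing by 1024 is a 10-bit right shift, so the high word contributes its bits shifted left by 22. A small standalone check against a plain 64-bit shift (the TSF value is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	#define TSF_TO_TU(_h, _l) \
		((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))

	int main(void)
	{
		uint64_t tsf = 0x123456789abULL;	/* arbitrary TSF in microseconds */
		uint32_t hi = (uint32_t)(tsf >> 32);
		uint32_t lo = (uint32_t)tsf;

		/* Both expressions give the same TU count (truncated to 32 bits). */
		printf("%u %u\n", TSF_TO_TU(hi, lo), (uint32_t)(tsf >> 10));
		return 0;
	}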
64 int8_t ts_rssi_ctl0; 64
65 int8_t ts_rssi_ctl1; 65#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
66 int8_t ts_rssi_ctl2; 66
67 int8_t ts_rssi_ext0; 67static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
68 int8_t ts_rssi_ext1; 68
69 int8_t ts_rssi_ext2; 69struct ath_config {
70 u8 pad[3]; 70 u32 ath_aggr_prot;
71 u32 ba_low; 71 u16 txpowlimit;
72 u32 ba_high; 72 u8 cabqReadytime;
73 u32 evm0; 73 u8 swBeaconProcess;
74 u32 evm1; 74};
75 u32 evm2; 75
76}; 76/*************************/
77 77/* Descriptor Management */
78struct ath_rx_status { 78/*************************/
79 u32 rs_tstamp; 79
80 u16 rs_datalen; 80#define ATH_TXBUF_RESET(_bf) do { \
81 u8 rs_status; 81 (_bf)->bf_status = 0; \
82 u8 rs_phyerr; 82 (_bf)->bf_lastbf = NULL; \
83 int8_t rs_rssi; 83 (_bf)->bf_next = NULL; \
84 u8 rs_keyix; 84 memset(&((_bf)->bf_state), 0, \
85 u8 rs_rate; 85 sizeof(struct ath_buf_state)); \
86 u8 rs_antenna; 86 } while (0)
87 u8 rs_more; 87
88 int8_t rs_rssi_ctl0; 88/**
89 int8_t rs_rssi_ctl1; 89 * enum buffer_type - Buffer type flags
90 int8_t rs_rssi_ctl2; 90 *
91 int8_t rs_rssi_ext0; 91 * @BUF_HT: Send this buffer using HT capabilities
92 int8_t rs_rssi_ext1; 92 * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
93 int8_t rs_rssi_ext2; 93 * @BUF_AGGR: Indicates whether the buffer can be aggregated
94 u8 rs_isaggr; 94 * (used in aggregation scheduling)
95 u8 rs_moreaggr; 95 * @BUF_RETRY: Indicates whether the buffer is retried
96 u8 rs_num_delims; 96 * @BUF_XRETRY: To denote excessive retries of the buffer
97 u8 rs_flags; 97 */
98 u32 evm0; 98enum buffer_type {
99 u32 evm1; 99 BUF_HT = BIT(1),
100 u32 evm2; 100 BUF_AMPDU = BIT(2),
101}; 101 BUF_AGGR = BIT(3),
102 102 BUF_RETRY = BIT(4),
103#define ATH9K_RXERR_CRC 0x01 103 BUF_XRETRY = BIT(5),
104#define ATH9K_RXERR_PHY 0x02 104};
105#define ATH9K_RXERR_FIFO 0x04 105
106#define ATH9K_RXERR_DECRYPT 0x08 106struct ath_buf_state {
107#define ATH9K_RXERR_MIC 0x10 107 int bfs_nframes;
108 108 u16 bfs_al;
109#define ATH9K_RX_MORE 0x01 109 u16 bfs_frmlen;
110#define ATH9K_RX_MORE_AGGR 0x02 110 int bfs_seqno;
111#define ATH9K_RX_GI 0x04 111 int bfs_tidno;
112#define ATH9K_RX_2040 0x08 112 int bfs_retries;
113#define ATH9K_RX_DELIM_CRC_PRE 0x10 113 u32 bf_type;
114#define ATH9K_RX_DELIM_CRC_POST 0x20 114 u32 bfs_keyix;
115#define ATH9K_RX_DECRYPT_BUSY 0x40 115 enum ath9k_key_type bfs_keytype;
116 116};
117#define ATH9K_RXKEYIX_INVALID ((u8)-1) 117
118#define ATH9K_TXKEYIX_INVALID ((u32)-1) 118#define bf_nframes bf_state.bfs_nframes
119 119#define bf_al bf_state.bfs_al
120struct ath_desc { 120#define bf_frmlen bf_state.bfs_frmlen
121 u32 ds_link; 121#define bf_retries bf_state.bfs_retries
122 u32 ds_data; 122#define bf_seqno bf_state.bfs_seqno
123 u32 ds_ctl0; 123#define bf_tidno bf_state.bfs_tidno
124 u32 ds_ctl1; 124#define bf_keyix bf_state.bfs_keyix
125 u32 ds_hw[20]; 125#define bf_keytype bf_state.bfs_keytype
126 union { 126#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT)
127 struct ath_tx_status tx; 127#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
128 struct ath_rx_status rx; 128#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
129 void *stats; 129#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
130 } ds_us; 130#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
131 void *ds_vdata; 131
132} __packed; 132struct ath_buf {
133 133 struct list_head list;
134#define ds_txstat ds_us.tx 134 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
135#define ds_rxstat ds_us.rx 135 an aggregate) */
136#define ds_stat ds_us.stats 136 struct ath_buf *bf_next; /* next subframe in the aggregate */
137 137 void *bf_mpdu; /* enclosing frame structure */
138#define ATH9K_TXDESC_CLRDMASK 0x0001 138 struct ath_desc *bf_desc; /* virtual addr of desc */
139#define ATH9K_TXDESC_NOACK 0x0002 139 dma_addr_t bf_daddr; /* physical addr of desc */
140#define ATH9K_TXDESC_RTSENA 0x0004 140 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
141#define ATH9K_TXDESC_CTSENA 0x0008 141 u32 bf_status;
142/* ATH9K_TXDESC_INTREQ forces a tx interrupt to be generated for 142 u16 bf_flags;
143 * the descriptor it's marked on. We take a tx interrupt to reap 143 struct ath_buf_state bf_state;
144 * descriptors when the h/w hits an EOL condition or 144 dma_addr_t bf_dmacontext;
145 * when the descriptor is specifically marked to generate 145};
146 * an interrupt with this flag. Descriptors should be 146
147 * marked periodically to ensure timely replenishing of the 147#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
148 * supply needed for sending frames. Deferring interrupts 148#define ATH_BUFSTATUS_STALE 0x00000002
149 * reduces system load and potentially allows more concurrent 149
150 * work to be done but if done too aggressively can cause 150struct ath_descdma {
151 * senders to back up. When the hardware queue is left too 151 const char *dd_name;
152 * large rate control information may also be too out of 152 struct ath_desc *dd_desc;
153 * date. An alternative for this is TX interrupt mitigation 153 dma_addr_t dd_desc_paddr;
154 * but this needs more testing. */ 154 u32 dd_desc_len;
155#define ATH9K_TXDESC_INTREQ 0x0010 155 struct ath_buf *dd_bufptr;
156#define ATH9K_TXDESC_VEOL 0x0020 156 dma_addr_t dd_dmacontext;
157#define ATH9K_TXDESC_EXT_ONLY 0x0040 157};
158#define ATH9K_TXDESC_EXT_AND_CTL 0x0080 158
159#define ATH9K_TXDESC_VMF 0x0100 159int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
160#define ATH9K_TXDESC_FRAG_IS_ON 0x0200 160 struct list_head *head, const char *name,
161#define ATH9K_TXDESC_CAB 0x0400 161 int nbuf, int ndesc);
162 162void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
163#define ATH9K_RXDESC_INTREQ 0x0020 163 struct list_head *head);
164 164
165enum wireless_mode { 165/***********/
166 ATH9K_MODE_11A = 0, 166/* RX / TX */
167 ATH9K_MODE_11B = 2, 167/***********/
168 ATH9K_MODE_11G = 3, 168
169 ATH9K_MODE_11NA_HT20 = 6, 169#define ATH_MAX_ANTENNA 3
170 ATH9K_MODE_11NG_HT20 = 7, 170#define ATH_RXBUF 512
171 ATH9K_MODE_11NA_HT40PLUS = 8, 171#define WME_NUM_TID 16
172 ATH9K_MODE_11NA_HT40MINUS = 9, 172#define ATH_TXBUF 512
173 ATH9K_MODE_11NG_HT40PLUS = 10, 173#define ATH_TXMAXTRY 13
174 ATH9K_MODE_11NG_HT40MINUS = 11, 174#define ATH_11N_TXMAXTRY 10
175 ATH9K_MODE_MAX 175#define ATH_MGT_TXMAXTRY 4
176}; 176#define WME_BA_BMP_SIZE 64
177 177#define WME_MAX_BA WME_BA_BMP_SIZE
178enum ath9k_hw_caps { 178#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
179 ATH9K_HW_CAP_CHAN_SPREAD = BIT(0), 179
180 ATH9K_HW_CAP_MIC_AESCCM = BIT(1), 180#define TID_TO_WME_AC(_tid) \
181 ATH9K_HW_CAP_MIC_CKIP = BIT(2), 181 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
182 ATH9K_HW_CAP_MIC_TKIP = BIT(3), 182 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
183 ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4), 183 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
184 ATH9K_HW_CAP_CIPHER_CKIP = BIT(5), 184 WME_AC_VO)
185 ATH9K_HW_CAP_CIPHER_TKIP = BIT(6), 185
186 ATH9K_HW_CAP_VEOL = BIT(7), 186#define WME_AC_BE 0
187 ATH9K_HW_CAP_BSSIDMASK = BIT(8), 187#define WME_AC_BK 1
188 ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9), 188#define WME_AC_VI 2
189 ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10), 189#define WME_AC_VO 3
190 ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11), 190#define WME_NUM_AC 4
191 ATH9K_HW_CAP_HT = BIT(12), 191
192 ATH9K_HW_CAP_GTT = BIT(13), 192#define ADDBA_EXCHANGE_ATTEMPTS 10
193 ATH9K_HW_CAP_FASTCC = BIT(14), 193#define ATH_AGGR_DELIM_SZ 4
194 ATH9K_HW_CAP_RFSILENT = BIT(15), 194#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
195 ATH9K_HW_CAP_WOW = BIT(16), 195/* number of delimiters for encryption padding */
196 ATH9K_HW_CAP_CST = BIT(17), 196#define ATH_AGGR_ENCRYPTDELIM 10
197 ATH9K_HW_CAP_ENHANCEDPM = BIT(18), 197/* minimum h/w qdepth to be sustained to maximize aggregation */
198 ATH9K_HW_CAP_AUTOSLEEP = BIT(19), 198#define ATH_AGGR_MIN_QDEPTH 2
199 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20), 199#define ATH_AMPDU_SUBFRAME_DEFAULT 32
200 ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21), 200#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
201}; 201#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX
202 202
203enum ath9k_capability_type { 203#define IEEE80211_SEQ_SEQ_SHIFT 4
204 ATH9K_CAP_CIPHER = 0, 204#define IEEE80211_SEQ_MAX 4096
205 ATH9K_CAP_TKIP_MIC, 205#define IEEE80211_MIN_AMPDU_BUF 0x8
206 ATH9K_CAP_TKIP_SPLIT, 206#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
207 ATH9K_CAP_PHYCOUNTERS, 207#define IEEE80211_WEP_IVLEN 3
208 ATH9K_CAP_DIVERSITY, 208#define IEEE80211_WEP_KIDLEN 1
209 ATH9K_CAP_TXPOW, 209#define IEEE80211_WEP_CRCLEN 4
210 ATH9K_CAP_PHYDIAG, 210#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
211 ATH9K_CAP_MCAST_KEYSRCH, 211 (IEEE80211_WEP_IVLEN + \
212 ATH9K_CAP_TSF_ADJUST, 212 IEEE80211_WEP_KIDLEN + \
213 ATH9K_CAP_WME_TKIPMIC, 213 IEEE80211_WEP_CRCLEN))
214 ATH9K_CAP_RFSILENT, 214
215 ATH9K_CAP_ANT_CFG_2GHZ, 215/* return whether a bit at index _n in bitmap _bm is set
216 ATH9K_CAP_ANT_CFG_5GHZ 216 * _sz is the size of the bitmap */
217}; 217#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
218 218 ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
219struct ath9k_hw_capabilities { 219
220 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */ 220/* return block-ack bitmap index given sequence and starting sequence */
221 DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */ 221#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
222 u16 total_queues; 222
223 u16 keycache_size; 223/* returns delimiter padding required given the packet length */
224 u16 low_5ghz_chan, high_5ghz_chan; 224#define ATH_AGGR_GET_NDELIM(_len) \
225 u16 low_2ghz_chan, high_2ghz_chan; 225 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
226 u16 num_mr_retries; 226 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
227 u16 rts_aggr_limit; 227
228 u8 tx_chainmask; 228#define BAW_WITHIN(_start, _bawsz, _seqno) \
229 u8 rx_chainmask; 229 ((((_seqno) - (_start)) & 4095) < (_bawsz))
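ATH_AGGR_GET_NDELIM() pads short A-MPDU subframes up to ATH_AGGR_MINPLEN by returning the number of 4-byte delimiters to prepend, while ATH_BA_INDEX()/BAW_WITHIN() do the sequence-number arithmetic modulo 4096 for the block-ack window. A worked example with made-up values:

	#include <stdio.h>

	#define ATH_AGGR_DELIM_SZ	4
	#define ATH_AGGR_MINPLEN	256
	#define IEEE80211_SEQ_MAX	4096

	#define ATH_AGGR_GET_NDELIM(_len) \
		(((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
		  (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
	#define ATH_BA_INDEX(_st, _seq)	(((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
	#define BAW_WITHIN(_start, _bawsz, _seqno) \
		((((_seqno) - (_start)) & 4095) < (_bawsz))

	int main(void)
	{
		/* A 100-byte subframe needs (256 - 100 - 4) / 4 = 38 pad delimiters. */
		printf("ndelim=%d\n", ATH_AGGR_GET_NDELIM(100));

		/* Sequence 5 sits at index 11 of a 64-frame window starting at 4090
		 * (the numbering wraps at 4096), so it is inside the window. */
		printf("index=%d within=%d\n", ATH_BA_INDEX(4090, 5), BAW_WITHIN(4090, 64, 5));
		return 0;
	}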
230 u16 tx_triglevel_max; 230
231 u16 reg_cap; 231#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
232 u8 num_gpio_pins; 232#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
233 u8 num_antcfg_2ghz; 233#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
234 u8 num_antcfg_5ghz; 234#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
235}; 235
236 236enum ATH_AGGR_STATUS {
237struct ath9k_ops_config { 237 ATH_AGGR_DONE,
238 int dma_beacon_response_time; 238 ATH_AGGR_BAW_CLOSED,
239 int sw_beacon_response_time; 239 ATH_AGGR_LIMITED,
240 int additional_swba_backoff; 240};
241 int ack_6mb; 241
242 int cwm_ignore_extcca; 242struct ath_txq {
243 u8 pcie_powersave_enable; 243 u32 axq_qnum;
244 u8 pcie_l1skp_enable; 244 u32 *axq_link;
245 u8 pcie_clock_req; 245 struct list_head axq_q;
246 u32 pcie_waen; 246 spinlock_t axq_lock;
247 int pcie_power_reset; 247 u32 axq_depth;
248 u8 pcie_restore; 248 u8 axq_aggr_depth;
249 u8 analog_shiftreg; 249 u32 axq_totalqueued;
250 u8 ht_enable; 250 bool stopped;
251 u32 ofdm_trig_low; 251 struct ath_buf *axq_linkbuf;
252 u32 ofdm_trig_high; 252
253 u32 cck_trig_high; 253 /* first desc of the last descriptor that contains CTS */
254 u32 cck_trig_low; 254 struct ath_desc *axq_lastdsWithCTS;
255 u32 enable_ani; 255
256 u8 noise_immunity_level; 256 /* final desc of the gating desc that determines whether
257 u32 ofdm_weaksignal_det; 257 lastdsWithCTS has been DMA'ed or not */
258 u32 cck_weaksignal_thr; 258 struct ath_desc *axq_gatingds;
259 u8 spur_immunity_level; 259
260 u8 firstep_level; 260 struct list_head axq_acq;
261 int8_t rssi_thr_high; 261};
262 int8_t rssi_thr_low; 262
263 u16 diversity_control; 263#define AGGR_CLEANUP BIT(1)
264 u16 antenna_switch_swap; 264#define AGGR_ADDBA_COMPLETE BIT(2)
265 int serialize_regmode; 265#define AGGR_ADDBA_PROGRESS BIT(3)
266 int intr_mitigation; 266
267#define SPUR_DISABLE 0 267struct ath_atx_tid {
268#define SPUR_ENABLE_IOCTL 1 268 struct list_head list;
269#define SPUR_ENABLE_EEPROM 2 269 struct list_head buf_q;
270#define AR_EEPROM_MODAL_SPURS 5 270 struct ath_node *an;
271#define AR_SPUR_5413_1 1640 271 struct ath_atx_ac *ac;
272#define AR_SPUR_5413_2 1200 272 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
273#define AR_NO_SPUR 0x8000 273 u16 seq_start;
274#define AR_BASE_FREQ_2GHZ 2300 274 u16 seq_next;
275#define AR_BASE_FREQ_5GHZ 4900 275 u16 baw_size;
276#define AR_SPUR_FEEQ_BOUND_HT40 19 276 int tidno;
277#define AR_SPUR_FEEQ_BOUND_HT20 10 277 int baw_head; /* first un-acked tx buffer */
278 int spurmode; 278 int baw_tail; /* next unused tx buffer slot */
279 u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; 279 int sched;
280}; 280 int paused;
281 281 u8 state;
282enum ath9k_tx_queue { 282 int addba_exchangeattempts;
283 ATH9K_TX_QUEUE_INACTIVE = 0, 283};
284 ATH9K_TX_QUEUE_DATA, 284
285 ATH9K_TX_QUEUE_BEACON, 285struct ath_atx_ac {
286 ATH9K_TX_QUEUE_CAB, 286 int sched;
287 ATH9K_TX_QUEUE_UAPSD, 287 int qnum;
288 ATH9K_TX_QUEUE_PSPOLL 288 struct list_head list;
289}; 289 struct list_head tid_q;
290 290};
291#define ATH9K_NUM_TX_QUEUES 10 291
292 292struct ath_tx_control {
293enum ath9k_tx_queue_subtype { 293 struct ath_txq *txq;
294 ATH9K_WME_AC_BK = 0, 294 int if_id;
295 ATH9K_WME_AC_BE, 295};
296 ATH9K_WME_AC_VI, 296
297 ATH9K_WME_AC_VO, 297struct ath_xmit_status {
298 ATH9K_WME_UPSD 298 int retries;
299}; 299 int flags;
300 300#define ATH_TX_ERROR 0x01
301enum ath9k_tx_queue_flags { 301#define ATH_TX_XRETRY 0x02
302 TXQ_FLAG_TXOKINT_ENABLE = 0x0001, 302#define ATH_TX_BAR 0x04
303 TXQ_FLAG_TXERRINT_ENABLE = 0x0001, 303};
304 TXQ_FLAG_TXDESCINT_ENABLE = 0x0002, 304
305 TXQ_FLAG_TXEOLINT_ENABLE = 0x0004, 305/* All RSSI values are noise floor adjusted */
306 TXQ_FLAG_TXURNINT_ENABLE = 0x0008, 306struct ath_tx_stat {
307 TXQ_FLAG_BACKOFF_DISABLE = 0x0010, 307 int rssi;
308 TXQ_FLAG_COMPRESSION_ENABLE = 0x0020, 308 int rssictl[ATH_MAX_ANTENNA];
309 TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040, 309 int rssiextn[ATH_MAX_ANTENNA];
310 TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080, 310 int rateieee;
311}; 311 int rateKbps;
312 312 int ratecode;
313#define ATH9K_TXQ_USEDEFAULT ((u32) -1) 313 int flags;
314 314 u32 airtime; /* time on air per final tx rate */
315#define ATH9K_DECOMP_MASK_SIZE 128 315};
316#define ATH9K_READY_TIME_LO_BOUND 50 316
317#define ATH9K_READY_TIME_HI_BOUND 96 317struct aggr_rifs_param {
318 318 int param_max_frames;
319enum ath9k_pkt_type { 319 int param_max_len;
320 ATH9K_PKT_TYPE_NORMAL = 0, 320 int param_rl;
321 ATH9K_PKT_TYPE_ATIM, 321 int param_al;
322 ATH9K_PKT_TYPE_PSPOLL, 322 struct ath_rc_series *param_rcs;
323 ATH9K_PKT_TYPE_BEACON, 323};
324 ATH9K_PKT_TYPE_PROBE_RESP, 324
325 ATH9K_PKT_TYPE_CHIRP, 325struct ath_node {
326 ATH9K_PKT_TYPE_GRP_POLL, 326 struct ath_softc *an_sc;
327}; 327 struct ath_atx_tid tid[WME_NUM_TID];
328 struct ath_atx_ac ac[WME_NUM_AC];
329 u16 maxampdu;
330 u8 mpdudensity;
331};
332
333struct ath_tx {
334 u16 seq_no;
335 u32 txqsetup;
336 int hwq_map[ATH9K_WME_AC_VO+1];
337 spinlock_t txbuflock;
338 struct list_head txbuf;
339 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
340 struct ath_descdma txdma;
341};
342
343struct ath_rx {
344 u8 defant;
345 u8 rxotherant;
346 u32 *rxlink;
347 int bufsize;
348 unsigned int rxfilter;
349 spinlock_t rxflushlock;
350 spinlock_t rxbuflock;
351 struct list_head rxbuf;
352 struct ath_descdma rxdma;
353};
354
355int ath_startrecv(struct ath_softc *sc);
356bool ath_stoprecv(struct ath_softc *sc);
357void ath_flushrecv(struct ath_softc *sc);
358u32 ath_calcrxfilter(struct ath_softc *sc);
359int ath_rx_init(struct ath_softc *sc, int nbufs);
360void ath_rx_cleanup(struct ath_softc *sc);
361int ath_rx_tasklet(struct ath_softc *sc, int flush);
362struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
363void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
364int ath_tx_setup(struct ath_softc *sc, int haltype);
365void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
366void ath_draintxq(struct ath_softc *sc,
367 struct ath_txq *txq, bool retry_tx);
368void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
369void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
370void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
371int ath_tx_init(struct ath_softc *sc, int nbufs);
372int ath_tx_cleanup(struct ath_softc *sc);
373struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
374int ath_txq_update(struct ath_softc *sc, int qnum,
375 struct ath9k_tx_queue_info *q);
376int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
377 struct ath_tx_control *txctl);
378void ath_tx_tasklet(struct ath_softc *sc);
379void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
380bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
381int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
382 u16 tid, u16 *ssn);
383int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
384void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
385
386/********/
387/* VIFs */
388/********/
328 389
329struct ath9k_tx_queue_info { 390/*
330 u32 tqi_ver; 391 * Define the scheme that we select MAC address for multiple
331 enum ath9k_tx_queue tqi_type; 392 * BSS on the same radio. The very first VIF will just use the MAC
332 enum ath9k_tx_queue_subtype tqi_subtype; 393 * address from the EEPROM. For the next 3 VIFs, we set the
333 enum ath9k_tx_queue_flags tqi_qflags; 394 * U/L bit (bit 1) in MAC address, and use the next two bits as the
334 u32 tqi_priority; 395 * index of the VIF.
335 u32 tqi_aifs; 396 */
336 u32 tqi_cwmin;
337 u32 tqi_cwmax;
338 u16 tqi_shretry;
339 u16 tqi_lgretry;
340 u32 tqi_cbrPeriod;
341 u32 tqi_cbrOverflowLimit;
342 u32 tqi_burstTime;
343 u32 tqi_readyTime;
344 u32 tqi_physCompBuf;
345 u32 tqi_intFlags;
346};
347 397
348enum ath9k_rx_filter { 398#define ATH_SET_VIF_BSSID_MASK(bssid_mask) \
349 ATH9K_RX_FILTER_UCAST = 0x00000001, 399 ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
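The comment above describes how per-VIF MAC addresses are derived from the EEPROM address, and ATH_SET_VIF_BSSID_MASK() clears the U/L bit (and, when ATH_BCBUF > 1, the index bits) in the first octet of the BSSID mask so address filtering ignores exactly those bits. A hypothetical helper, purely for illustration (ath_vif_mac() is not a driver function), showing one way such an address could be formed under that scheme:

	#include <stdint.h>
	#include <string.h>

	/* Illustrative only: VIF 0 keeps the EEPROM MAC unchanged; subsequent VIFs
	 * set the locally-administered (U/L) bit and carry the VIF index in
	 * bits 2-3 of the first octet, as described in the comment above. */
	static void ath_vif_mac(const uint8_t eeprom_mac[6], unsigned int idx,
				uint8_t mac[6])
	{
		memcpy(mac, eeprom_mac, 6);
		if (idx > 0)
			mac[0] |= 0x02 | ((idx & 0x3) << 2);
	}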
350 ATH9K_RX_FILTER_MCAST = 0x00000002,
351 ATH9K_RX_FILTER_BCAST = 0x00000004,
352 ATH9K_RX_FILTER_CONTROL = 0x00000008,
353 ATH9K_RX_FILTER_BEACON = 0x00000010,
354 ATH9K_RX_FILTER_PROM = 0x00000020,
355 ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
356 ATH9K_RX_FILTER_PSPOLL = 0x00004000,
357 ATH9K_RX_FILTER_PHYERR = 0x00000100,
358 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
359};
360 400
361enum ath9k_int { 401struct ath_vif {
362 ATH9K_INT_RX = 0x00000001, 402 int av_bslot;
363 ATH9K_INT_RXDESC = 0x00000002, 403 enum nl80211_iftype av_opmode;
364 ATH9K_INT_RXNOFRM = 0x00000008, 404 struct ath_buf *av_bcbuf;
365 ATH9K_INT_RXEOL = 0x00000010, 405 struct ath_tx_control av_btxctl;
366 ATH9K_INT_RXORN = 0x00000020,
367 ATH9K_INT_TX = 0x00000040,
368 ATH9K_INT_TXDESC = 0x00000080,
369 ATH9K_INT_TIM_TIMER = 0x00000100,
370 ATH9K_INT_TXURN = 0x00000800,
371 ATH9K_INT_MIB = 0x00001000,
372 ATH9K_INT_RXPHY = 0x00004000,
373 ATH9K_INT_RXKCM = 0x00008000,
374 ATH9K_INT_SWBA = 0x00010000,
375 ATH9K_INT_BMISS = 0x00040000,
376 ATH9K_INT_BNR = 0x00100000,
377 ATH9K_INT_TIM = 0x00200000,
378 ATH9K_INT_DTIM = 0x00400000,
379 ATH9K_INT_DTIMSYNC = 0x00800000,
380 ATH9K_INT_GPIO = 0x01000000,
381 ATH9K_INT_CABEND = 0x02000000,
382 ATH9K_INT_CST = 0x10000000,
383 ATH9K_INT_GTT = 0x20000000,
384 ATH9K_INT_FATAL = 0x40000000,
385 ATH9K_INT_GLOBAL = 0x80000000,
386 ATH9K_INT_BMISC = ATH9K_INT_TIM |
387 ATH9K_INT_DTIM |
388 ATH9K_INT_DTIMSYNC |
389 ATH9K_INT_CABEND,
390 ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM |
391 ATH9K_INT_RXDESC |
392 ATH9K_INT_RXEOL |
393 ATH9K_INT_RXORN |
394 ATH9K_INT_TXURN |
395 ATH9K_INT_TXDESC |
396 ATH9K_INT_MIB |
397 ATH9K_INT_RXPHY |
398 ATH9K_INT_RXKCM |
399 ATH9K_INT_SWBA |
400 ATH9K_INT_BMISS |
401 ATH9K_INT_GPIO,
402 ATH9K_INT_NOCARD = 0xffffffff
403}; 406};
404 407
405#define ATH9K_RATESERIES_RTS_CTS 0x0001 408/*******************/
406#define ATH9K_RATESERIES_2040 0x0002 409/* Beacon Handling */
407#define ATH9K_RATESERIES_HALFGI 0x0004 410/*******************/
408 411
409struct ath9k_11n_rate_series { 412/*
410 u32 Tries; 413 * Regardless of the number of beacons we stagger, (i.e. regardless of the
411 u32 Rate; 414 * number of BSSIDs) if a given beacon does not go out even after waiting this
412 u32 PktDuration; 415 * number of beacon intervals, the game's up.
413 u32 ChSel; 416 */
414 u32 RateFlags; 417#define BSTUCK_THRESH (9 * ATH_BCBUF)
415}; 418#define ATH_BCBUF 1
419#define ATH_DEFAULT_BINTVAL 100 /* TU */
420#define ATH_DEFAULT_BMISS_LIMIT 10
421#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
422
423struct ath_beacon_config {
424 u16 beacon_interval;
425 u16 listen_interval;
426 u16 dtim_period;
427 u16 bmiss_timeout;
428 u8 dtim_count;
429 u8 tim_offset;
430 union {
431 u64 last_tsf;
432 u8 last_tstamp[8];
433 } u; /* last received beacon/probe response timestamp of this BSS. */
434};
435
436struct ath_beacon {
437 enum {
438 OK, /* no change needed */
439 UPDATE, /* update pending */
440 COMMIT /* beacon sent, commit change */
441 } updateslot; /* slot time update fsm */
442
443 u32 beaconq;
444 u32 bmisscnt;
445 u32 ast_be_xmit;
446 u64 bc_tstamp;
447 int bslot[ATH_BCBUF];
448 int slottime;
449 int slotupdate;
450 struct ath9k_tx_queue_info beacon_qi;
451 struct ath_descdma bdma;
452 struct ath_txq *cabq;
453 struct list_head bbuf;
454};
455
456void ath9k_beacon_tasklet(unsigned long data);
457void ath_beacon_config(struct ath_softc *sc, int if_id);
458int ath_beaconq_setup(struct ath_hw *ah);
459int ath_beacon_alloc(struct ath_softc *sc, int if_id);
460void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
461void ath_beacon_sync(struct ath_softc *sc, int if_id);
462
463/*******/
464/* ANI */
465/*******/
416 466
417#define CHANNEL_CW_INT 0x00002 467/* ANI values for STA only.
418#define CHANNEL_CCK 0x00020 468 FIXME: Add appropriate values for AP later */
419#define CHANNEL_OFDM 0x00040
420#define CHANNEL_2GHZ 0x00080
421#define CHANNEL_5GHZ 0x00100
422#define CHANNEL_PASSIVE 0x00200
423#define CHANNEL_DYN 0x00400
424#define CHANNEL_HALF 0x04000
425#define CHANNEL_QUARTER 0x08000
426#define CHANNEL_HT20 0x10000
427#define CHANNEL_HT40PLUS 0x20000
428#define CHANNEL_HT40MINUS 0x40000
429
430#define CHANNEL_INTERFERENCE 0x01
431#define CHANNEL_DFS 0x02
432#define CHANNEL_4MS_LIMIT 0x04
433#define CHANNEL_DFS_CLEAR 0x08
434#define CHANNEL_DISALLOW_ADHOC 0x10
435#define CHANNEL_PER_11D_ADHOC 0x20
436
437#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
438#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
439#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
440#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
441#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
442#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
443#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
444#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
445#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
446#define CHANNEL_ALL \
447 (CHANNEL_OFDM| \
448 CHANNEL_CCK| \
449 CHANNEL_2GHZ | \
450 CHANNEL_5GHZ | \
451 CHANNEL_HT20 | \
452 CHANNEL_HT40PLUS | \
453 CHANNEL_HT40MINUS)
454
455struct ath9k_channel {
456 u16 channel;
457 u32 channelFlags;
458 u8 privFlags;
459 int8_t maxRegTxPower;
460 int8_t maxTxPower;
461 int8_t minTxPower;
462 u32 chanmode;
463 int32_t CalValid;
464 bool oneTimeCalsDone;
465 int8_t iCoff;
466 int8_t qCoff;
467 int16_t rawNoiseFloor;
468 int8_t antennaMax;
469 u32 regDmnFlags;
470 u32 conformanceTestLimit[3]; /* 0:11a, 1: 11b, 2:11g */
471#ifdef ATH_NF_PER_CHAN
472 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
473#endif
474};
475 469
476#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \ 470#define ATH_ANI_POLLINTERVAL 100 /* 100 milliseconds between ANI poll */
477 (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \ 471#define ATH_SHORT_CALINTERVAL 1000 /* 1 second between calibrations */
478 (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \ 472#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds between calibrations */
479 (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS)) 473#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes between calibrations */
480#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
481 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
482 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
483 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
484#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
485#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
486#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
487#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
488#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
489#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
490
491/* These macros check chanmode and not channelFlags */
492#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
493#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
494 ((_c)->chanmode == CHANNEL_G_HT20))
495#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
496 ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
497 ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
498 ((_c)->chanmode == CHANNEL_G_HT40MINUS))
499#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
500
501#define IS_CHAN_IN_PUBLIC_SAFETY_BAND(_c) ((_c) > 4940 && (_c) < 4990)
502#define IS_CHAN_A_5MHZ_SPACED(_c) \
503 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
504 (((_c)->channel % 20) != 0) && \
505 (((_c)->channel % 10) != 0))
506
507struct ath9k_keyval {
508 u8 kv_type;
509 u8 kv_pad;
510 u16 kv_len;
511 u8 kv_val[16];
512 u8 kv_mic[8];
513 u8 kv_txmic[8];
514};
515 474
516enum ath9k_key_type { 475struct ath_ani {
517 ATH9K_KEY_TYPE_CLEAR, 476 bool caldone;
518 ATH9K_KEY_TYPE_WEP, 477 int16_t noise_floor;
519 ATH9K_KEY_TYPE_AES, 478 unsigned int longcal_timer;
520 ATH9K_KEY_TYPE_TKIP, 479 unsigned int shortcal_timer;
480 unsigned int resetcal_timer;
481 unsigned int checkani_timer;
482 struct timer_list timer;
521}; 483};
522 484
523enum ath9k_cipher { 485/********************/
524 ATH9K_CIPHER_WEP = 0, 486/* LED Control */
525 ATH9K_CIPHER_AES_OCB = 1, 487/********************/
526 ATH9K_CIPHER_AES_CCM = 2,
527 ATH9K_CIPHER_CKIP = 3,
528 ATH9K_CIPHER_TKIP = 4,
529 ATH9K_CIPHER_CLR = 5,
530 ATH9K_CIPHER_MIC = 127
531};
532 488
533#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001 489#define ATH_LED_PIN 1
534#define AR_EEPROM_EEPCAP_AES_DIS 0x0002 490#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
535#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004 491#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
536#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008
537#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0
538#define AR_EEPROM_EEPCAP_MAXQCU_S 4
539#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200
540#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000
541#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12
542
543#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
544#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
545#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100
546#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
547#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
548#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
549
550#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000
551#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000
552
553#define SD_NO_CTL 0xE0
554#define NO_CTL 0xff
555#define CTL_MODE_M 7
556#define CTL_11A 0
557#define CTL_11B 1
558#define CTL_11G 2
559#define CTL_2GHT20 5
560#define CTL_5GHT20 6
561#define CTL_2GHT40 7
562#define CTL_5GHT40 8
563
564#define AR_EEPROM_MAC(i) (0x1d+(i))
565
566#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
567#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
568#define AR_EEPROM_RFSILENT_POLARITY 0x0002
569#define AR_EEPROM_RFSILENT_POLARITY_S 1
570
571#define CTRY_DEBUG 0x1ff
572#define CTRY_DEFAULT 0
573
574enum reg_ext_bitmap {
575 REG_EXT_JAPAN_MIDBAND = 1,
576 REG_EXT_FCC_DFS_HT40 = 2,
577 REG_EXT_JAPAN_NONDFS_HT40 = 3,
578 REG_EXT_JAPAN_DFS_HT40 = 4
579};
580 492
581struct ath9k_country_entry { 493enum ath_led_type {
582 u16 countryCode; 494 ATH_LED_RADIO,
583 u16 regDmnEnum; 495 ATH_LED_ASSOC,
584 u16 regDmn5G; 496 ATH_LED_TX,
585 u16 regDmn2G; 497 ATH_LED_RX
586 u8 isMultidomain;
587 u8 iso[3];
588}; 498};
589 499
590#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg) 500struct ath_led {
591#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg) 501 struct ath_softc *sc;
592 502 struct led_classdev led_cdev;
593#define SM(_v, _f) (((_v) << _f##_S) & _f) 503 enum ath_led_type led_type;
594#define MS(_v, _f) (((_v) & _f) >> _f##_S) 504 char name[32];
595#define REG_RMW(_a, _r, _set, _clr) \ 505 bool registered;
596 REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
597#define REG_RMW_FIELD(_a, _r, _f, _v) \
598 REG_WRITE(_a, _r, \
599 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
600#define REG_SET_BIT(_a, _r, _f) \
601 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
602#define REG_CLR_BIT(_a, _r, _f) \
603 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
604
605#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
606
607#define INIT_AIFS 2
608#define INIT_CWMIN 15
609#define INIT_CWMIN_11B 31
610#define INIT_CWMAX 1023
611#define INIT_SH_RETRY 10
612#define INIT_LG_RETRY 10
613#define INIT_SSH_RETRY 32
614#define INIT_SLG_RETRY 32
615
616#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)
617
618#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
619#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX
620
621#define IEEE80211_WEP_IVLEN 3
622#define IEEE80211_WEP_KIDLEN 1
623#define IEEE80211_WEP_CRCLEN 4
624#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
625 (IEEE80211_WEP_IVLEN + \
626 IEEE80211_WEP_KIDLEN + \
627 IEEE80211_WEP_CRCLEN))
628#define MAX_RATE_POWER 63
629
630enum ath9k_power_mode {
631 ATH9K_PM_AWAKE = 0,
632 ATH9K_PM_FULL_SLEEP,
633 ATH9K_PM_NETWORK_SLEEP,
634 ATH9K_PM_UNDEFINED
635}; 506};
636 507
637struct ath9k_mib_stats { 508/* Rfkill */
638 u32 ackrcv_bad; 509#define ATH_RFKILL_POLL_INTERVAL 2000 /* msecs */
639 u32 rts_bad;
640 u32 rts_good;
641 u32 fcs_bad;
642 u32 beacons;
643};
644 510
645enum ath9k_ant_setting { 511struct ath_rfkill {
646 ATH9K_ANT_VARIABLE = 0, 512 struct rfkill *rfkill;
647 ATH9K_ANT_FIXED_A, 513 struct delayed_work rfkill_poll;
648 ATH9K_ANT_FIXED_B 514 char rfkill_name[32];
649}; 515};
650 516
651#define ATH9K_SLOT_TIME_6 6 517/********************/
652#define ATH9K_SLOT_TIME_9 9 518/* Main driver core */
653#define ATH9K_SLOT_TIME_20 20 519/********************/
654 520
655enum ath9k_ht_macmode { 521/*
656 ATH9K_HT_MACMODE_20 = 0, 522 * Default cache line size, in bytes.
657 ATH9K_HT_MACMODE_2040 = 1, 523 * Used when the PCI device is not fully initialized by the bootrom/BIOS
658}; 524*/
659 525#define DEFAULT_CACHELINE 32
660enum ath9k_ht_extprotspacing { 526#define ATH_DEFAULT_NOISE_FLOOR -95
661 ATH9K_HT_EXTPROTSPACING_20 = 0, 527#define ATH_REGCLASSIDS_MAX 10
662 ATH9K_HT_EXTPROTSPACING_25 = 1, 528#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
663}; 529#define ATH_MAX_SW_RETRIES 10
530#define ATH_CHAN_MAX 255
531#define IEEE80211_WEP_NKID 4 /* number of key ids */
664 532
665struct ath9k_ht_cwm { 533/*
666 enum ath9k_ht_macmode ht_macmode; 534 * The key cache is used for h/w cipher state and also for
535 * tracking station state such as the current tx antenna.
536 * We also set up a mapping table between key cache slot indices
537 * and station state to short-circuit node lookups on rx.
538 * Different parts have different size key caches. We handle
539 * up to ATH_KEYMAX entries (could dynamically allocate state).
540 */
541#define ATH_KEYMAX 128 /* max key cache size we handle */
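The key cache comment above implies a simple slot allocator over the keymap bitmap kept in struct ath_softc (DECLARE_BITMAP(keymap, ATH_KEYMAX), defined further down). A minimal, driver-independent sketch of such an allocator; the function name and plain-C bitmap handling are illustrative, not the driver's actual helpers:

	#include <limits.h>

	#define ATH_KEYMAX	128		/* max key cache size we handle */
	#define LONG_BITS	(CHAR_BIT * sizeof(unsigned long))

	/* Find a free hardware key cache slot in a keymap bitmap and mark it used.
	 * Returns the slot index, or -1 if every entry is already allocated. */
	static int keycache_alloc(unsigned long keymap[])
	{
		unsigned int i;

		for (i = 0; i < ATH_KEYMAX; i++) {
			unsigned long bit = 1UL << (i % LONG_BITS);

			if (!(keymap[i / LONG_BITS] & bit)) {
				keymap[i / LONG_BITS] |= bit;
				return i;
			}
		}
		return -1;
	}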
542
543#define ATH_IF_ID_ANY 0xff
544#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
545#define ATH_RSSI_DUMMY_MARKER 0x127
546#define ATH_RATE_DUMMY_MARKER 0
547
548#define SC_OP_INVALID BIT(0)
549#define SC_OP_BEACONS BIT(1)
550#define SC_OP_RXAGGR BIT(2)
551#define SC_OP_TXAGGR BIT(3)
552#define SC_OP_CHAINMASK_UPDATE BIT(4)
553#define SC_OP_FULL_RESET BIT(5)
554#define SC_OP_NO_RESET BIT(6)
555#define SC_OP_PREAMBLE_SHORT BIT(7)
556#define SC_OP_PROTECT_ENABLE BIT(8)
557#define SC_OP_RXFLUSH BIT(9)
558#define SC_OP_LED_ASSOCIATED BIT(10)
559#define SC_OP_RFKILL_REGISTERED BIT(11)
560#define SC_OP_RFKILL_SW_BLOCKED BIT(12)
561#define SC_OP_RFKILL_HW_BLOCKED BIT(13)
562#define SC_OP_WAIT_FOR_BEACON BIT(14)
563#define SC_OP_LED_ON BIT(15)
564
565struct ath_bus_ops {
566 void (*read_cachesize)(struct ath_softc *sc, int *csz);
567 void (*cleanup)(struct ath_softc *sc);
568 bool (*eeprom_read)(struct ath_hw *ah, u32 off, u16 *data);
569};
570
571struct ath_softc {
572 struct ieee80211_hw *hw;
573 struct device *dev;
574 struct tasklet_struct intr_tq;
575 struct tasklet_struct bcon_tasklet;
576 struct ath_hw *sc_ah;
577 void __iomem *mem;
578 int irq;
579 spinlock_t sc_resetlock;
580 struct mutex mutex;
581
582 u8 curbssid[ETH_ALEN];
583 u8 bssidmask[ETH_ALEN];
584 u32 intrstatus;
585 u32 sc_flags; /* SC_OP_* */
586 u16 curtxpow;
587 u16 curaid;
588 u16 cachelsz;
589 u8 nbcnvifs;
590 u16 nvifs;
591 u8 tx_chainmask;
592 u8 rx_chainmask;
593 u32 keymax;
594 DECLARE_BITMAP(keymap, ATH_KEYMAX);
595 u8 splitmic;
596 atomic_t ps_usecount;
597 enum ath9k_int imask;
667 enum ath9k_ht_extprotspacing ht_extprotspacing; 598 enum ath9k_ht_extprotspacing ht_extprotspacing;
668}; 599 enum ath9k_ht_macmode tx_chan_width;
669 600
670enum ath9k_ani_cmd { 601 struct ath_config config;
671 ATH9K_ANI_PRESENT = 0x1, 602 struct ath_rx rx;
672 ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2, 603 struct ath_tx tx;
673 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4, 604 struct ath_beacon beacon;
674 ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8, 605 struct ieee80211_vif *vifs[ATH_BCBUF];
675 ATH9K_ANI_FIRSTEP_LEVEL = 0x10, 606 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
676 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20, 607 struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX];
677 ATH9K_ANI_MODE = 0x40, 608 struct ath_rate_table *cur_rate_table;
678 ATH9K_ANI_PHYERR_RESET = 0x80, 609 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
679 ATH9K_ANI_ALL = 0xff 610
680}; 611 struct ath_led radio_led;
681 612 struct ath_led assoc_led;
682enum { 613 struct ath_led tx_led;
683 WLAN_RC_PHY_OFDM, 614 struct ath_led rx_led;
684 WLAN_RC_PHY_CCK, 615 struct delayed_work ath_led_blink_work;
685 WLAN_RC_PHY_HT_20_SS, 616 int led_on_duration;
686 WLAN_RC_PHY_HT_20_DS, 617 int led_off_duration;
687 WLAN_RC_PHY_HT_40_SS, 618 int led_on_cnt;
688 WLAN_RC_PHY_HT_40_DS, 619 int led_off_cnt;
689 WLAN_RC_PHY_HT_20_SS_HGI, 620
690 WLAN_RC_PHY_HT_20_DS_HGI, 621 struct ath_rfkill rf_kill;
691 WLAN_RC_PHY_HT_40_SS_HGI, 622 struct ath_ani ani;
692 WLAN_RC_PHY_HT_40_DS_HGI, 623 struct ath9k_node_stats nodestats;
693 WLAN_RC_PHY_MAX 624#ifdef CONFIG_ATH9K_DEBUG
694}; 625 struct ath9k_debug debug;
695
696enum ath9k_tp_scale {
697 ATH9K_TP_SCALE_MAX = 0,
698 ATH9K_TP_SCALE_50,
699 ATH9K_TP_SCALE_25,
700 ATH9K_TP_SCALE_12,
701 ATH9K_TP_SCALE_MIN
702};
703
704enum ser_reg_mode {
705 SER_REG_MODE_OFF = 0,
706 SER_REG_MODE_ON = 1,
707 SER_REG_MODE_AUTO = 2,
708};
709
710#define AR_PHY_CCA_MAX_GOOD_VALUE -85
711#define AR_PHY_CCA_MAX_HIGH_VALUE -62
712#define AR_PHY_CCA_MIN_BAD_VALUE -121
713#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
714#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
715
716#define ATH9K_NF_CAL_HIST_MAX 5
717#define NUM_NF_READINGS 6
718
719struct ath9k_nfcal_hist {
720 int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX];
721 u8 currIndex;
722 int16_t privNF;
723 u8 invalidNFcount;
724};
725
726struct ath9k_beacon_state {
727 u32 bs_nexttbtt;
728 u32 bs_nextdtim;
729 u32 bs_intval;
730#define ATH9K_BEACON_PERIOD 0x0000ffff
731#define ATH9K_BEACON_ENA 0x00800000
732#define ATH9K_BEACON_RESET_TSF 0x01000000
733 u32 bs_dtimperiod;
734 u16 bs_cfpperiod;
735 u16 bs_cfpmaxduration;
736 u32 bs_cfpnext;
737 u16 bs_timoffset;
738 u16 bs_bmissthreshold;
739 u32 bs_sleepduration;
740};
741
742struct ath9k_node_stats {
743 u32 ns_avgbrssi;
744 u32 ns_avgrssi;
745 u32 ns_avgtxrssi;
746 u32 ns_avgtxrate;
747};
748
749#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)
750
751#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
752#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
753#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
754#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
755#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
756
757enum {
758 ATH9K_RESET_POWER_ON,
759 ATH9K_RESET_WARM,
760 ATH9K_RESET_COLD,
761};
762
763#define AH_USE_EEPROM 0x1
764
765struct ath_hal {
766 u32 ah_magic;
767 u16 ah_devid;
768 u16 ah_subvendorid;
769 u32 ah_macVersion;
770 u16 ah_macRev;
771 u16 ah_phyRev;
772 u16 ah_analog5GhzRev;
773 u16 ah_analog2GhzRev;
774
775 void __iomem *ah_sh;
776 struct ath_softc *ah_sc;
777
778 enum nl80211_iftype ah_opmode;
779 struct ath9k_ops_config ah_config;
780 struct ath9k_hw_capabilities ah_caps;
781
782 u16 ah_countryCode;
783 u32 ah_flags;
784 int16_t ah_powerLimit;
785 u16 ah_maxPowerLevel;
786 u32 ah_tpScale;
787 u16 ah_currentRD;
788 u16 ah_currentRDExt;
789 u16 ah_currentRDInUse;
790 u16 ah_currentRD5G;
791 u16 ah_currentRD2G;
792 char ah_iso[4];
793
794 struct ath9k_channel ah_channels[150];
795 struct ath9k_channel *ah_curchan;
796 u32 ah_nchan;
797
798 bool ah_isPciExpress;
799 u16 ah_txTrigLevel;
800 u16 ah_rfsilent;
801 u32 ah_rfkill_gpio;
802 u32 ah_rfkill_polarity;
803
804#ifndef ATH_NF_PER_CHAN
805 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
806#endif 626#endif
807}; 627 struct ath_bus_ops *bus_ops;
808 628};
809struct chan_centers { 629
810 u16 synth_center; 630int ath_reset(struct ath_softc *sc, bool retry_tx);
811 u16 ctl_center; 631int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
812 u16 ext_center; 632int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
813}; 633int ath_cabq_update(struct ath_softc *);
814 634
815struct ath_rate_table; 635static inline void ath_read_cachesize(struct ath_softc *sc, int *csz)
816 636{
817/* Helpers */ 637 sc->bus_ops->read_cachesize(sc, csz);
818 638}
819enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah, 639
820 const struct ath9k_channel *chan); 640static inline void ath_bus_cleanup(struct ath_softc *sc)
821bool ath9k_hw_wait(struct ath_hal *ah, u32 reg, u32 mask, u32 val); 641{
822u32 ath9k_hw_reverse_bits(u32 val, u32 n); 642 sc->bus_ops->cleanup(sc);
823bool ath9k_get_channel_edges(struct ath_hal *ah, 643}
824 u16 flags, u16 *low, 644
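struct ath_bus_ops (defined above) keeps the core bus-agnostic: inline wrappers such as ath_read_cachesize() and ath_bus_cleanup() simply call through whatever function pointers the bus glue installed. A hedged sketch of how a backend might fill in the table; the my_bus_* names are invented for illustration and the bodies are stubs, not the driver's real PCI glue:

	/* Illustrative bus glue: only the wiring matters here, not the bodies. */
	static void my_bus_read_cachesize(struct ath_softc *sc, int *csz)
	{
		*csz = DEFAULT_CACHELINE;	/* fall back to the default cache line size */
	}

	static void my_bus_cleanup(struct ath_softc *sc)
	{
		/* unmap registers, disable the device, release bus resources, ... */
	}

	static bool my_bus_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
	{
		return false;			/* this stub exposes no EEPROM */
	}

	static struct ath_bus_ops my_bus_ops = {
		.read_cachesize	= my_bus_read_cachesize,
		.cleanup	= my_bus_cleanup,
		.eeprom_read	= my_bus_eeprom_read,
	};

	/* The bus glue would then point sc->bus_ops at this table (e.g. before
	 * handing the softc to ath_attach()) so the wrappers above resolve here. */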
825 u16 *high); 645extern struct ieee80211_ops ath9k_ops;
826u16 ath9k_hw_computetxtime(struct ath_hal *ah, 646
827 struct ath_rate_table *rates, 647irqreturn_t ath_isr(int irq, void *dev);
828 u32 frameLen, u16 rateix, 648void ath_cleanup(struct ath_softc *sc);
829 bool shortPreamble); 649int ath_attach(u16 devid, struct ath_softc *sc);
830u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags); 650void ath_detach(struct ath_softc *sc);
831void ath9k_hw_get_channel_centers(struct ath_hal *ah, 651const char *ath_mac_bb_name(u32 mac_bb_version);
832 struct ath9k_channel *chan, 652const char *ath_rf_name(u16 rf_version);
833 struct chan_centers *centers); 653
834 654#ifdef CONFIG_PCI
835/* Attach, Detach */ 655int ath_pci_init(void);
836 656void ath_pci_exit(void);
837const char *ath9k_hw_probe(u16 vendorid, u16 devid); 657#else
838void ath9k_hw_detach(struct ath_hal *ah); 658static inline int ath_pci_init(void) { return 0; };
839struct ath_hal *ath9k_hw_attach(u16 devid, struct ath_softc *sc, 659static inline void ath_pci_exit(void) {};
840 void __iomem *mem, int *error);
841void ath9k_hw_rfdetach(struct ath_hal *ah);
842
843
844/* HW Reset */
845
846bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
847 enum ath9k_ht_macmode macmode,
848 u8 txchainmask, u8 rxchainmask,
849 enum ath9k_ht_extprotspacing extprotspacing,
850 bool bChannelChange, int *status);
851
852/* Key Cache Management */
853
854bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry);
855bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry, const u8 *mac);
856bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
857 const struct ath9k_keyval *k,
858 const u8 *mac, int xorKey);
859bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry);
860
861/* Power Management */
862
863bool ath9k_hw_setpower(struct ath_hal *ah,
864 enum ath9k_power_mode mode);
865void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
866
867/* Beacon timers */
868
869void ath9k_hw_beaconinit(struct ath_hal *ah, u32 next_beacon, u32 beacon_period);
870void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
871 const struct ath9k_beacon_state *bs);
872/* HW Capabilities */
873
874bool ath9k_hw_fill_cap_info(struct ath_hal *ah);
875bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
876 u32 capability, u32 *result);
877bool ath9k_hw_setcapability(struct ath_hal *ah, enum ath9k_capability_type type,
878 u32 capability, u32 setting, int *status);
879
880/* GPIO / RFKILL / Antennae */
881
882void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio);
883u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio);
884void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
885 u32 ah_signal_type);
886void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val);
887#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
888void ath9k_enable_rfkill(struct ath_hal *ah);
889#endif 660#endif
890int ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg);
891u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
892void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna);
893bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
894 enum ath9k_ant_setting settings,
895 struct ath9k_channel *chan,
896 u8 *tx_chainmask,
897 u8 *rx_chainmask,
898 u8 *antenna_cfgd);
899
900/* General Operation */
901
902u32 ath9k_hw_getrxfilter(struct ath_hal *ah);
903void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits);
904bool ath9k_hw_phy_disable(struct ath_hal *ah);
905bool ath9k_hw_disable(struct ath_hal *ah);
906bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit);
907void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac);
908bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac);
909void ath9k_hw_setopmode(struct ath_hal *ah);
910void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0, u32 filter1);
911void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask);
912bool ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask);
913void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, u16 assocId);
914u64 ath9k_hw_gettsf64(struct ath_hal *ah);
915void ath9k_hw_reset_tsf(struct ath_hal *ah);
916bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting);
917bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us);
918void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode);
919
920/* Regulatory */
921
922bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
923struct ath9k_channel* ath9k_regd_check_channel(struct ath_hal *ah,
924 const struct ath9k_channel *c);
925u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan);
926u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
927 struct ath9k_channel *chan);
928bool ath9k_regd_init_channels(struct ath_hal *ah,
929 u32 maxchans, u32 *nchans, u8 *regclassids,
930 u32 maxregids, u32 *nregids, u16 cc,
931 bool enableOutdoor, bool enableExtendedChannels);
932
933/* ANI */
934
935void ath9k_ani_reset(struct ath_hal *ah);
936void ath9k_hw_ani_monitor(struct ath_hal *ah,
937 const struct ath9k_node_stats *stats,
938 struct ath9k_channel *chan);
939bool ath9k_hw_phycounters(struct ath_hal *ah);
940void ath9k_enable_mib_counters(struct ath_hal *ah);
941void ath9k_hw_disable_mib_counters(struct ath_hal *ah);
942u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
943 u32 *rxc_pcnt,
944 u32 *rxf_pcnt,
945 u32 *txf_pcnt);
946void ath9k_hw_procmibevent(struct ath_hal *ah,
947 const struct ath9k_node_stats *stats);
948void ath9k_hw_ani_setup(struct ath_hal *ah);
949void ath9k_hw_ani_attach(struct ath_hal *ah);
950void ath9k_hw_ani_detach(struct ath_hal *ah);
951
952/* Calibration */
953
954void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
955 bool *isCalDone);
956void ath9k_hw_start_nfcal(struct ath_hal *ah);
957void ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan);
958int16_t ath9k_hw_getnf(struct ath_hal *ah,
959 struct ath9k_channel *chan);
960void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah);
961s16 ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan);
962bool ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
963 u8 rxchainmask, bool longcal,
964 bool *isCalDone);
965bool ath9k_hw_init_cal(struct ath_hal *ah,
966 struct ath9k_channel *chan);
967
968
969/* EEPROM */
970
971int ath9k_hw_set_txpower(struct ath_hal *ah,
972 struct ath9k_channel *chan,
973 u16 cfgCtl,
974 u8 twiceAntennaReduction,
975 u8 twiceMaxRegulatoryPower,
976 u8 powerLimit);
977void ath9k_hw_set_addac(struct ath_hal *ah, struct ath9k_channel *chan);
978bool ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
979 struct ath9k_channel *chan,
980 int16_t *ratesArray,
981 u16 cfgCtl,
982 u8 AntennaReduction,
983 u8 twiceMaxRegulatoryPower,
984 u8 powerLimit);
985bool ath9k_hw_set_power_cal_table(struct ath_hal *ah,
986 struct ath9k_channel *chan,
987 int16_t *pTxPowerIndexOffset);
988bool ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
989 struct ath9k_channel *chan);
990int ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal *ah,
991 struct ath9k_channel *chan,
992 u8 index, u16 *config);
993u8 ath9k_hw_get_num_ant_config(struct ath_hal *ah,
994 enum ieee80211_band freq_band);
995u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah, u16 i, bool is2GHz);
996int ath9k_hw_eeprom_attach(struct ath_hal *ah);
997
998/* Interrupt Handling */
999
1000bool ath9k_hw_intrpend(struct ath_hal *ah);
1001bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked);
1002enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah);
1003enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints);
1004
1005/* MAC (PCU/QCU) */
1006
1007u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q);
1008bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, u32 txdp);
1009bool ath9k_hw_txstart(struct ath_hal *ah, u32 q);
1010u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q);
1011bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel);
1012bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q);
1013bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
1014 u32 segLen, bool firstSeg,
1015 bool lastSeg, const struct ath_desc *ds0);
1016void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds);
1017int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds);
1018void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
1019 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
1020 u32 keyIx, enum ath9k_key_type keyType, u32 flags);
1021void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
1022 struct ath_desc *lastds,
1023 u32 durUpdateEn, u32 rtsctsRate,
1024 u32 rtsctsDuration,
1025 struct ath9k_11n_rate_series series[],
1026 u32 nseries, u32 flags);
1027void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
1028 u32 aggrLen);
1029void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
1030 u32 numDelims);
1031void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
1032void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
1033void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
1034 u32 burstDuration);
1035void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
1036 u32 vmf);
1037void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs);
1038bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
1039 const struct ath9k_tx_queue_info *qinfo);
1040bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
1041 struct ath9k_tx_queue_info *qinfo);
1042int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
1043 const struct ath9k_tx_queue_info *qinfo);
1044bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q);
1045bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q);
1046int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
1047 u32 pa, struct ath_desc *nds, u64 tsf);
1048bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
1049 u32 size, u32 flags);
1050bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set);
1051void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp);
1052void ath9k_hw_rxena(struct ath_hal *ah);
1053void ath9k_hw_startpcureceive(struct ath_hal *ah);
1054void ath9k_hw_stoppcurecv(struct ath_hal *ah);
1055bool ath9k_hw_stopdmarecv(struct ath_hal *ah);
1056 661
662#ifdef CONFIG_ATHEROS_AR71XX
663int ath_ahb_init(void);
664void ath_ahb_exit(void);
665#else
666static inline int ath_ahb_init(void) { return 0; };
667static inline void ath_ahb_exit(void) {};
1057#endif 668#endif
669
670static inline void ath9k_ps_wakeup(struct ath_softc *sc)
671{
672 if (atomic_inc_return(&sc->ps_usecount) == 1)
673 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) {
674 sc->sc_ah->restore_mode = sc->sc_ah->power_mode;
675 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
676 }
677}
678
679static inline void ath9k_ps_restore(struct ath_softc *sc)
680{
681 if (atomic_dec_and_test(&sc->ps_usecount))
682 if (sc->hw->conf.flags & IEEE80211_CONF_PS)
683 ath9k_hw_setpower(sc->sc_ah,
684 sc->sc_ah->restore_mode);
685}
686#endif /* ATH9K_H */
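
The ath9k_ps_wakeup()/ath9k_ps_restore() helpers just above keep the chip awake for as long as any caller still holds a reference, using an atomic use count: only the first wakeup changes the power mode (remembering the old one), and only the last restore puts it back. A minimal stand-alone sketch of the same reference-counting idea in C11 follows; the IEEE80211_CONF_PS check and the real ath9k_hw_setpower() call are deliberately simplified away, so this is an illustration of the pattern, not driver code.

#include <stdatomic.h>
#include <stdio.h>

enum pm_mode { PM_AWAKE, PM_NETWORK_SLEEP };

static atomic_int ps_usecount;
static enum pm_mode power_mode = PM_NETWORK_SLEEP;
static enum pm_mode restore_mode = PM_NETWORK_SLEEP;

static void ps_wakeup(void)
{
	/* first user forces the chip awake and remembers the old mode */
	if (atomic_fetch_add(&ps_usecount, 1) == 0 && power_mode != PM_AWAKE) {
		restore_mode = power_mode;
		power_mode = PM_AWAKE;           /* stands in for ath9k_hw_setpower() */
	}
}

static void ps_restore(void)
{
	/* last user puts the chip back into the mode it was in before */
	if (atomic_fetch_sub(&ps_usecount, 1) == 1)
		power_mode = restore_mode;       /* stands in for ath9k_hw_setpower() */
}

int main(void)
{
	ps_wakeup();            /* e.g. taken around a register access */
	ps_wakeup();            /* nesting is fine, count is now 2 */
	ps_restore();
	ps_restore();           /* only here may the chip sleep again */
	printf("mode after restore: %d\n", power_mode);
	return 0;
}
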
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 3ab0b43aaf9..2e2ef352913 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18 18
19/* 19/*
20 * This function will modify certain transmit queue properties depending on 20 * This function will modify certain transmit queue properties depending on
@@ -23,11 +23,11 @@
23*/ 23*/
24static int ath_beaconq_config(struct ath_softc *sc) 24static int ath_beaconq_config(struct ath_softc *sc)
25{ 25{
26 struct ath_hal *ah = sc->sc_ah; 26 struct ath_hw *ah = sc->sc_ah;
27 struct ath9k_tx_queue_info qi; 27 struct ath9k_tx_queue_info qi;
28 28
29 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); 29 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
30 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) { 30 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
31 /* Always burst out beacon and CAB traffic. */ 31 /* Always burst out beacon and CAB traffic. */
32 qi.tqi_aifs = 1; 32 qi.tqi_aifs = 1;
33 qi.tqi_cwmin = 0; 33 qi.tqi_cwmin = 0;
@@ -63,10 +63,10 @@ static void ath_bstuck_process(struct ath_softc *sc)
63 * Beacons are always sent out at the lowest rate, and are not retried. 63 * Beacons are always sent out at the lowest rate, and are not retried.
64*/ 64*/
65static void ath_beacon_setup(struct ath_softc *sc, 65static void ath_beacon_setup(struct ath_softc *sc,
66 struct ath_vap *avp, struct ath_buf *bf) 66 struct ath_vif *avp, struct ath_buf *bf)
67{ 67{
68 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 68 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
69 struct ath_hal *ah = sc->sc_ah; 69 struct ath_hw *ah = sc->sc_ah;
70 struct ath_desc *ds; 70 struct ath_desc *ds;
71 struct ath9k_11n_rate_series series[4]; 71 struct ath9k_11n_rate_series series[4];
72 struct ath_rate_table *rt; 72 struct ath_rate_table *rt;
@@ -82,8 +82,8 @@ static void ath_beacon_setup(struct ath_softc *sc,
82 82
83 flags = ATH9K_TXDESC_NOACK; 83 flags = ATH9K_TXDESC_NOACK;
84 84
85 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC && 85 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC &&
86 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 86 (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
87 ds->ds_link = bf->bf_daddr; /* self-linked */ 87 ds->ds_link = bf->bf_daddr; /* self-linked */
88 flags |= ATH9K_TXDESC_VEOL; 88 flags |= ATH9K_TXDESC_VEOL;
89 /* Let hardware handle antenna switching. */ 89 /* Let hardware handle antenna switching. */
@@ -96,7 +96,7 @@ static void ath_beacon_setup(struct ath_softc *sc,
96 * SWBA's 96 * SWBA's
97 * XXX assumes two antenna 97 * XXX assumes two antenna
98 */ 98 */
99 antenna = ((sc->beacon.ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1); 99 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
100 } 100 }
101 101
102 ds->ds_data = bf->bf_buf_addr; 102 ds->ds_data = bf->bf_buf_addr;
@@ -132,24 +132,24 @@ static void ath_beacon_setup(struct ath_softc *sc,
132 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 132 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
133 series[0].Tries = 1; 133 series[0].Tries = 1;
134 series[0].Rate = rate; 134 series[0].Rate = rate;
135 series[0].ChSel = sc->sc_tx_chainmask; 135 series[0].ChSel = sc->tx_chainmask;
136 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0; 136 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
137 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, 137 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0,
138 ctsrate, ctsduration, series, 4, 0); 138 ctsrate, ctsduration, series, 4, 0);
139} 139}
140 140
141/* Generate beacon frame and queue cab data for a vap */ 141/* Generate beacon frame and queue cab data for a VIF */
142static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id) 142static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
143{ 143{
144 struct ath_buf *bf; 144 struct ath_buf *bf;
145 struct ath_vap *avp; 145 struct ath_vif *avp;
146 struct sk_buff *skb; 146 struct sk_buff *skb;
147 struct ath_txq *cabq; 147 struct ath_txq *cabq;
148 struct ieee80211_vif *vif; 148 struct ieee80211_vif *vif;
149 struct ieee80211_tx_info *info; 149 struct ieee80211_tx_info *info;
150 int cabq_depth; 150 int cabq_depth;
151 151
152 vif = sc->sc_vaps[if_id]; 152 vif = sc->vifs[if_id];
153 ASSERT(vif); 153 ASSERT(vif);
154 154
155 avp = (void *)vif->drv_priv; 155 avp = (void *)vif->drv_priv;
@@ -164,9 +164,9 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
164 bf = avp->av_bcbuf; 164 bf = avp->av_bcbuf;
165 skb = (struct sk_buff *)bf->bf_mpdu; 165 skb = (struct sk_buff *)bf->bf_mpdu;
166 if (skb) { 166 if (skb) {
167 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 167 dma_unmap_single(sc->dev, bf->bf_dmacontext,
168 skb->len, 168 skb->len,
169 PCI_DMA_TODEVICE); 169 DMA_TO_DEVICE);
170 dev_kfree_skb_any(skb); 170 dev_kfree_skb_any(skb);
171 } 171 }
172 172
@@ -188,14 +188,14 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
188 } 188 }
189 189
190 bf->bf_buf_addr = bf->bf_dmacontext = 190 bf->bf_buf_addr = bf->bf_dmacontext =
191 pci_map_single(sc->pdev, skb->data, 191 dma_map_single(sc->dev, skb->data,
192 skb->len, 192 skb->len,
193 PCI_DMA_TODEVICE); 193 DMA_TO_DEVICE);
194 if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_buf_addr))) { 194 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
195 dev_kfree_skb_any(skb); 195 dev_kfree_skb_any(skb);
196 bf->bf_mpdu = NULL; 196 bf->bf_mpdu = NULL;
197 DPRINTF(sc, ATH_DBG_CONFIG, 197 DPRINTF(sc, ATH_DBG_CONFIG,
198 "pci_dma_mapping_error() on beaconing\n"); 198 "dma_mapping_error() on beaconing\n");
199 return NULL; 199 return NULL;
200 } 200 }
201 201
@@ -204,10 +204,10 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
204 /* 204 /*
205 * if the CABQ traffic from previous DTIM is pending and the current 205 * if the CABQ traffic from previous DTIM is pending and the current
206 * beacon is also a DTIM. 206 * beacon is also a DTIM.
207 * 1) if there is only one vap let the cab traffic continue. 207 * 1) if there is only one vif let the cab traffic continue.
208 * 2) if there are more than one vap and we are using staggered 208 * 2) if there are more than one vif and we are using staggered
209 * beacons, then drain the cabq by dropping all the frames in 209 * beacons, then drain the cabq by dropping all the frames in
210 * the cabq so that the current vaps cab traffic can be scheduled. 210 * the cabq so that the current vifs cab traffic can be scheduled.
211 */ 211 */
212 spin_lock_bh(&cabq->axq_lock); 212 spin_lock_bh(&cabq->axq_lock);
213 cabq_depth = cabq->axq_depth; 213 cabq_depth = cabq->axq_depth;
@@ -219,8 +219,8 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
219 * the lock again which is a common function and that 219 * the lock again which is a common function and that
220 * acquires txq lock inside. 220 * acquires txq lock inside.
221 */ 221 */
222 if (sc->sc_nvaps > 1) { 222 if (sc->nvifs > 1) {
223 ath_tx_draintxq(sc, cabq, false); 223 ath_draintxq(sc, cabq, false);
224 DPRINTF(sc, ATH_DBG_BEACON, 224 DPRINTF(sc, ATH_DBG_BEACON,
225 "flush previous cabq traffic\n"); 225 "flush previous cabq traffic\n");
226 } 226 }
@@ -248,12 +248,12 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
248static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id) 248static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
249{ 249{
250 struct ieee80211_vif *vif; 250 struct ieee80211_vif *vif;
251 struct ath_hal *ah = sc->sc_ah; 251 struct ath_hw *ah = sc->sc_ah;
252 struct ath_buf *bf; 252 struct ath_buf *bf;
253 struct ath_vap *avp; 253 struct ath_vif *avp;
254 struct sk_buff *skb; 254 struct sk_buff *skb;
255 255
256 vif = sc->sc_vaps[if_id]; 256 vif = sc->vifs[if_id];
257 ASSERT(vif); 257 ASSERT(vif);
258 258
259 avp = (void *)vif->drv_priv; 259 avp = (void *)vif->drv_priv;
@@ -276,7 +276,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
276 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc); 276 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
277} 277}
278 278
279int ath_beaconq_setup(struct ath_hal *ah) 279int ath_beaconq_setup(struct ath_hw *ah)
280{ 280{
281 struct ath9k_tx_queue_info qi; 281 struct ath9k_tx_queue_info qi;
282 282
@@ -291,13 +291,13 @@ int ath_beaconq_setup(struct ath_hal *ah)
291int ath_beacon_alloc(struct ath_softc *sc, int if_id) 291int ath_beacon_alloc(struct ath_softc *sc, int if_id)
292{ 292{
293 struct ieee80211_vif *vif; 293 struct ieee80211_vif *vif;
294 struct ath_vap *avp; 294 struct ath_vif *avp;
295 struct ieee80211_hdr *hdr; 295 struct ieee80211_hdr *hdr;
296 struct ath_buf *bf; 296 struct ath_buf *bf;
297 struct sk_buff *skb; 297 struct sk_buff *skb;
298 __le64 tstamp; 298 __le64 tstamp;
299 299
300 vif = sc->sc_vaps[if_id]; 300 vif = sc->vifs[if_id];
301 ASSERT(vif); 301 ASSERT(vif);
302 302
303 avp = (void *)vif->drv_priv; 303 avp = (void *)vif->drv_priv;
@@ -310,11 +310,11 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
310 struct ath_buf, list); 310 struct ath_buf, list);
311 list_del(&avp->av_bcbuf->list); 311 list_del(&avp->av_bcbuf->list);
312 312
313 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP || 313 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
314 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 314 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
315 int slot; 315 int slot;
316 /* 316 /*
317 * Assign the vap to a beacon xmit slot. As 317 * Assign the vif to a beacon xmit slot. As
318 * above, this cannot fail to find one. 318 * above, this cannot fail to find one.
319 */ 319 */
320 avp->av_bslot = 0; 320 avp->av_bslot = 0;
@@ -335,7 +335,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
335 } 335 }
336 BUG_ON(sc->beacon.bslot[avp->av_bslot] != ATH_IF_ID_ANY); 336 BUG_ON(sc->beacon.bslot[avp->av_bslot] != ATH_IF_ID_ANY);
337 sc->beacon.bslot[avp->av_bslot] = if_id; 337 sc->beacon.bslot[avp->av_bslot] = if_id;
338 sc->sc_nbcnvaps++; 338 sc->nbcnvifs++;
339 } 339 }
340 } 340 }
341 341
@@ -343,9 +343,9 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
343 bf = avp->av_bcbuf; 343 bf = avp->av_bcbuf;
344 if (bf->bf_mpdu != NULL) { 344 if (bf->bf_mpdu != NULL) {
345 skb = (struct sk_buff *)bf->bf_mpdu; 345 skb = (struct sk_buff *)bf->bf_mpdu;
346 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 346 dma_unmap_single(sc->dev, bf->bf_dmacontext,
347 skb->len, 347 skb->len,
348 PCI_DMA_TODEVICE); 348 DMA_TO_DEVICE);
349 dev_kfree_skb_any(skb); 349 dev_kfree_skb_any(skb);
350 bf->bf_mpdu = NULL; 350 bf->bf_mpdu = NULL;
351 } 351 }
@@ -384,8 +384,8 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
384 * timestamp then convert to TSF units and handle 384 * timestamp then convert to TSF units and handle
385 * byte swapping before writing it in the frame. 385 * byte swapping before writing it in the frame.
386 * The hardware will then add this each time a beacon 386 * The hardware will then add this each time a beacon
387 * frame is sent. Note that we align vap's 1..N 387 * frame is sent. Note that we align vif's 1..N
388 * and leave vap 0 untouched. This means vap 0 388 * and leave vif 0 untouched. This means vap 0
389 * has a timestamp in one beacon interval while the 389 * has a timestamp in one beacon interval while the
390 * others get a timestamp aligned to the next interval. 390 * others get a timestamp aligned to the next interval.
391 */ 391 */
@@ -402,36 +402,36 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
402 402
403 bf->bf_mpdu = skb; 403 bf->bf_mpdu = skb;
404 bf->bf_buf_addr = bf->bf_dmacontext = 404 bf->bf_buf_addr = bf->bf_dmacontext =
405 pci_map_single(sc->pdev, skb->data, 405 dma_map_single(sc->dev, skb->data,
406 skb->len, 406 skb->len,
407 PCI_DMA_TODEVICE); 407 DMA_TO_DEVICE);
408 if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_buf_addr))) { 408 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
409 dev_kfree_skb_any(skb); 409 dev_kfree_skb_any(skb);
410 bf->bf_mpdu = NULL; 410 bf->bf_mpdu = NULL;
411 DPRINTF(sc, ATH_DBG_CONFIG, 411 DPRINTF(sc, ATH_DBG_CONFIG,
412 "pci_dma_mapping_error() on beacon alloc\n"); 412 "dma_mapping_error() on beacon alloc\n");
413 return -ENOMEM; 413 return -ENOMEM;
414 } 414 }
415 415
416 return 0; 416 return 0;
417} 417}
418 418
419void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp) 419void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
420{ 420{
421 if (avp->av_bcbuf != NULL) { 421 if (avp->av_bcbuf != NULL) {
422 struct ath_buf *bf; 422 struct ath_buf *bf;
423 423
424 if (avp->av_bslot != -1) { 424 if (avp->av_bslot != -1) {
425 sc->beacon.bslot[avp->av_bslot] = ATH_IF_ID_ANY; 425 sc->beacon.bslot[avp->av_bslot] = ATH_IF_ID_ANY;
426 sc->sc_nbcnvaps--; 426 sc->nbcnvifs--;
427 } 427 }
428 428
429 bf = avp->av_bcbuf; 429 bf = avp->av_bcbuf;
430 if (bf->bf_mpdu != NULL) { 430 if (bf->bf_mpdu != NULL) {
431 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 431 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
432 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 432 dma_unmap_single(sc->dev, bf->bf_dmacontext,
433 skb->len, 433 skb->len,
434 PCI_DMA_TODEVICE); 434 DMA_TO_DEVICE);
435 dev_kfree_skb_any(skb); 435 dev_kfree_skb_any(skb);
436 bf->bf_mpdu = NULL; 436 bf->bf_mpdu = NULL;
437 } 437 }
@@ -444,7 +444,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
444void ath9k_beacon_tasklet(unsigned long data) 444void ath9k_beacon_tasklet(unsigned long data)
445{ 445{
446 struct ath_softc *sc = (struct ath_softc *)data; 446 struct ath_softc *sc = (struct ath_softc *)data;
447 struct ath_hal *ah = sc->sc_ah; 447 struct ath_hw *ah = sc->sc_ah;
448 struct ath_buf *bf = NULL; 448 struct ath_buf *bf = NULL;
449 int slot, if_id; 449 int slot, if_id;
450 u32 bfaddr; 450 u32 bfaddr;
@@ -597,7 +597,7 @@ void ath9k_beacon_tasklet(unsigned long data)
597 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr); 597 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
598 ath9k_hw_txstart(ah, sc->beacon.beaconq); 598 ath9k_hw_txstart(ah, sc->beacon.beaconq);
599 599
600 sc->beacon.ast_be_xmit += bc; /* XXX per-vap? */ 600 sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
601 } 601 }
602} 602}
603 603
@@ -619,19 +619,19 @@ void ath9k_beacon_tasklet(unsigned long data)
619void ath_beacon_config(struct ath_softc *sc, int if_id) 619void ath_beacon_config(struct ath_softc *sc, int if_id)
620{ 620{
621 struct ieee80211_vif *vif; 621 struct ieee80211_vif *vif;
622 struct ath_hal *ah = sc->sc_ah; 622 struct ath_hw *ah = sc->sc_ah;
623 struct ath_beacon_config conf; 623 struct ath_beacon_config conf;
624 struct ath_vap *avp; 624 struct ath_vif *avp;
625 enum nl80211_iftype opmode; 625 enum nl80211_iftype opmode;
626 u32 nexttbtt, intval; 626 u32 nexttbtt, intval;
627 627
628 if (if_id != ATH_IF_ID_ANY) { 628 if (if_id != ATH_IF_ID_ANY) {
629 vif = sc->sc_vaps[if_id]; 629 vif = sc->vifs[if_id];
630 ASSERT(vif); 630 ASSERT(vif);
631 avp = (void *)vif->drv_priv; 631 avp = (void *)vif->drv_priv;
632 opmode = avp->av_opmode; 632 opmode = avp->av_opmode;
633 } else { 633 } else {
634 opmode = sc->sc_ah->ah_opmode; 634 opmode = sc->sc_ah->opmode;
635 } 635 }
636 636
637 memset(&conf, 0, sizeof(struct ath_beacon_config)); 637 memset(&conf, 0, sizeof(struct ath_beacon_config));
@@ -647,7 +647,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
647 nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp); 647 nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp);
648 648
649 /* XXX conditionalize multi-bss support? */ 649 /* XXX conditionalize multi-bss support? */
650 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) { 650 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
651 /* 651 /*
652 * For multi-bss ap support beacons are either staggered 652 * For multi-bss ap support beacons are either staggered
653 * evenly over N slots or burst together. For the former 653 * evenly over N slots or burst together. For the former
@@ -670,7 +670,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
670 nexttbtt, intval, conf.beacon_interval); 670 nexttbtt, intval, conf.beacon_interval);
671 671
672 /* Check for NL80211_IFTYPE_AP and sc_nostabeacons for WDS client */ 672 /* Check for NL80211_IFTYPE_AP and sc_nostabeacons for WDS client */
673 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) { 673 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
674 struct ath9k_beacon_state bs; 674 struct ath9k_beacon_state bs;
675 u64 tsf; 675 u64 tsf;
676 u32 tsftu; 676 u32 tsftu;
@@ -781,15 +781,15 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
781 781
782 ath9k_hw_set_interrupts(ah, 0); 782 ath9k_hw_set_interrupts(ah, 0);
783 ath9k_hw_set_sta_beacon_timers(ah, &bs); 783 ath9k_hw_set_sta_beacon_timers(ah, &bs);
784 sc->sc_imask |= ATH9K_INT_BMISS; 784 sc->imask |= ATH9K_INT_BMISS;
785 ath9k_hw_set_interrupts(ah, sc->sc_imask); 785 ath9k_hw_set_interrupts(ah, sc->imask);
786 } else { 786 } else {
787 u64 tsf; 787 u64 tsf;
788 u32 tsftu; 788 u32 tsftu;
789 ath9k_hw_set_interrupts(ah, 0); 789 ath9k_hw_set_interrupts(ah, 0);
790 if (nexttbtt == intval) 790 if (nexttbtt == intval)
791 intval |= ATH9K_BEACON_RESET_TSF; 791 intval |= ATH9K_BEACON_RESET_TSF;
792 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC) { 792 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) {
793 /* 793 /*
794 * Pull nexttbtt forward to reflect the current 794 * Pull nexttbtt forward to reflect the current
795 * TSF 795 * TSF
@@ -818,27 +818,27 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
818 * deal with things. 818 * deal with things.
819 */ 819 */
820 intval |= ATH9K_BEACON_ENA; 820 intval |= ATH9K_BEACON_ENA;
821 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 821 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
822 sc->sc_imask |= ATH9K_INT_SWBA; 822 sc->imask |= ATH9K_INT_SWBA;
823 ath_beaconq_config(sc); 823 ath_beaconq_config(sc);
824 } else if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) { 824 } else if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
825 /* 825 /*
826 * In AP mode we enable the beacon timers and 826 * In AP mode we enable the beacon timers and
827 * SWBA interrupts to prepare beacon frames. 827 * SWBA interrupts to prepare beacon frames.
828 */ 828 */
829 intval |= ATH9K_BEACON_ENA; 829 intval |= ATH9K_BEACON_ENA;
830 sc->sc_imask |= ATH9K_INT_SWBA; /* beacon prepare */ 830 sc->imask |= ATH9K_INT_SWBA; /* beacon prepare */
831 ath_beaconq_config(sc); 831 ath_beaconq_config(sc);
832 } 832 }
833 ath9k_hw_beaconinit(ah, nexttbtt, intval); 833 ath9k_hw_beaconinit(ah, nexttbtt, intval);
834 sc->beacon.bmisscnt = 0; 834 sc->beacon.bmisscnt = 0;
835 ath9k_hw_set_interrupts(ah, sc->sc_imask); 835 ath9k_hw_set_interrupts(ah, sc->imask);
836 /* 836 /*
837 * When using a self-linked beacon descriptor in 837 * When using a self-linked beacon descriptor in
838 * ibss mode load it once here. 838 * ibss mode load it once here.
839 */ 839 */
840 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC && 840 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC &&
841 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 841 (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
842 ath_beacon_start_adhoc(sc, 0); 842 ath_beacon_start_adhoc(sc, 0);
843 } 843 }
844} 844}
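
For multi-BSS AP operation, ath_beacon_config() above either bursts all beacons together or staggers them evenly over the beacon slots, one TBTT offset per interface. The exact arithmetic is not shown in these hunks, so the following stand-alone sketch only illustrates the staggering idea under assumed values (four beacon slots, a 100 TU interval); it is not taken from the driver.

#include <stdio.h>

#define BCN_SLOTS 4        /* assumed number of beacon slots */

int main(void)
{
	unsigned int intval = 100;                  /* beacon interval in TU (assumed) */
	unsigned int per_slot = intval / BCN_SLOTS;

	/* staggered mode: each interface's beacon fires one slot later */
	for (unsigned int slot = 0; slot < BCN_SLOTS; slot++)
		printf("slot %u: beacon fires %u TU into the interval\n",
		       slot, slot * per_slot);
	return 0;
}
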
diff --git a/drivers/net/wireless/ath9k/calib.c b/drivers/net/wireless/ath9k/calib.c
index 3c7454fc51b..1fc3a08e85c 100644
--- a/drivers/net/wireless/ath9k/calib.c
+++ b/drivers/net/wireless/ath9k/calib.c
@@ -14,12 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
22static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
23 18
24/* We can tune this as we go by monitoring really low values */ 19/* We can tune this as we go by monitoring really low values */
25#define ATH9K_NF_TOO_LOW -60 20#define ATH9K_NF_TOO_LOW -60
@@ -28,7 +23,7 @@ static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
28 * is incorrect and we should use the static NF value. Later we can try to 23 * is incorrect and we should use the static NF value. Later we can try to
29 * find out why they are reporting these values */ 24 * find out why they are reporting these values */
30 25
31static bool ath9k_hw_nf_in_range(struct ath_hal *ah, s16 nf) 26static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf)
32{ 27{
33 if (nf > ATH9K_NF_TOO_LOW) { 28 if (nf > ATH9K_NF_TOO_LOW) {
34 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 29 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
@@ -91,7 +86,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
91 return; 86 return;
92} 87}
93 88
94static void ath9k_hw_do_getnf(struct ath_hal *ah, 89static void ath9k_hw_do_getnf(struct ath_hw *ah,
95 int16_t nfarray[NUM_NF_READINGS]) 90 int16_t nfarray[NUM_NF_READINGS])
96{ 91{
97 int16_t nf; 92 int16_t nf;
@@ -107,27 +102,29 @@ static void ath9k_hw_do_getnf(struct ath_hal *ah,
107 "NF calibrated [ctl] [chain 0] is %d\n", nf); 102 "NF calibrated [ctl] [chain 0] is %d\n", nf);
108 nfarray[0] = nf; 103 nfarray[0] = nf;
109 104
110 if (AR_SREV_9280_10_OR_LATER(ah)) 105 if (!AR_SREV_9285(ah)) {
111 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), 106 if (AR_SREV_9280_10_OR_LATER(ah))
112 AR9280_PHY_CH1_MINCCA_PWR); 107 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
113 else 108 AR9280_PHY_CH1_MINCCA_PWR);
114 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), 109 else
115 AR_PHY_CH1_MINCCA_PWR); 110 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
116 111 AR_PHY_CH1_MINCCA_PWR);
117 if (nf & 0x100)
118 nf = 0 - ((nf ^ 0x1ff) + 1);
119 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
120 "NF calibrated [ctl] [chain 1] is %d\n", nf);
121 nfarray[1] = nf;
122 112
123 if (!AR_SREV_9280(ah)) {
124 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
125 AR_PHY_CH2_MINCCA_PWR);
126 if (nf & 0x100) 113 if (nf & 0x100)
127 nf = 0 - ((nf ^ 0x1ff) + 1); 114 nf = 0 - ((nf ^ 0x1ff) + 1);
128 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 115 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
129 "NF calibrated [ctl] [chain 2] is %d\n", nf); 116 "NF calibrated [ctl] [chain 1] is %d\n", nf);
130 nfarray[2] = nf; 117 nfarray[1] = nf;
118
119 if (!AR_SREV_9280(ah)) {
120 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
121 AR_PHY_CH2_MINCCA_PWR);
122 if (nf & 0x100)
123 nf = 0 - ((nf ^ 0x1ff) + 1);
124 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
125 "NF calibrated [ctl] [chain 2] is %d\n", nf);
126 nfarray[2] = nf;
127 }
131 } 128 }
132 129
133 if (AR_SREV_9280_10_OR_LATER(ah)) 130 if (AR_SREV_9280_10_OR_LATER(ah))
@@ -143,58 +140,52 @@ static void ath9k_hw_do_getnf(struct ath_hal *ah,
143 "NF calibrated [ext] [chain 0] is %d\n", nf); 140 "NF calibrated [ext] [chain 0] is %d\n", nf);
144 nfarray[3] = nf; 141 nfarray[3] = nf;
145 142
146 if (AR_SREV_9280_10_OR_LATER(ah)) 143 if (!AR_SREV_9285(ah)) {
147 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), 144 if (AR_SREV_9280_10_OR_LATER(ah))
148 AR9280_PHY_CH1_EXT_MINCCA_PWR); 145 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
149 else 146 AR9280_PHY_CH1_EXT_MINCCA_PWR);
150 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), 147 else
151 AR_PHY_CH1_EXT_MINCCA_PWR); 148 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
149 AR_PHY_CH1_EXT_MINCCA_PWR);
152 150
153 if (nf & 0x100)
154 nf = 0 - ((nf ^ 0x1ff) + 1);
155 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
156 "NF calibrated [ext] [chain 1] is %d\n", nf);
157 nfarray[4] = nf;
158
159 if (!AR_SREV_9280(ah)) {
160 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
161 AR_PHY_CH2_EXT_MINCCA_PWR);
162 if (nf & 0x100) 151 if (nf & 0x100)
163 nf = 0 - ((nf ^ 0x1ff) + 1); 152 nf = 0 - ((nf ^ 0x1ff) + 1);
164 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 153 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
165 "NF calibrated [ext] [chain 2] is %d\n", nf); 154 "NF calibrated [ext] [chain 1] is %d\n", nf);
166 nfarray[5] = nf; 155 nfarray[4] = nf;
156
157 if (!AR_SREV_9280(ah)) {
158 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
159 AR_PHY_CH2_EXT_MINCCA_PWR);
160 if (nf & 0x100)
161 nf = 0 - ((nf ^ 0x1ff) + 1);
162 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
163 "NF calibrated [ext] [chain 2] is %d\n", nf);
164 nfarray[5] = nf;
165 }
167 } 166 }
168} 167}
169 168
170static bool getNoiseFloorThresh(struct ath_hal *ah, 169static bool getNoiseFloorThresh(struct ath_hw *ah,
171 const struct ath9k_channel *chan, 170 enum ieee80211_band band,
172 int16_t *nft) 171 int16_t *nft)
173{ 172{
174 switch (chan->chanmode) { 173 switch (band) {
175 case CHANNEL_A: 174 case IEEE80211_BAND_5GHZ:
176 case CHANNEL_A_HT20: 175 *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
177 case CHANNEL_A_HT40PLUS:
178 case CHANNEL_A_HT40MINUS:
179 *nft = (int8_t)ath9k_hw_get_eeprom(ah, EEP_NFTHRESH_5);
180 break; 176 break;
181 case CHANNEL_B: 177 case IEEE80211_BAND_2GHZ:
182 case CHANNEL_G: 178 *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
183 case CHANNEL_G_HT20:
184 case CHANNEL_G_HT40PLUS:
185 case CHANNEL_G_HT40MINUS:
186 *nft = (int8_t)ath9k_hw_get_eeprom(ah, EEP_NFTHRESH_2);
187 break; 179 break;
188 default: 180 default:
189 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 181 BUG_ON(1);
190 "invalid channel flags 0x%x\n", chan->channelFlags);
191 return false; 182 return false;
192 } 183 }
193 184
194 return true; 185 return true;
195} 186}
196 187
197static void ath9k_hw_setup_calibration(struct ath_hal *ah, 188static void ath9k_hw_setup_calibration(struct ath_hw *ah,
198 struct hal_cal_list *currCal) 189 struct hal_cal_list *currCal)
199{ 190{
200 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0), 191 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
@@ -228,10 +219,9 @@ static void ath9k_hw_setup_calibration(struct ath_hal *ah,
228 AR_PHY_TIMING_CTRL4_DO_CAL); 219 AR_PHY_TIMING_CTRL4_DO_CAL);
229} 220}
230 221
231static void ath9k_hw_reset_calibration(struct ath_hal *ah, 222static void ath9k_hw_reset_calibration(struct ath_hw *ah,
232 struct hal_cal_list *currCal) 223 struct hal_cal_list *currCal)
233{ 224{
234 struct ath_hal_5416 *ahp = AH5416(ah);
235 int i; 225 int i;
236 226
237 ath9k_hw_setup_calibration(ah, currCal); 227 ath9k_hw_setup_calibration(ah, currCal);
@@ -239,23 +229,21 @@ static void ath9k_hw_reset_calibration(struct ath_hal *ah,
239 currCal->calState = CAL_RUNNING; 229 currCal->calState = CAL_RUNNING;
240 230
241 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 231 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
242 ahp->ah_Meas0.sign[i] = 0; 232 ah->meas0.sign[i] = 0;
243 ahp->ah_Meas1.sign[i] = 0; 233 ah->meas1.sign[i] = 0;
244 ahp->ah_Meas2.sign[i] = 0; 234 ah->meas2.sign[i] = 0;
245 ahp->ah_Meas3.sign[i] = 0; 235 ah->meas3.sign[i] = 0;
246 } 236 }
247 237
248 ahp->ah_CalSamples = 0; 238 ah->cal_samples = 0;
249} 239}
250 240
251static void ath9k_hw_per_calibration(struct ath_hal *ah, 241static void ath9k_hw_per_calibration(struct ath_hw *ah,
252 struct ath9k_channel *ichan, 242 struct ath9k_channel *ichan,
253 u8 rxchainmask, 243 u8 rxchainmask,
254 struct hal_cal_list *currCal, 244 struct hal_cal_list *currCal,
255 bool *isCalDone) 245 bool *isCalDone)
256{ 246{
257 struct ath_hal_5416 *ahp = AH5416(ah);
258
259 *isCalDone = false; 247 *isCalDone = false;
260 248
261 if (currCal->calState == CAL_RUNNING) { 249 if (currCal->calState == CAL_RUNNING) {
@@ -263,9 +251,9 @@ static void ath9k_hw_per_calibration(struct ath_hal *ah,
263 AR_PHY_TIMING_CTRL4_DO_CAL)) { 251 AR_PHY_TIMING_CTRL4_DO_CAL)) {
264 252
265 currCal->calData->calCollect(ah); 253 currCal->calData->calCollect(ah);
266 ahp->ah_CalSamples++; 254 ah->cal_samples++;
267 255
268 if (ahp->ah_CalSamples >= currCal->calData->calNumSamples) { 256 if (ah->cal_samples >= currCal->calData->calNumSamples) {
269 int i, numChains = 0; 257 int i, numChains = 0;
270 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 258 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
271 if (rxchainmask & (1 << i)) 259 if (rxchainmask & (1 << i))
@@ -285,113 +273,105 @@ static void ath9k_hw_per_calibration(struct ath_hal *ah,
285 } 273 }
286} 274}
287 275
288static bool ath9k_hw_iscal_supported(struct ath_hal *ah, 276/* Assumes you are talking about the currently configured channel */
289 struct ath9k_channel *chan, 277static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
290 enum hal_cal_types calType) 278 enum hal_cal_types calType)
291{ 279{
292 struct ath_hal_5416 *ahp = AH5416(ah); 280 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
293 bool retval = false;
294 281
295 switch (calType & ahp->ah_suppCals) { 282 switch (calType & ah->supp_cals) {
296 case IQ_MISMATCH_CAL: 283 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
297 if (!IS_CHAN_B(chan)) 284 return true;
298 retval = true;
299 break;
300 case ADC_GAIN_CAL: 285 case ADC_GAIN_CAL:
301 case ADC_DC_CAL: 286 case ADC_DC_CAL:
302 if (!IS_CHAN_B(chan) 287 if (conf->channel->band == IEEE80211_BAND_5GHZ &&
303 && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) 288 conf_is_ht20(conf))
304 retval = true; 289 return true;
305 break; 290 break;
306 } 291 }
307 292 return false;
308 return retval;
309} 293}
310 294
311static void ath9k_hw_iqcal_collect(struct ath_hal *ah) 295static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
312{ 296{
313 struct ath_hal_5416 *ahp = AH5416(ah);
314 int i; 297 int i;
315 298
316 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 299 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
317 ahp->ah_totalPowerMeasI[i] += 300 ah->totalPowerMeasI[i] +=
318 REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); 301 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
319 ahp->ah_totalPowerMeasQ[i] += 302 ah->totalPowerMeasQ[i] +=
320 REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 303 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
321 ahp->ah_totalIqCorrMeas[i] += 304 ah->totalIqCorrMeas[i] +=
322 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); 305 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
323 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 306 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
324 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", 307 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
325 ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i], 308 ah->cal_samples, i, ah->totalPowerMeasI[i],
326 ahp->ah_totalPowerMeasQ[i], 309 ah->totalPowerMeasQ[i],
327 ahp->ah_totalIqCorrMeas[i]); 310 ah->totalIqCorrMeas[i]);
328 } 311 }
329} 312}
330 313
331static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah) 314static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
332{ 315{
333 struct ath_hal_5416 *ahp = AH5416(ah);
334 int i; 316 int i;
335 317
336 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 318 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
337 ahp->ah_totalAdcIOddPhase[i] += 319 ah->totalAdcIOddPhase[i] +=
338 REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); 320 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
339 ahp->ah_totalAdcIEvenPhase[i] += 321 ah->totalAdcIEvenPhase[i] +=
340 REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 322 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
341 ahp->ah_totalAdcQOddPhase[i] += 323 ah->totalAdcQOddPhase[i] +=
342 REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); 324 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
343 ahp->ah_totalAdcQEvenPhase[i] += 325 ah->totalAdcQEvenPhase[i] +=
344 REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 326 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
345 327
346 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 328 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
347 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 329 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
348 "oddq=0x%08x; evenq=0x%08x;\n", 330 "oddq=0x%08x; evenq=0x%08x;\n",
349 ahp->ah_CalSamples, i, 331 ah->cal_samples, i,
350 ahp->ah_totalAdcIOddPhase[i], 332 ah->totalAdcIOddPhase[i],
351 ahp->ah_totalAdcIEvenPhase[i], 333 ah->totalAdcIEvenPhase[i],
352 ahp->ah_totalAdcQOddPhase[i], 334 ah->totalAdcQOddPhase[i],
353 ahp->ah_totalAdcQEvenPhase[i]); 335 ah->totalAdcQEvenPhase[i]);
354 } 336 }
355} 337}
356 338
357static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah) 339static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
358{ 340{
359 struct ath_hal_5416 *ahp = AH5416(ah);
360 int i; 341 int i;
361 342
362 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 343 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
363 ahp->ah_totalAdcDcOffsetIOddPhase[i] += 344 ah->totalAdcDcOffsetIOddPhase[i] +=
364 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); 345 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
365 ahp->ah_totalAdcDcOffsetIEvenPhase[i] += 346 ah->totalAdcDcOffsetIEvenPhase[i] +=
366 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 347 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
367 ahp->ah_totalAdcDcOffsetQOddPhase[i] += 348 ah->totalAdcDcOffsetQOddPhase[i] +=
368 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); 349 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
369 ahp->ah_totalAdcDcOffsetQEvenPhase[i] += 350 ah->totalAdcDcOffsetQEvenPhase[i] +=
370 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 351 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
371 352
372 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 353 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
373 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 354 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
374 "oddq=0x%08x; evenq=0x%08x;\n", 355 "oddq=0x%08x; evenq=0x%08x;\n",
375 ahp->ah_CalSamples, i, 356 ah->cal_samples, i,
376 ahp->ah_totalAdcDcOffsetIOddPhase[i], 357 ah->totalAdcDcOffsetIOddPhase[i],
377 ahp->ah_totalAdcDcOffsetIEvenPhase[i], 358 ah->totalAdcDcOffsetIEvenPhase[i],
378 ahp->ah_totalAdcDcOffsetQOddPhase[i], 359 ah->totalAdcDcOffsetQOddPhase[i],
379 ahp->ah_totalAdcDcOffsetQEvenPhase[i]); 360 ah->totalAdcDcOffsetQEvenPhase[i]);
380 } 361 }
381} 362}
382 363
383static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains) 364static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
384{ 365{
385 struct ath_hal_5416 *ahp = AH5416(ah);
386 u32 powerMeasQ, powerMeasI, iqCorrMeas; 366 u32 powerMeasQ, powerMeasI, iqCorrMeas;
387 u32 qCoffDenom, iCoffDenom; 367 u32 qCoffDenom, iCoffDenom;
388 int32_t qCoff, iCoff; 368 int32_t qCoff, iCoff;
389 int iqCorrNeg, i; 369 int iqCorrNeg, i;
390 370
391 for (i = 0; i < numChains; i++) { 371 for (i = 0; i < numChains; i++) {
392 powerMeasI = ahp->ah_totalPowerMeasI[i]; 372 powerMeasI = ah->totalPowerMeasI[i];
393 powerMeasQ = ahp->ah_totalPowerMeasQ[i]; 373 powerMeasQ = ah->totalPowerMeasQ[i];
394 iqCorrMeas = ahp->ah_totalIqCorrMeas[i]; 374 iqCorrMeas = ah->totalIqCorrMeas[i];
395 375
396 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 376 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
397 "Starting IQ Cal and Correction for Chain %d\n", 377 "Starting IQ Cal and Correction for Chain %d\n",
@@ -399,7 +379,7 @@ static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
399 379
400 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 380 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
401 "Orignal: Chn %diq_corr_meas = 0x%08x\n", 381 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
402 i, ahp->ah_totalIqCorrMeas[i]); 382 i, ah->totalIqCorrMeas[i]);
403 383
404 iqCorrNeg = 0; 384 iqCorrNeg = 0;
405 385
@@ -457,17 +437,16 @@ static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
457 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE); 437 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
458} 438}
459 439
460static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains) 440static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
461{ 441{
462 struct ath_hal_5416 *ahp = AH5416(ah);
463 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset; 442 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
464 u32 qGainMismatch, iGainMismatch, val, i; 443 u32 qGainMismatch, iGainMismatch, val, i;
465 444
466 for (i = 0; i < numChains; i++) { 445 for (i = 0; i < numChains; i++) {
467 iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i]; 446 iOddMeasOffset = ah->totalAdcIOddPhase[i];
468 iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i]; 447 iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
469 qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i]; 448 qOddMeasOffset = ah->totalAdcQOddPhase[i];
470 qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i]; 449 qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
471 450
472 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 451 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
473 "Starting ADC Gain Cal for Chain %d\n", i); 452 "Starting ADC Gain Cal for Chain %d\n", i);
@@ -515,21 +494,20 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
515 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE); 494 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
516} 495}
517 496
518static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains) 497static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
519{ 498{
520 struct ath_hal_5416 *ahp = AH5416(ah);
521 u32 iOddMeasOffset, iEvenMeasOffset, val, i; 499 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
522 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch; 500 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
523 const struct hal_percal_data *calData = 501 const struct hal_percal_data *calData =
524 ahp->ah_cal_list_curr->calData; 502 ah->cal_list_curr->calData;
525 u32 numSamples = 503 u32 numSamples =
526 (1 << (calData->calCountMax + 5)) * calData->calNumSamples; 504 (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
527 505
528 for (i = 0; i < numChains; i++) { 506 for (i = 0; i < numChains; i++) {
529 iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i]; 507 iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
530 iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i]; 508 iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
531 qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i]; 509 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
532 qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i]; 510 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
533 511
534 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 512 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
535 "Starting ADC DC Offset Cal for Chain %d\n", i); 513 "Starting ADC DC Offset Cal for Chain %d\n", i);
@@ -573,53 +551,42 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains)
573 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE); 551 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
574} 552}
575 553
576void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan, 554/* This is done for the currently configured channel */
577 bool *isCalDone) 555bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
578{ 556{
579 struct ath_hal_5416 *ahp = AH5416(ah); 557 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
580 struct ath9k_channel *ichan = 558 struct hal_cal_list *currCal = ah->cal_list_curr;
581 ath9k_regd_check_channel(ah, chan);
582 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
583 559
584 *isCalDone = true; 560 if (!ah->curchan)
561 return true;
585 562
586 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah)) 563 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
587 return; 564 return true;
588 565
589 if (currCal == NULL) 566 if (currCal == NULL)
590 return; 567 return true;
591
592 if (ichan == NULL) {
593 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
594 "invalid channel %u/0x%x; no mapping\n",
595 chan->channel, chan->channelFlags);
596 return;
597 }
598
599 568
600 if (currCal->calState != CAL_DONE) { 569 if (currCal->calState != CAL_DONE) {
601 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 570 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
602 "Calibration state incorrect, %d\n", 571 "Calibration state incorrect, %d\n",
603 currCal->calState); 572 currCal->calState);
604 return; 573 return true;
605 } 574 }
606 575
607 576 if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType))
608 if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType)) 577 return true;
609 return;
610 578
611 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 579 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
612 "Resetting Cal %d state for channel %u/0x%x\n", 580 "Resetting Cal %d state for channel %u\n",
613 currCal->calData->calType, chan->channel, 581 currCal->calData->calType, conf->channel->center_freq);
614 chan->channelFlags);
615 582
616 ichan->CalValid &= ~currCal->calData->calType; 583 ah->curchan->CalValid &= ~currCal->calData->calType;
617 currCal->calState = CAL_WAITING; 584 currCal->calState = CAL_WAITING;
618 585
619 *isCalDone = false; 586 return false;
620} 587}
621 588
622void ath9k_hw_start_nfcal(struct ath_hal *ah) 589void ath9k_hw_start_nfcal(struct ath_hw *ah)
623{ 590{
624 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 591 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
625 AR_PHY_AGC_CONTROL_ENABLE_NF); 592 AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -628,7 +595,7 @@ void ath9k_hw_start_nfcal(struct ath_hal *ah)
628 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 595 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
629} 596}
630 597
631void ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan) 598void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
632{ 599{
633 struct ath9k_nfcal_hist *h; 600 struct ath9k_nfcal_hist *h;
634 int i, j; 601 int i, j;
@@ -643,16 +610,14 @@ void ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
643 }; 610 };
644 u8 chainmask; 611 u8 chainmask;
645 612
646 if (AR_SREV_9280(ah)) 613 if (AR_SREV_9285(ah))
614 chainmask = 0x9;
615 else if (AR_SREV_9280(ah))
647 chainmask = 0x1B; 616 chainmask = 0x1B;
648 else 617 else
649 chainmask = 0x3F; 618 chainmask = 0x3F;
650 619
651#ifdef ATH_NF_PER_CHAN
652 h = chan->nfCalHist;
653#else
654 h = ah->nfCalHist; 620 h = ah->nfCalHist;
655#endif
656 621
657 for (i = 0; i < NUM_NF_READINGS; i++) { 622 for (i = 0; i < NUM_NF_READINGS; i++) {
658 if (chainmask & (1 << i)) { 623 if (chainmask & (1 << i)) {
@@ -686,18 +651,13 @@ void ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
686 } 651 }
687} 652}
688 653
689int16_t ath9k_hw_getnf(struct ath_hal *ah, 654int16_t ath9k_hw_getnf(struct ath_hw *ah,
690 struct ath9k_channel *chan) 655 struct ath9k_channel *chan)
691{ 656{
692 int16_t nf, nfThresh; 657 int16_t nf, nfThresh;
693 int16_t nfarray[NUM_NF_READINGS] = { 0 }; 658 int16_t nfarray[NUM_NF_READINGS] = { 0 };
694 struct ath9k_nfcal_hist *h; 659 struct ath9k_nfcal_hist *h;
695 u8 chainmask; 660 struct ieee80211_channel *c = chan->chan;
696
697 if (AR_SREV_9280(ah))
698 chainmask = 0x1B;
699 else
700 chainmask = 0x3F;
701 661
702 chan->channelFlags &= (~CHANNEL_CW_INT); 662 chan->channelFlags &= (~CHANNEL_CW_INT);
703 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 663 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
@@ -709,7 +669,7 @@ int16_t ath9k_hw_getnf(struct ath_hal *ah,
709 } else { 669 } else {
710 ath9k_hw_do_getnf(ah, nfarray); 670 ath9k_hw_do_getnf(ah, nfarray);
711 nf = nfarray[0]; 671 nf = nfarray[0];
712 if (getNoiseFloorThresh(ah, chan, &nfThresh) 672 if (getNoiseFloorThresh(ah, c->band, &nfThresh)
713 && nf > nfThresh) { 673 && nf > nfThresh) {
714 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 674 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
715 "noise floor failed detected; " 675 "noise floor failed detected; "
@@ -719,11 +679,7 @@ int16_t ath9k_hw_getnf(struct ath_hal *ah,
719 } 679 }
720 } 680 }
721 681
722#ifdef ATH_NF_PER_CHAN
723 h = chan->nfCalHist;
724#else
725 h = ah->nfCalHist; 682 h = ah->nfCalHist;
726#endif
727 683
728 ath9k_hw_update_nfcal_hist_buffer(h, nfarray); 684 ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
729 chan->rawNoiseFloor = h[0].privNF; 685 chan->rawNoiseFloor = h[0].privNF;
@@ -731,7 +687,7 @@ int16_t ath9k_hw_getnf(struct ath_hal *ah,
731 return chan->rawNoiseFloor; 687 return chan->rawNoiseFloor;
732} 688}
733 689
734void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah) 690void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
735{ 691{
736 int i, j; 692 int i, j;
737 693
@@ -745,26 +701,16 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah)
745 AR_PHY_CCA_MAX_GOOD_VALUE; 701 AR_PHY_CCA_MAX_GOOD_VALUE;
746 } 702 }
747 } 703 }
748 return;
749} 704}
750 705
751s16 ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan) 706s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
752{ 707{
753 struct ath9k_channel *ichan;
754 s16 nf; 708 s16 nf;
755 709
756 ichan = ath9k_regd_check_channel(ah, chan); 710 if (chan->rawNoiseFloor == 0)
757 if (ichan == NULL) { 711 nf = -96;
758 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 712 else
759 "invalid channel %u/0x%x; no mapping\n", 713 nf = chan->rawNoiseFloor;
760 chan->channel, chan->channelFlags);
761 return ATH_DEFAULT_NOISE_FLOOR;
762 }
763 if (ichan->rawNoiseFloor == 0) {
764 enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan);
765 nf = NOISE_FLOOR[mode];
766 } else
767 nf = ichan->rawNoiseFloor;
768 714
769 if (!ath9k_hw_nf_in_range(ah, nf)) 715 if (!ath9k_hw_nf_in_range(ah, nf))
770 nf = ATH_DEFAULT_NOISE_FLOOR; 716 nf = ATH_DEFAULT_NOISE_FLOOR;
@@ -772,30 +718,21 @@ s16 ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan)
772 return nf; 718 return nf;
773} 719}
774 720
775bool ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan, 721bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
776 u8 rxchainmask, bool longcal, 722 u8 rxchainmask, bool longcal,
777 bool *isCalDone) 723 bool *isCalDone)
778{ 724{
779 struct ath_hal_5416 *ahp = AH5416(ah); 725 struct hal_cal_list *currCal = ah->cal_list_curr;
780 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
781 struct ath9k_channel *ichan = ath9k_regd_check_channel(ah, chan);
782 726
783 *isCalDone = true; 727 *isCalDone = true;
784 728
785 if (ichan == NULL) {
786 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
787 "invalid channel %u/0x%x; no mapping\n",
788 chan->channel, chan->channelFlags);
789 return false;
790 }
791
792 if (currCal && 729 if (currCal &&
793 (currCal->calState == CAL_RUNNING || 730 (currCal->calState == CAL_RUNNING ||
794 currCal->calState == CAL_WAITING)) { 731 currCal->calState == CAL_WAITING)) {
795 ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal, 732 ath9k_hw_per_calibration(ah, chan, rxchainmask, currCal,
796 isCalDone); 733 isCalDone);
797 if (*isCalDone) { 734 if (*isCalDone) {
798 ahp->ah_cal_list_curr = currCal = currCal->calNext; 735 ah->cal_list_curr = currCal = currCal->calNext;
799 736
800 if (currCal->calState == CAL_WAITING) { 737 if (currCal->calState == CAL_WAITING) {
801 *isCalDone = false; 738 *isCalDone = false;
@@ -805,20 +742,18 @@ bool ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
805 } 742 }
806 743
807 if (longcal) { 744 if (longcal) {
808 ath9k_hw_getnf(ah, ichan); 745 ath9k_hw_getnf(ah, chan);
809 ath9k_hw_loadnf(ah, ah->ah_curchan); 746 ath9k_hw_loadnf(ah, ah->curchan);
810 ath9k_hw_start_nfcal(ah); 747 ath9k_hw_start_nfcal(ah);
811 748
812 if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) { 749 if (chan->channelFlags & CHANNEL_CW_INT)
813 chan->channelFlags |= CHANNEL_CW_INT; 750 chan->channelFlags &= ~CHANNEL_CW_INT;
814 ichan->channelFlags &= ~CHANNEL_CW_INT;
815 }
816 } 751 }
817 752
818 return true; 753 return true;
819} 754}
820 755
821static inline void ath9k_hw_9285_pa_cal(struct ath_hal *ah) 756static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah)
822{ 757{
823 758
824 u32 regVal; 759 u32 regVal;
@@ -913,12 +848,9 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hal *ah)
913 848
914} 849}
915 850
916bool ath9k_hw_init_cal(struct ath_hal *ah, 851bool ath9k_hw_init_cal(struct ath_hw *ah,
917 struct ath9k_channel *chan) 852 struct ath9k_channel *chan)
918{ 853{
919 struct ath_hal_5416 *ahp = AH5416(ah);
920 struct ath9k_channel *ichan = ath9k_regd_check_channel(ah, chan);
921
922 REG_WRITE(ah, AR_PHY_AGC_CONTROL, 854 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
923 REG_READ(ah, AR_PHY_AGC_CONTROL) | 855 REG_READ(ah, AR_PHY_AGC_CONTROL) |
924 AR_PHY_AGC_CONTROL_CAL); 856 AR_PHY_AGC_CONTROL_CAL);
@@ -937,35 +869,35 @@ bool ath9k_hw_init_cal(struct ath_hal *ah,
937 REG_READ(ah, AR_PHY_AGC_CONTROL) | 869 REG_READ(ah, AR_PHY_AGC_CONTROL) |
938 AR_PHY_AGC_CONTROL_NF); 870 AR_PHY_AGC_CONTROL_NF);
939 871
940 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL; 872 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
941 873
942 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) { 874 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
943 if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) { 875 if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
944 INIT_CAL(&ahp->ah_adcGainCalData); 876 INIT_CAL(&ah->adcgain_caldata);
945 INSERT_CAL(ahp, &ahp->ah_adcGainCalData); 877 INSERT_CAL(ah, &ah->adcgain_caldata);
946 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 878 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
947 "enabling ADC Gain Calibration.\n"); 879 "enabling ADC Gain Calibration.\n");
948 } 880 }
949 if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) { 881 if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
950 INIT_CAL(&ahp->ah_adcDcCalData); 882 INIT_CAL(&ah->adcdc_caldata);
951 INSERT_CAL(ahp, &ahp->ah_adcDcCalData); 883 INSERT_CAL(ah, &ah->adcdc_caldata);
952 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 884 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
953 "enabling ADC DC Calibration.\n"); 885 "enabling ADC DC Calibration.\n");
954 } 886 }
955 if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) { 887 if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
956 INIT_CAL(&ahp->ah_iqCalData); 888 INIT_CAL(&ah->iq_caldata);
957 INSERT_CAL(ahp, &ahp->ah_iqCalData); 889 INSERT_CAL(ah, &ah->iq_caldata);
958 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 890 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
959 "enabling IQ Calibration.\n"); 891 "enabling IQ Calibration.\n");
960 } 892 }
961 893
962 ahp->ah_cal_list_curr = ahp->ah_cal_list; 894 ah->cal_list_curr = ah->cal_list;
963 895
964 if (ahp->ah_cal_list_curr) 896 if (ah->cal_list_curr)
965 ath9k_hw_reset_calibration(ah, ahp->ah_cal_list_curr); 897 ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
966 } 898 }
967 899
968 ichan->CalValid = 0; 900 chan->CalValid = 0;
969 901
970 return true; 902 return true;
971} 903}
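
ath9k_hw_do_getnf() above reads each chain's minimum CCA power as a 9-bit two's-complement hardware field, which is why every reading is sign-extended by hand before being stored in nfarray[]. A stand-alone check of that idiom, with sample values chosen here for illustration:

#include <stdio.h>
#include <stdint.h>

/* 9-bit two's-complement sign extension, as done per chain in calib.c */
static int16_t sign_extend9(int16_t nf)
{
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	return nf;
}

int main(void)
{
	printf("%d\n", sign_extend9(0x1a0));  /* -96: a typical noise-floor reading */
	printf("%d\n", sign_extend9(0x05f));  /*  95: values below 0x100 pass through */
	return 0;
}
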
diff --git a/drivers/net/wireless/ath9k/calib.h b/drivers/net/wireless/ath9k/calib.h
new file mode 100644
index 00000000000..d2448f049c1
--- /dev/null
+++ b/drivers/net/wireless/ath9k/calib.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef CALIB_H
18#define CALIB_H
19
20extern const struct hal_percal_data iq_cal_multi_sample;
21extern const struct hal_percal_data iq_cal_single_sample;
22extern const struct hal_percal_data adc_gain_cal_multi_sample;
23extern const struct hal_percal_data adc_gain_cal_single_sample;
24extern const struct hal_percal_data adc_dc_cal_multi_sample;
25extern const struct hal_percal_data adc_dc_cal_single_sample;
26extern const struct hal_percal_data adc_init_dc_cal;
27
28#define AR_PHY_CCA_MAX_GOOD_VALUE -85
29#define AR_PHY_CCA_MAX_HIGH_VALUE -62
30#define AR_PHY_CCA_MIN_BAD_VALUE -121
31#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
32#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
33
34#define NUM_NF_READINGS 6
35#define ATH9K_NF_CAL_HIST_MAX 5
36
37struct ar5416IniArray {
38 u32 *ia_array;
39 u32 ia_rows;
40 u32 ia_columns;
41};
42
43#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
44 (iniarray)->ia_array = (u32 *)(array); \
45 (iniarray)->ia_rows = (rows); \
46 (iniarray)->ia_columns = (columns); \
47 } while (0)
48
49#define INI_RA(iniarray, row, column) \
50 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])
51
52#define INIT_CAL(_perCal) do { \
53 (_perCal)->calState = CAL_WAITING; \
54 (_perCal)->calNext = NULL; \
55 } while (0)
56
57#define INSERT_CAL(_ahp, _perCal) \
58 do { \
59 if ((_ahp)->cal_list_last == NULL) { \
60 (_ahp)->cal_list = \
61 (_ahp)->cal_list_last = (_perCal); \
62 ((_ahp)->cal_list_last)->calNext = (_perCal); \
63 } else { \
64 ((_ahp)->cal_list_last)->calNext = (_perCal); \
65 (_ahp)->cal_list_last = (_perCal); \
66 (_perCal)->calNext = (_ahp)->cal_list; \
67 } \
68 } while (0)
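INIT_CAL() and INSERT_CAL() above maintain a circular singly linked list of per-calibration entries, so the periodic calibration code can simply follow calNext to round-robin through whatever calibrations the chip supports. The following is a minimal standalone sketch of the same list discipline; the structs and harness are hypothetical stand-ins for struct ath_hw and struct hal_cal_list, not driver code.

/* Hypothetical stand-ins for the driver structures; only the list
 * manipulation mirrors INIT_CAL/INSERT_CAL. */
#include <stdio.h>
#include <stddef.h>

struct cal_entry {
	const char *name;
	struct cal_entry *next;		/* plays the role of calNext */
};

struct cal_ring {
	struct cal_entry *head;		/* cal_list */
	struct cal_entry *tail;		/* cal_list_last */
};

static void insert_cal(struct cal_ring *r, struct cal_entry *e)
{
	if (r->tail == NULL) {
		r->head = r->tail = e;
		e->next = e;		/* a single entry points at itself */
	} else {
		r->tail->next = e;
		r->tail = e;
		e->next = r->head;	/* close the ring */
	}
}

int main(void)
{
	struct cal_ring ring = { NULL, NULL };
	struct cal_entry adc_gain = { "ADC gain", NULL };
	struct cal_entry adc_dc   = { "ADC DC", NULL };
	struct cal_entry iq       = { "IQ mismatch", NULL };
	struct cal_entry *cur;
	int i;

	insert_cal(&ring, &adc_gain);
	insert_cal(&ring, &adc_dc);
	insert_cal(&ring, &iq);

	/* Walk five steps to show the wrap-around after the third entry. */
	for (cur = ring.head, i = 0; i < 5; cur = cur->next, i++)
		printf("%s\n", cur->name);

	return 0;
}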
69
70enum hal_cal_types {
71 ADC_DC_INIT_CAL = 0x1,
72 ADC_GAIN_CAL = 0x2,
73 ADC_DC_CAL = 0x4,
74 IQ_MISMATCH_CAL = 0x8
75};
76
77enum hal_cal_state {
78 CAL_INACTIVE,
79 CAL_WAITING,
80 CAL_RUNNING,
81 CAL_DONE
82};
83
84#define MIN_CAL_SAMPLES 1
85#define MAX_CAL_SAMPLES 64
86#define INIT_LOG_COUNT 5
87#define PER_MIN_LOG_COUNT 2
88#define PER_MAX_LOG_COUNT 10
89
90struct hal_percal_data {
91 enum hal_cal_types calType;
92 u32 calNumSamples;
93 u32 calCountMax;
94 void (*calCollect) (struct ath_hw *);
95 void (*calPostProc) (struct ath_hw *, u8);
96};
97
98struct hal_cal_list {
99 const struct hal_percal_data *calData;
100 enum hal_cal_state calState;
101 struct hal_cal_list *calNext;
102};
103
104struct ath9k_nfcal_hist {
105 int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX];
106 u8 currIndex;
107 int16_t privNF;
108 u8 invalidNFcount;
109};
110
111bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
112void ath9k_hw_start_nfcal(struct ath_hw *ah);
113void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
114int16_t ath9k_hw_getnf(struct ath_hw *ah,
115 struct ath9k_channel *chan);
116void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
117s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
118bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
119 u8 rxchainmask, bool longcal,
120 bool *isCalDone);
121bool ath9k_hw_init_cal(struct ath_hw *ah,
122 struct ath9k_channel *chan);
123
124#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
deleted file mode 100644
index 4ca2aed236e..00000000000
--- a/drivers/net/wireless/ath9k/core.h
+++ /dev/null
@@ -1,754 +0,0 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef CORE_H
18#define CORE_H
19
20#include <linux/etherdevice.h>
21#include <linux/pci.h>
22#include <net/mac80211.h>
23#include <linux/leds.h>
24#include <linux/rfkill.h>
25
26#include "ath9k.h"
27#include "rc.h"
28
29struct ath_node;
30
31/* Macro to expand scalars to 64-bit objects */
32
33#define ito64(x) (sizeof(x) == 8) ? \
34 (((unsigned long long int)(x)) & (0xff)) : \
35 (sizeof(x) == 16) ? \
36 (((unsigned long long int)(x)) & 0xffff) : \
37 ((sizeof(x) == 32) ? \
38 (((unsigned long long int)(x)) & 0xffffffff) : \
39 (unsigned long long int)(x))
40
41/* increment with wrap-around */
42#define INCR(_l, _sz) do { \
43 (_l)++; \
44 (_l) &= ((_sz) - 1); \
45 } while (0)
46
47/* decrement with wrap-around */
48#define DECR(_l, _sz) do { \
49 (_l)--; \
50 (_l) &= ((_sz) - 1); \
51 } while (0)
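The INCR()/DECR() helpers above only work when _sz is a power of two: masking with (_sz - 1) replaces the modulo, which is how the 64-entry block-ack window indices are advanced. A small standalone check, with a hypothetical harness:

#include <stdio.h>

#define INCR(_l, _sz) do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define DECR(_l, _sz) do { (_l)--; (_l) &= ((_sz) - 1); } while (0)

int main(void)
{
	unsigned int idx = 63;

	printf("start      : %u\n", idx);
	INCR(idx, 64);			/* 63 wraps forward to 0 */
	printf("after INCR : %u\n", idx);
	DECR(idx, 64);			/* 0 wraps back to 63 */
	printf("after DECR : %u\n", idx);

	return 0;
}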
52
53#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
54
55#define ASSERT(exp) do { \
56 if (unlikely(!(exp))) { \
57 BUG(); \
58 } \
59 } while (0)
60
61#define TSF_TO_TU(_h,_l) \
62 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
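TSF_TO_TU() converts the 64-bit TSF (a microsecond counter split across two 32-bit registers) into time units of 1024 us: dividing by 2^10 is the same as shifting the high word left by 22 and the low word right by 10. A standalone sanity check; the harness is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define TSF_TO_TU(_h, _l) \
	((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))

int main(void)
{
	uint64_t tsf = 0x123456789abcULL;	/* arbitrary timestamp in us */
	uint32_t hi = (uint32_t)(tsf >> 32);
	uint32_t lo = (uint32_t)tsf;

	printf("TU via macro   : %u\n", (unsigned)TSF_TO_TU(hi, lo));
	printf("TU via division: %u\n", (unsigned)(uint32_t)(tsf >> 10));

	return 0;
}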
63
64#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
65
66static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
67
68enum ATH_DEBUG {
69 ATH_DBG_RESET = 0x00000001,
70 ATH_DBG_REG_IO = 0x00000002,
71 ATH_DBG_QUEUE = 0x00000004,
72 ATH_DBG_EEPROM = 0x00000008,
73 ATH_DBG_CALIBRATE = 0x00000010,
74 ATH_DBG_CHANNEL = 0x00000020,
75 ATH_DBG_INTERRUPT = 0x00000040,
76 ATH_DBG_REGULATORY = 0x00000080,
77 ATH_DBG_ANI = 0x00000100,
78 ATH_DBG_POWER_MGMT = 0x00000200,
79 ATH_DBG_XMIT = 0x00000400,
80 ATH_DBG_BEACON = 0x00001000,
81 ATH_DBG_CONFIG = 0x00002000,
82 ATH_DBG_KEYCACHE = 0x00004000,
83 ATH_DBG_FATAL = 0x00008000,
84 ATH_DBG_ANY = 0xffffffff
85};
86
87#define DBG_DEFAULT (ATH_DBG_FATAL)
88
89#ifdef CONFIG_ATH9K_DEBUG
90
91/**
92 * struct ath_interrupt_stats - Contains statistics about interrupts
93 * @total: Total no. of interrupts generated so far
94 * @rxok: RX with no errors
95 * @rxeol: RX with no more RXDESC available
96 * @rxorn: RX FIFO overrun
97 * @txok: TX completed at the requested rate
98 * @txurn: TX FIFO underrun
99 * @mib: MIB regs reaching their threshold
100 * @rxphyerr: RX with phy errors
101 * @rx_keycache_miss: RX with key cache misses
102 * @swba: Software Beacon Alert
103 * @bmiss: Beacon Miss
104 * @bnr: Beacon Not Ready
105 * @cst: Carrier Sense Timeout
106 * @gtt: Global TX Timeout
107 * @tim: RX beacon TIM occurrence
108 * @cabend: RX End of CAB traffic
109 * @dtimsync: DTIM sync lossage
110 * @dtim: RX Beacon with DTIM
111 */
112struct ath_interrupt_stats {
113 u32 total;
114 u32 rxok;
115 u32 rxeol;
116 u32 rxorn;
117 u32 txok;
118 u32 txeol;
119 u32 txurn;
120 u32 mib;
121 u32 rxphyerr;
122 u32 rx_keycache_miss;
123 u32 swba;
124 u32 bmiss;
125 u32 bnr;
126 u32 cst;
127 u32 gtt;
128 u32 tim;
129 u32 cabend;
130 u32 dtimsync;
131 u32 dtim;
132};
133
134struct ath_stats {
135 struct ath_interrupt_stats istats;
136};
137
138struct ath9k_debug {
139 int debug_mask;
140 struct dentry *debugfs_root;
141 struct dentry *debugfs_phy;
142 struct dentry *debugfs_dma;
143 struct dentry *debugfs_interrupt;
144 struct ath_stats stats;
145};
146
147void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...);
148int ath9k_init_debug(struct ath_softc *sc);
149void ath9k_exit_debug(struct ath_softc *sc);
150void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
151
152#else
153
154static inline void DPRINTF(struct ath_softc *sc, int dbg_mask,
155 const char *fmt, ...)
156{
157}
158
159static inline int ath9k_init_debug(struct ath_softc *sc)
160{
161 return 0;
162}
163
164static inline void ath9k_exit_debug(struct ath_softc *sc)
165{
166}
167
168static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
169 enum ath9k_int status)
170{
171}
172
173#endif /* CONFIG_ATH9K_DEBUG */
174
175struct ath_config {
176 u32 ath_aggr_prot;
177 u16 txpowlimit;
178 u16 txpowlimit_override;
179 u8 cabqReadytime;
180 u8 swBeaconProcess;
181};
182
183/*************************/
184/* Descriptor Management */
185/*************************/
186
187#define ATH_TXBUF_RESET(_bf) do { \
188 (_bf)->bf_status = 0; \
189 (_bf)->bf_lastbf = NULL; \
190 (_bf)->bf_lastfrm = NULL; \
191 (_bf)->bf_next = NULL; \
192 memset(&((_bf)->bf_state), 0, \
193 sizeof(struct ath_buf_state)); \
194 } while (0)
195
196enum buffer_type {
197 BUF_DATA = BIT(0),
198 BUF_AGGR = BIT(1),
199 BUF_AMPDU = BIT(2),
200 BUF_HT = BIT(3),
201 BUF_RETRY = BIT(4),
202 BUF_XRETRY = BIT(5),
203 BUF_SHORT_PREAMBLE = BIT(6),
204 BUF_BAR = BIT(7),
205 BUF_PSPOLL = BIT(8),
206 BUF_AGGR_BURST = BIT(9),
207 BUF_CALC_AIRTIME = BIT(10),
208};
209
210struct ath_buf_state {
211 int bfs_nframes; /* # frames in aggregate */
212 u16 bfs_al; /* length of aggregate */
213 u16 bfs_frmlen; /* length of frame */
214 int bfs_seqno; /* sequence number */
215 int bfs_tidno; /* tid of this frame */
216 int bfs_retries; /* current retries */
217 u32 bf_type; /* BUF_* (enum buffer_type) */
218 u32 bfs_keyix;
219 enum ath9k_key_type bfs_keytype;
220};
221
222#define bf_nframes bf_state.bfs_nframes
223#define bf_al bf_state.bfs_al
224#define bf_frmlen bf_state.bfs_frmlen
225#define bf_retries bf_state.bfs_retries
226#define bf_seqno bf_state.bfs_seqno
227#define bf_tidno bf_state.bfs_tidno
228#define bf_rcs bf_state.bfs_rcs
229#define bf_keyix bf_state.bfs_keyix
230#define bf_keytype bf_state.bfs_keytype
231#define bf_isdata(bf) (bf->bf_state.bf_type & BUF_DATA)
232#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
233#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
234#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT)
235#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
236#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
237#define bf_isshpreamble(bf) (bf->bf_state.bf_type & BUF_SHORT_PREAMBLE)
238#define bf_isbar(bf) (bf->bf_state.bf_type & BUF_BAR)
239#define bf_ispspoll(bf) (bf->bf_state.bf_type & BUF_PSPOLL)
240#define bf_isaggrburst(bf) (bf->bf_state.bf_type & BUF_AGGR_BURST)
241
242/*
243 * Abstraction of a contiguous buffer to transmit/receive. There is only
244 * a single hw descriptor encapsulated here.
245 */
246struct ath_buf {
247 struct list_head list;
248 struct list_head *last;
249 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
250 an aggregate) */
251 struct ath_buf *bf_lastfrm; /* last buf of this frame */
252 struct ath_buf *bf_next; /* next subframe in the aggregate */
253 void *bf_mpdu; /* enclosing frame structure */
254 struct ath_desc *bf_desc; /* virtual addr of desc */
255 dma_addr_t bf_daddr; /* physical addr of desc */
256 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
257 u32 bf_status;
258 u16 bf_flags; /* tx descriptor flags */
259 struct ath_buf_state bf_state; /* buffer state */
260 dma_addr_t bf_dmacontext;
261};
262
263#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
264
265/* hw processing complete, desc processed by hal */
266#define ATH_BUFSTATUS_DONE 0x00000001
267 /* hw processing complete, desc held for hw */
268#define ATH_BUFSTATUS_STALE 0x00000002
269 /* Rx-only: OS is done with this packet and it's ok to queue it to hw */
270#define ATH_BUFSTATUS_FREE 0x00000004
271
272/* DMA state for tx/rx descriptors */
273
274struct ath_descdma {
275 const char *dd_name;
276 struct ath_desc *dd_desc; /* descriptors */
277 dma_addr_t dd_desc_paddr; /* physical addr of dd_desc */
278 u32 dd_desc_len; /* size of dd_desc */
279 struct ath_buf *dd_bufptr; /* associated buffers */
280 dma_addr_t dd_dmacontext;
281};
282
283int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
284 struct list_head *head, const char *name,
285 int nbuf, int ndesc);
286void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
287 struct list_head *head);
288
289/***********/
290/* RX / TX */
291/***********/
292
293#define ATH_MAX_ANTENNA 3
294#define ATH_RXBUF 512
295#define WME_NUM_TID 16
296#define ATH_TXBUF 512
297#define ATH_TXMAXTRY 13
298#define ATH_11N_TXMAXTRY 10
299#define ATH_MGT_TXMAXTRY 4
300#define WME_BA_BMP_SIZE 64
301#define WME_MAX_BA WME_BA_BMP_SIZE
302#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
303
304#define TID_TO_WME_AC(_tid) \
305 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
306 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
307 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
308 WME_AC_VO)
309
310#define WME_AC_BE 0
311#define WME_AC_BK 1
312#define WME_AC_VI 2
313#define WME_AC_VO 3
314#define WME_NUM_AC 4
315
316#define ADDBA_EXCHANGE_ATTEMPTS 10
317#define ATH_AGGR_DELIM_SZ 4
318#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
319/* number of delimiters for encryption padding */
320#define ATH_AGGR_ENCRYPTDELIM 10
321/* minimum h/w qdepth to be sustained to maximize aggregation */
322#define ATH_AGGR_MIN_QDEPTH 2
323#define ATH_AMPDU_SUBFRAME_DEFAULT 32
324#define IEEE80211_SEQ_SEQ_SHIFT 4
325#define IEEE80211_SEQ_MAX 4096
326#define IEEE80211_MIN_AMPDU_BUF 0x8
327#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
328
329/* return whether a bit at index _n in bitmap _bm is set
330 * _sz is the size of the bitmap */
331#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
332 ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
333
334/* return block-ack bitmap index given sequence and starting sequence */
335#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
336
337/* returns delimiter padding required given the packet length */
338#define ATH_AGGR_GET_NDELIM(_len) \
339 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
340 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
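ATH_AGGR_GET_NDELIM() pads short A-MPDU subframes: if the frame plus one delimiter would fall below ATH_AGGR_MINPLEN, it returns the number of extra 4-byte delimiters needed to reach that minimum. A standalone check with a hypothetical harness:

#include <stdio.h>

#define ATH_AGGR_DELIM_SZ	4
#define ATH_AGGR_MINPLEN	256

#define ATH_AGGR_GET_NDELIM(_len) \
	(((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
	  (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)

int main(void)
{
	/* A 100-byte subframe needs (256 - 100 - 4) / 4 = 38 delimiters,
	 * while anything already at or above the minimum needs none. */
	printf("len 100 -> %d delimiters\n", ATH_AGGR_GET_NDELIM(100));
	printf("len 300 -> %d delimiters\n", ATH_AGGR_GET_NDELIM(300));

	return 0;
}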
341
342#define BAW_WITHIN(_start, _bawsz, _seqno) \
343 ((((_seqno) - (_start)) & 4095) < (_bawsz))
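ATH_BA_INDEX(), ATH_BA_ISSET() and BAW_WITHIN() together track the 64-entry block-ack window: the index macro reduces a sequence number to an offset from the window start modulo the 12-bit sequence space, the bitmap macro tests the corresponding bit, and BAW_WITHIN checks whether the offset still falls inside the window. A hedged standalone sketch; the harness is hypothetical, the macro bodies mirror the header:

#include <stdio.h>
#include <stdint.h>

#define WME_BA_BMP_SIZE		64
#define IEEE80211_SEQ_MAX	4096

#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) &&		\
			((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
#define BAW_WITHIN(_start, _bawsz, _seqno) \
	((((_seqno) - (_start)) & 4095) < (_bawsz))

int main(void)
{
	uint32_t ba_bitmap[WME_BA_BMP_SIZE / 32] = { 0, 0 };
	int seq_start = 4090;		/* window wraps around 4095 -> 0 */
	int acked_seq = 3;		/* wrapped sequence number */
	int idx = ATH_BA_INDEX(seq_start, acked_seq);

	/* (3 - 4090) modulo 4096 gives offset 9 into the window. */
	ba_bitmap[idx >> 5] |= 1 << (idx & 31);

	printf("index  = %d\n", idx);
	printf("isset  = %d\n", ATH_BA_ISSET(ba_bitmap, idx) ? 1 : 0);
	printf("within = %d\n", BAW_WITHIN(seq_start, WME_BA_BMP_SIZE, acked_seq));

	return 0;
}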
344
345#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
346#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
347#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
348#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
349
350enum ATH_AGGR_STATUS {
351 ATH_AGGR_DONE,
352 ATH_AGGR_BAW_CLOSED,
353 ATH_AGGR_LIMITED,
354 ATH_AGGR_SHORTPKT,
355 ATH_AGGR_8K_LIMITED,
356};
357
358struct ath_txq {
359 u32 axq_qnum; /* hardware q number */
360 u32 *axq_link; /* link ptr in last TX desc */
361 struct list_head axq_q; /* transmit queue */
362 spinlock_t axq_lock;
363 unsigned long axq_lockflags; /* intr state when must cli */
364 u32 axq_depth; /* queue depth */
365 u8 axq_aggr_depth; /* aggregates queued */
366 u32 axq_totalqueued; /* total ever queued */
367 bool stopped; /* Is mac80211 queue stopped ? */
368 struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
369
370 /* first desc of the last descriptor that contains CTS */
371 struct ath_desc *axq_lastdsWithCTS;
372
373 /* final desc of the gating desc that determines whether
374 lastdsWithCTS has been DMA'ed or not */
375 struct ath_desc *axq_gatingds;
376
377 struct list_head axq_acq;
378};
379
380#define AGGR_CLEANUP BIT(1)
381#define AGGR_ADDBA_COMPLETE BIT(2)
382#define AGGR_ADDBA_PROGRESS BIT(3)
383
384/* per TID aggregate tx state for a destination */
385struct ath_atx_tid {
386 struct list_head list; /* round-robin tid entry */
387 struct list_head buf_q; /* pending buffers */
388 struct ath_node *an;
389 struct ath_atx_ac *ac;
390 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; /* active tx frames */
391 u16 seq_start;
392 u16 seq_next;
393 u16 baw_size;
394 int tidno;
395 int baw_head; /* first un-acked tx buffer */
396 int baw_tail; /* next unused tx buffer slot */
397 int sched;
398 int paused;
399 u8 state;
400 int addba_exchangeattempts;
401};
402
403/* per access-category aggregate tx state for a destination */
404struct ath_atx_ac {
405 int sched; /* dest-ac is scheduled */
406 int qnum; /* H/W queue number associated
407 with this AC */
408 struct list_head list; /* round-robin txq entry */
409 struct list_head tid_q; /* queue of TIDs with buffers */
410};
411
412/* per-frame tx control block */
413struct ath_tx_control {
414 struct ath_txq *txq;
415 int if_id;
416};
417
418/* per frame tx status block */
419struct ath_xmit_status {
420 int retries; /* number of retries to successfully
421 transmit this frame */
422 int flags; /* status of transmit */
423#define ATH_TX_ERROR 0x01
424#define ATH_TX_XRETRY 0x02
425#define ATH_TX_BAR 0x04
426};
427
428/* All RSSI values are noise floor adjusted */
429struct ath_tx_stat {
430 int rssi;
431 int rssictl[ATH_MAX_ANTENNA];
432 int rssiextn[ATH_MAX_ANTENNA];
433 int rateieee;
434 int rateKbps;
435 int ratecode;
436 int flags;
437 u32 airtime; /* time on air per final tx rate */
438};
439
440struct aggr_rifs_param {
441 int param_max_frames;
442 int param_max_len;
443 int param_rl;
444 int param_al;
445 struct ath_rc_series *param_rcs;
446};
447
448struct ath_node {
449 struct ath_softc *an_sc;
450 struct ath_atx_tid tid[WME_NUM_TID];
451 struct ath_atx_ac ac[WME_NUM_AC];
452 u16 maxampdu;
453 u8 mpdudensity;
454};
455
456struct ath_tx {
457 u16 seq_no;
458 u32 txqsetup;
459 int hwq_map[ATH9K_WME_AC_VO+1];
460 spinlock_t txbuflock;
461 struct list_head txbuf;
462 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
463 struct ath_descdma txdma;
464};
465
466struct ath_rx {
467 u8 defant;
468 u8 rxotherant;
469 u32 *rxlink;
470 int bufsize;
471 unsigned int rxfilter;
472 spinlock_t rxflushlock;
473 spinlock_t rxbuflock;
474 struct list_head rxbuf;
475 struct ath_descdma rxdma;
476};
477
478int ath_startrecv(struct ath_softc *sc);
479bool ath_stoprecv(struct ath_softc *sc);
480void ath_flushrecv(struct ath_softc *sc);
481u32 ath_calcrxfilter(struct ath_softc *sc);
482int ath_rx_init(struct ath_softc *sc, int nbufs);
483void ath_rx_cleanup(struct ath_softc *sc);
484int ath_rx_tasklet(struct ath_softc *sc, int flush);
485struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
486void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
487int ath_tx_setup(struct ath_softc *sc, int haltype);
488void ath_draintxq(struct ath_softc *sc, bool retry_tx);
489void ath_tx_draintxq(struct ath_softc *sc,
490 struct ath_txq *txq, bool retry_tx);
491void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
492void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
493void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
494void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
495int ath_tx_init(struct ath_softc *sc, int nbufs);
496int ath_tx_cleanup(struct ath_softc *sc);
497int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
498struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
499int ath_txq_update(struct ath_softc *sc, int qnum,
500 struct ath9k_tx_queue_info *q);
501int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
502 struct ath_tx_control *txctl);
503void ath_tx_tasklet(struct ath_softc *sc);
504u32 ath_txq_depth(struct ath_softc *sc, int qnum);
505u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
506void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
507void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
508bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
509void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tidno);
510int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
511 u16 tid, u16 *ssn);
512int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
513void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
514
515/********/
516/* VAPs */
517/********/
518
519/*
520 * Define the scheme that we select MAC address for multiple
521 * BSS on the same radio. The very first VAP will just use the MAC
522 * address from the EEPROM. For the next 3 VAPs, we set the
523 * U/L bit (bit 1) in MAC address, and use the next two bits as the
524 * index of the VAP.
525 */
526
527#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
528 ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
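The comment above describes the multi-BSS addressing scheme: VAP 0 keeps the EEPROM MAC address, later VAPs set the locally-administered (U/L) bit and carry a two-bit VAP index in the next two bits, and ATH_SET_VAP_BSSID_MASK() clears exactly those bits from the BSSID mask so the hardware accepts all of the derived addresses. The sketch below only illustrates that bit layout; derive_vap_addr() and the sample address are hypothetical, not driver code.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN  6
#define ATH_BCBUF 1		/* matches the definition later in this header */

static void derive_vap_addr(const uint8_t *hw_addr, int vap_idx, uint8_t *out)
{
	memcpy(out, hw_addr, ETH_ALEN);
	if (vap_idx > 0) {
		out[0] |= 0x02;				/* U/L bit */
		out[0] |= (vap_idx & 0x3) << 2;		/* two-bit VAP index */
	}
}

int main(void)
{
	uint8_t eeprom_addr[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 };
	uint8_t bssid_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t addr[ETH_ALEN];
	int i, b;

	/* Same operation as ATH_SET_VAP_BSSID_MASK(bssid_mask). */
	bssid_mask[0] &= ~(((ATH_BCBUF - 1) << 2) | 0x02);

	for (i = 0; i < 4; i++) {
		derive_vap_addr(eeprom_addr, i, addr);
		printf("VAP %d:", i);
		for (b = 0; b < ETH_ALEN; b++)
			printf(" %02x", addr[b]);
		printf("\n");
	}

	return 0;
}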
529
530struct ath_vap {
531 int av_bslot;
532 enum nl80211_iftype av_opmode;
533 struct ath_buf *av_bcbuf;
534 struct ath_tx_control av_btxctl;
535};
536
537/*******************/
538/* Beacon Handling */
539/*******************/
540
541/*
542 * Regardless of the number of beacons we stagger, (i.e. regardless of the
543 * number of BSSIDs) if a given beacon does not go out even after waiting this
544 * number of beacon intervals, the game's up.
545 */
546#define BSTUCK_THRESH (9 * ATH_BCBUF)
547#define ATH_BCBUF 1
548#define ATH_DEFAULT_BINTVAL 100 /* TU */
549#define ATH_DEFAULT_BMISS_LIMIT 10
550#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
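IEEE80211_MS_TO_TU() converts milliseconds to beacon time units (1 TU = 1024 us), truncating towards zero. A tiny standalone check with a hypothetical harness:

#include <stdio.h>

#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)

int main(void)
{
	printf("100 ms  -> %d TU\n", IEEE80211_MS_TO_TU(100));		/* 97 */
	printf("1000 ms -> %d TU\n", IEEE80211_MS_TO_TU(1000));	/* 976 */
	return 0;
}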
551
552struct ath_beacon_config {
553 u16 beacon_interval;
554 u16 listen_interval;
555 u16 dtim_period;
556 u16 bmiss_timeout;
557 u8 dtim_count;
558 u8 tim_offset;
559 union {
560 u64 last_tsf;
561 u8 last_tstamp[8];
562 } u; /* last received beacon/probe response timestamp of this BSS. */
563};
564
565struct ath_beacon {
566 enum {
567 OK, /* no change needed */
568 UPDATE, /* update pending */
569 COMMIT /* beacon sent, commit change */
570 } updateslot; /* slot time update fsm */
571
572 u32 beaconq;
573 u32 bmisscnt;
574 u32 ast_be_xmit;
575 u64 bc_tstamp;
576 int bslot[ATH_BCBUF];
577 int slottime;
578 int slotupdate;
579 struct ath9k_tx_queue_info beacon_qi;
580 struct ath_descdma bdma;
581 struct ath_txq *cabq;
582 struct list_head bbuf;
583};
584
585void ath9k_beacon_tasklet(unsigned long data);
586void ath_beacon_config(struct ath_softc *sc, int if_id);
587int ath_beaconq_setup(struct ath_hal *ah);
588int ath_beacon_alloc(struct ath_softc *sc, int if_id);
589void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
590void ath_beacon_sync(struct ath_softc *sc, int if_id);
591
592/*******/
593/* ANI */
594/*******/
595
596/* ANI values for STA only.
597 FIXME: Add appropriate values for AP later */
598
599#define ATH_ANI_POLLINTERVAL 100 /* 100 milliseconds between ANI poll */
600#define ATH_SHORT_CALINTERVAL 1000 /* 1 second between calibrations */
601#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds between calibrations */
602#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes between calibrations */
603
604struct ath_ani {
605 bool sc_caldone;
606 int16_t sc_noise_floor;
607 unsigned int sc_longcal_timer;
608 unsigned int sc_shortcal_timer;
609 unsigned int sc_resetcal_timer;
610 unsigned int sc_checkani_timer;
611 struct timer_list timer;
612};
613
614/********************/
615/* LED Control */
616/********************/
617
618#define ATH_LED_PIN 1
619
620enum ath_led_type {
621 ATH_LED_RADIO,
622 ATH_LED_ASSOC,
623 ATH_LED_TX,
624 ATH_LED_RX
625};
626
627struct ath_led {
628 struct ath_softc *sc;
629 struct led_classdev led_cdev;
630 enum ath_led_type led_type;
631 char name[32];
632 bool registered;
633};
634
635/* Rfkill */
636#define ATH_RFKILL_POLL_INTERVAL 2000 /* msecs */
637
638struct ath_rfkill {
639 struct rfkill *rfkill;
640 struct delayed_work rfkill_poll;
641 char rfkill_name[32];
642};
643
644/********************/
645/* Main driver core */
646/********************/
647
648/*
649 * Default cache line size, in bytes.
650 * Used when the PCI device is not fully initialized by bootrom/BIOS
651*/
652#define DEFAULT_CACHELINE 32
653#define ATH_DEFAULT_NOISE_FLOOR -95
654#define ATH_REGCLASSIDS_MAX 10
655#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
656#define ATH_MAX_SW_RETRIES 10
657#define ATH_CHAN_MAX 255
658#define IEEE80211_WEP_NKID 4 /* number of key ids */
659#define IEEE80211_RATE_VAL 0x7f
660/*
661 * The key cache is used for h/w cipher state and also for
662 * tracking station state such as the current tx antenna.
663 * We also setup a mapping table between key cache slot indices
664 * and station state to short-circuit node lookups on rx.
665 * Different parts have different size key caches. We handle
666 * up to ATH_KEYMAX entries (could dynamically allocate state).
667 */
668#define ATH_KEYMAX 128 /* max key cache size we handle */
669
670#define ATH_IF_ID_ANY 0xff
671#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
672#define ATH_RSSI_DUMMY_MARKER 0x127
673#define ATH_RATE_DUMMY_MARKER 0
674
675enum PROT_MODE {
676 PROT_M_NONE = 0,
677 PROT_M_RTSCTS,
678 PROT_M_CTSONLY
679};
680
681#define SC_OP_INVALID BIT(0)
682#define SC_OP_BEACONS BIT(1)
683#define SC_OP_RXAGGR BIT(2)
684#define SC_OP_TXAGGR BIT(3)
685#define SC_OP_CHAINMASK_UPDATE BIT(4)
686#define SC_OP_FULL_RESET BIT(5)
687#define SC_OP_NO_RESET BIT(6)
688#define SC_OP_PREAMBLE_SHORT BIT(7)
689#define SC_OP_PROTECT_ENABLE BIT(8)
690#define SC_OP_RXFLUSH BIT(9)
691#define SC_OP_LED_ASSOCIATED BIT(10)
692#define SC_OP_RFKILL_REGISTERED BIT(11)
693#define SC_OP_RFKILL_SW_BLOCKED BIT(12)
694#define SC_OP_RFKILL_HW_BLOCKED BIT(13)
695
696struct ath_softc {
697 struct ieee80211_hw *hw;
698 struct pci_dev *pdev;
699 struct tasklet_struct intr_tq;
700 struct tasklet_struct bcon_tasklet;
701 struct ath_hal *sc_ah;
702 void __iomem *mem;
703 spinlock_t sc_resetlock;
704 struct mutex mutex;
705
706 u8 sc_curbssid[ETH_ALEN];
707 u8 sc_myaddr[ETH_ALEN];
708 u8 sc_bssidmask[ETH_ALEN];
709 u32 sc_intrstatus;
710 u32 sc_flags; /* SC_OP_* */
711 u16 sc_curtxpow;
712 u16 sc_curaid;
713 u16 sc_cachelsz;
714 u8 sc_nbcnvaps;
715 u16 sc_nvaps;
716 u8 sc_tx_chainmask;
717 u8 sc_rx_chainmask;
718 u32 sc_keymax;
719 DECLARE_BITMAP(sc_keymap, ATH_KEYMAX);
720 u8 sc_splitmic;
721 u8 sc_protrix;
722 enum ath9k_int sc_imask;
723 enum PROT_MODE sc_protmode;
724 enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
725 enum ath9k_ht_macmode tx_chan_width;
726
727 struct ath_config sc_config;
728 struct ath_rx rx;
729 struct ath_tx tx;
730 struct ath_beacon beacon;
731 struct ieee80211_vif *sc_vaps[ATH_BCBUF];
732 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
733 struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX];
734 struct ath_rate_table *cur_rate_table;
735 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
736 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
737 struct ath_led radio_led;
738 struct ath_led assoc_led;
739 struct ath_led tx_led;
740 struct ath_led rx_led;
741 struct ath_rfkill rf_kill;
742 struct ath_ani sc_ani;
743 struct ath9k_node_stats sc_halstats;
744#ifdef CONFIG_ATH9K_DEBUG
745 struct ath9k_debug sc_debug;
746#endif
747};
748
749int ath_reset(struct ath_softc *sc, bool retry_tx);
750int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
751int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
752int ath_cabq_update(struct ath_softc *);
753
754#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/debug.c b/drivers/net/wireless/ath9k/debug.c
index a80ed576830..800ad5926b6 100644
--- a/drivers/net/wireless/ath9k/debug.c
+++ b/drivers/net/wireless/ath9k/debug.c
@@ -14,9 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18#include "reg.h"
19#include "hw.h"
20 18
21static unsigned int ath9k_debug = DBG_DEFAULT; 19static unsigned int ath9k_debug = DBG_DEFAULT;
22module_param_named(debug, ath9k_debug, uint, 0); 20module_param_named(debug, ath9k_debug, uint, 0);
@@ -26,7 +24,7 @@ void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...)
26 if (!sc) 24 if (!sc)
27 return; 25 return;
28 26
29 if (sc->sc_debug.debug_mask & dbg_mask) { 27 if (sc->debug.debug_mask & dbg_mask) {
30 va_list args; 28 va_list args;
31 29
32 va_start(args, fmt); 30 va_start(args, fmt);
@@ -46,7 +44,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
46 size_t count, loff_t *ppos) 44 size_t count, loff_t *ppos)
47{ 45{
48 struct ath_softc *sc = file->private_data; 46 struct ath_softc *sc = file->private_data;
49 struct ath_hal *ah = sc->sc_ah; 47 struct ath_hw *ah = sc->sc_ah;
50 char buf[1024]; 48 char buf[1024];
51 unsigned int len = 0; 49 unsigned int len = 0;
52 u32 val[ATH9K_NUM_DMA_DEBUG_REGS]; 50 u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
@@ -132,41 +130,41 @@ static const struct file_operations fops_dma = {
132void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) 130void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
133{ 131{
134 if (status) 132 if (status)
135 sc->sc_debug.stats.istats.total++; 133 sc->debug.stats.istats.total++;
136 if (status & ATH9K_INT_RX) 134 if (status & ATH9K_INT_RX)
137 sc->sc_debug.stats.istats.rxok++; 135 sc->debug.stats.istats.rxok++;
138 if (status & ATH9K_INT_RXEOL) 136 if (status & ATH9K_INT_RXEOL)
139 sc->sc_debug.stats.istats.rxeol++; 137 sc->debug.stats.istats.rxeol++;
140 if (status & ATH9K_INT_RXORN) 138 if (status & ATH9K_INT_RXORN)
141 sc->sc_debug.stats.istats.rxorn++; 139 sc->debug.stats.istats.rxorn++;
142 if (status & ATH9K_INT_TX) 140 if (status & ATH9K_INT_TX)
143 sc->sc_debug.stats.istats.txok++; 141 sc->debug.stats.istats.txok++;
144 if (status & ATH9K_INT_TXURN) 142 if (status & ATH9K_INT_TXURN)
145 sc->sc_debug.stats.istats.txurn++; 143 sc->debug.stats.istats.txurn++;
146 if (status & ATH9K_INT_MIB) 144 if (status & ATH9K_INT_MIB)
147 sc->sc_debug.stats.istats.mib++; 145 sc->debug.stats.istats.mib++;
148 if (status & ATH9K_INT_RXPHY) 146 if (status & ATH9K_INT_RXPHY)
149 sc->sc_debug.stats.istats.rxphyerr++; 147 sc->debug.stats.istats.rxphyerr++;
150 if (status & ATH9K_INT_RXKCM) 148 if (status & ATH9K_INT_RXKCM)
151 sc->sc_debug.stats.istats.rx_keycache_miss++; 149 sc->debug.stats.istats.rx_keycache_miss++;
152 if (status & ATH9K_INT_SWBA) 150 if (status & ATH9K_INT_SWBA)
153 sc->sc_debug.stats.istats.swba++; 151 sc->debug.stats.istats.swba++;
154 if (status & ATH9K_INT_BMISS) 152 if (status & ATH9K_INT_BMISS)
155 sc->sc_debug.stats.istats.bmiss++; 153 sc->debug.stats.istats.bmiss++;
156 if (status & ATH9K_INT_BNR) 154 if (status & ATH9K_INT_BNR)
157 sc->sc_debug.stats.istats.bnr++; 155 sc->debug.stats.istats.bnr++;
158 if (status & ATH9K_INT_CST) 156 if (status & ATH9K_INT_CST)
159 sc->sc_debug.stats.istats.cst++; 157 sc->debug.stats.istats.cst++;
160 if (status & ATH9K_INT_GTT) 158 if (status & ATH9K_INT_GTT)
161 sc->sc_debug.stats.istats.gtt++; 159 sc->debug.stats.istats.gtt++;
162 if (status & ATH9K_INT_TIM) 160 if (status & ATH9K_INT_TIM)
163 sc->sc_debug.stats.istats.tim++; 161 sc->debug.stats.istats.tim++;
164 if (status & ATH9K_INT_CABEND) 162 if (status & ATH9K_INT_CABEND)
165 sc->sc_debug.stats.istats.cabend++; 163 sc->debug.stats.istats.cabend++;
166 if (status & ATH9K_INT_DTIMSYNC) 164 if (status & ATH9K_INT_DTIMSYNC)
167 sc->sc_debug.stats.istats.dtimsync++; 165 sc->debug.stats.istats.dtimsync++;
168 if (status & ATH9K_INT_DTIM) 166 if (status & ATH9K_INT_DTIM)
169 sc->sc_debug.stats.istats.dtim++; 167 sc->debug.stats.istats.dtim++;
170} 168}
171 169
172static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, 170static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -177,41 +175,41 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
177 unsigned int len = 0; 175 unsigned int len = 0;
178 176
179 len += snprintf(buf + len, sizeof(buf) - len, 177 len += snprintf(buf + len, sizeof(buf) - len,
180 "%8s: %10u\n", "RX", sc->sc_debug.stats.istats.rxok); 178 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
181 len += snprintf(buf + len, sizeof(buf) - len, 179 len += snprintf(buf + len, sizeof(buf) - len,
182 "%8s: %10u\n", "RXEOL", sc->sc_debug.stats.istats.rxeol); 180 "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
183 len += snprintf(buf + len, sizeof(buf) - len, 181 len += snprintf(buf + len, sizeof(buf) - len,
184 "%8s: %10u\n", "RXORN", sc->sc_debug.stats.istats.rxorn); 182 "%8s: %10u\n", "RXORN", sc->debug.stats.istats.rxorn);
185 len += snprintf(buf + len, sizeof(buf) - len, 183 len += snprintf(buf + len, sizeof(buf) - len,
186 "%8s: %10u\n", "TX", sc->sc_debug.stats.istats.txok); 184 "%8s: %10u\n", "TX", sc->debug.stats.istats.txok);
187 len += snprintf(buf + len, sizeof(buf) - len, 185 len += snprintf(buf + len, sizeof(buf) - len,
188 "%8s: %10u\n", "TXURN", sc->sc_debug.stats.istats.txurn); 186 "%8s: %10u\n", "TXURN", sc->debug.stats.istats.txurn);
189 len += snprintf(buf + len, sizeof(buf) - len, 187 len += snprintf(buf + len, sizeof(buf) - len,
190 "%8s: %10u\n", "MIB", sc->sc_debug.stats.istats.mib); 188 "%8s: %10u\n", "MIB", sc->debug.stats.istats.mib);
191 len += snprintf(buf + len, sizeof(buf) - len, 189 len += snprintf(buf + len, sizeof(buf) - len,
192 "%8s: %10u\n", "RXPHY", sc->sc_debug.stats.istats.rxphyerr); 190 "%8s: %10u\n", "RXPHY", sc->debug.stats.istats.rxphyerr);
193 len += snprintf(buf + len, sizeof(buf) - len, 191 len += snprintf(buf + len, sizeof(buf) - len,
194 "%8s: %10u\n", "RXKCM", sc->sc_debug.stats.istats.rx_keycache_miss); 192 "%8s: %10u\n", "RXKCM", sc->debug.stats.istats.rx_keycache_miss);
195 len += snprintf(buf + len, sizeof(buf) - len, 193 len += snprintf(buf + len, sizeof(buf) - len,
196 "%8s: %10u\n", "SWBA", sc->sc_debug.stats.istats.swba); 194 "%8s: %10u\n", "SWBA", sc->debug.stats.istats.swba);
197 len += snprintf(buf + len, sizeof(buf) - len, 195 len += snprintf(buf + len, sizeof(buf) - len,
198 "%8s: %10u\n", "BMISS", sc->sc_debug.stats.istats.bmiss); 196 "%8s: %10u\n", "BMISS", sc->debug.stats.istats.bmiss);
199 len += snprintf(buf + len, sizeof(buf) - len, 197 len += snprintf(buf + len, sizeof(buf) - len,
200 "%8s: %10u\n", "BNR", sc->sc_debug.stats.istats.bnr); 198 "%8s: %10u\n", "BNR", sc->debug.stats.istats.bnr);
201 len += snprintf(buf + len, sizeof(buf) - len, 199 len += snprintf(buf + len, sizeof(buf) - len,
202 "%8s: %10u\n", "CST", sc->sc_debug.stats.istats.cst); 200 "%8s: %10u\n", "CST", sc->debug.stats.istats.cst);
203 len += snprintf(buf + len, sizeof(buf) - len, 201 len += snprintf(buf + len, sizeof(buf) - len,
204 "%8s: %10u\n", "GTT", sc->sc_debug.stats.istats.gtt); 202 "%8s: %10u\n", "GTT", sc->debug.stats.istats.gtt);
205 len += snprintf(buf + len, sizeof(buf) - len, 203 len += snprintf(buf + len, sizeof(buf) - len,
206 "%8s: %10u\n", "TIM", sc->sc_debug.stats.istats.tim); 204 "%8s: %10u\n", "TIM", sc->debug.stats.istats.tim);
207 len += snprintf(buf + len, sizeof(buf) - len, 205 len += snprintf(buf + len, sizeof(buf) - len,
208 "%8s: %10u\n", "CABEND", sc->sc_debug.stats.istats.cabend); 206 "%8s: %10u\n", "CABEND", sc->debug.stats.istats.cabend);
209 len += snprintf(buf + len, sizeof(buf) - len, 207 len += snprintf(buf + len, sizeof(buf) - len,
210 "%8s: %10u\n", "DTIMSYNC", sc->sc_debug.stats.istats.dtimsync); 208 "%8s: %10u\n", "DTIMSYNC", sc->debug.stats.istats.dtimsync);
211 len += snprintf(buf + len, sizeof(buf) - len, 209 len += snprintf(buf + len, sizeof(buf) - len,
212 "%8s: %10u\n", "DTIM", sc->sc_debug.stats.istats.dtim); 210 "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim);
213 len += snprintf(buf + len, sizeof(buf) - len, 211 len += snprintf(buf + len, sizeof(buf) - len,
214 "%8s: %10u\n", "TOTAL", sc->sc_debug.stats.istats.total); 212 "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total);
215 213
216 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 214 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
217} 215}
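Both read_file_dma() and read_file_interrupt() above follow the same debugfs idiom: format the statistics into an on-stack buffer with snprintf() and let simple_read_from_buffer() take care of the file offset and copy_to_user(). The sketch below shows that idiom in isolation; example_stats, example_read() and the file name are hypothetical, while debugfs_create_file() and simple_read_from_buffer() are the real kernel interfaces.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>

struct example_stats {
	u32 rxok;
	u32 txok;
};

static int example_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t example_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	struct example_stats *st = file->private_data;
	char buf[128];
	unsigned int len = 0;

	len += snprintf(buf + len, sizeof(buf) - len, "RX: %u\n", st->rxok);
	len += snprintf(buf + len, sizeof(buf) - len, "TX: %u\n", st->txok);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const struct file_operations example_fops = {
	.read  = example_read,
	.open  = example_open,
	.owner = THIS_MODULE,
};

/* Registration, typically from an init path:
 *	debugfs_create_file("example", S_IRUGO, parent_dentry,
 *			    &stats, &example_fops);
 */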
@@ -222,29 +220,144 @@ static const struct file_operations fops_interrupt = {
222 .owner = THIS_MODULE 220 .owner = THIS_MODULE
223}; 221};
224 222
223static void ath_debug_stat_11n_rc(struct ath_softc *sc, struct sk_buff *skb)
224{
225 struct ath_tx_info_priv *tx_info_priv = NULL;
226 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
227 struct ieee80211_tx_rate *rates = tx_info->status.rates;
228 int final_ts_idx, idx;
229
230 tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
231 final_ts_idx = tx_info_priv->tx.ts_rateindex;
232 idx = sc->cur_rate_table->info[rates[final_ts_idx].idx].dot11rate;
233
234 sc->debug.stats.n_rcstats[idx].success++;
235}
236
237static void ath_debug_stat_legacy_rc(struct ath_softc *sc, struct sk_buff *skb)
238{
239 struct ath_tx_info_priv *tx_info_priv = NULL;
240 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
241 struct ieee80211_tx_rate *rates = tx_info->status.rates;
242 int final_ts_idx, idx;
243
244 tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
245 final_ts_idx = tx_info_priv->tx.ts_rateindex;
246 idx = rates[final_ts_idx].idx;
247
248 sc->debug.stats.legacy_rcstats[idx].success++;
249}
250
251void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb)
252{
253 if (conf_is_ht(&sc->hw->conf))
254 ath_debug_stat_11n_rc(sc, skb);
255 else
256 ath_debug_stat_legacy_rc(sc, skb);
257}
258
259/* FIXME: legacy rates, later on .. */
260void ath_debug_stat_retries(struct ath_softc *sc, int rix,
261 int xretries, int retries)
262{
263 if (conf_is_ht(&sc->hw->conf)) {
264 int idx = sc->cur_rate_table->info[rix].dot11rate;
265
266 sc->debug.stats.n_rcstats[idx].xretries += xretries;
267 sc->debug.stats.n_rcstats[idx].retries += retries;
268 }
269}
270
271static ssize_t ath_read_file_stat_11n_rc(struct file *file,
272 char __user *user_buf,
273 size_t count, loff_t *ppos)
274{
275 struct ath_softc *sc = file->private_data;
276 char buf[1024];
277 unsigned int len = 0;
278 int i = 0;
279
280 len += sprintf(buf, "%7s %13s %8s %8s\n\n", "Rate", "Success",
281 "Retries", "XRetries");
282
283 for (i = 0; i <= 15; i++) {
284 len += snprintf(buf + len, sizeof(buf) - len,
285 "%5s%3d: %8u %8u %8u\n", "MCS", i,
286 sc->debug.stats.n_rcstats[i].success,
287 sc->debug.stats.n_rcstats[i].retries,
288 sc->debug.stats.n_rcstats[i].xretries);
289 }
290
291 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
292}
293
294static ssize_t ath_read_file_stat_legacy_rc(struct file *file,
295 char __user *user_buf,
296 size_t count, loff_t *ppos)
297{
298 struct ath_softc *sc = file->private_data;
299 char buf[512];
300 unsigned int len = 0;
301 int i = 0;
302
303 len += sprintf(buf, "%7s %13s\n\n", "Rate", "Success");
304
305 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
306 len += snprintf(buf + len, sizeof(buf) - len, "%5u: %12u\n",
307 sc->cur_rate_table->info[i].ratekbps / 1000,
308 sc->debug.stats.legacy_rcstats[i].success);
309 }
310
311 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
312}
313
314static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
315 size_t count, loff_t *ppos)
316{
317 struct ath_softc *sc = file->private_data;
318
319 if (conf_is_ht(&sc->hw->conf))
320 return ath_read_file_stat_11n_rc(file, user_buf, count, ppos);
321 else
 322 return ath_read_file_stat_legacy_rc(file, user_buf, count, ppos);
323}
324
325static const struct file_operations fops_rcstat = {
326 .read = read_file_rcstat,
327 .open = ath9k_debugfs_open,
328 .owner = THIS_MODULE
329};
330
225int ath9k_init_debug(struct ath_softc *sc) 331int ath9k_init_debug(struct ath_softc *sc)
226{ 332{
227 sc->sc_debug.debug_mask = ath9k_debug; 333 sc->debug.debug_mask = ath9k_debug;
228 334
229 sc->sc_debug.debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 335 sc->debug.debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
230 if (!sc->sc_debug.debugfs_root) 336 if (!sc->debug.debugfs_root)
231 goto err; 337 goto err;
232 338
233 sc->sc_debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy), 339 sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
234 sc->sc_debug.debugfs_root); 340 sc->debug.debugfs_root);
235 if (!sc->sc_debug.debugfs_phy) 341 if (!sc->debug.debugfs_phy)
236 goto err; 342 goto err;
237 343
238 sc->sc_debug.debugfs_dma = debugfs_create_file("dma", S_IRUGO, 344 sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUGO,
239 sc->sc_debug.debugfs_phy, sc, &fops_dma); 345 sc->debug.debugfs_phy, sc, &fops_dma);
240 if (!sc->sc_debug.debugfs_dma) 346 if (!sc->debug.debugfs_dma)
241 goto err; 347 goto err;
242 348
243 sc->sc_debug.debugfs_interrupt = debugfs_create_file("interrupt", 349 sc->debug.debugfs_interrupt = debugfs_create_file("interrupt",
244 S_IRUGO, 350 S_IRUGO,
245 sc->sc_debug.debugfs_phy, 351 sc->debug.debugfs_phy,
246 sc, &fops_interrupt); 352 sc, &fops_interrupt);
247 if (!sc->sc_debug.debugfs_interrupt) 353 if (!sc->debug.debugfs_interrupt)
354 goto err;
355
356 sc->debug.debugfs_rcstat = debugfs_create_file("rcstat",
357 S_IRUGO,
358 sc->debug.debugfs_phy,
359 sc, &fops_rcstat);
360 if (!sc->debug.debugfs_rcstat)
248 goto err; 361 goto err;
249 362
250 return 0; 363 return 0;
@@ -255,8 +368,9 @@ err:
255 368
256void ath9k_exit_debug(struct ath_softc *sc) 369void ath9k_exit_debug(struct ath_softc *sc)
257{ 370{
258 debugfs_remove(sc->sc_debug.debugfs_interrupt); 371 debugfs_remove(sc->debug.debugfs_rcstat);
259 debugfs_remove(sc->sc_debug.debugfs_dma); 372 debugfs_remove(sc->debug.debugfs_interrupt);
260 debugfs_remove(sc->sc_debug.debugfs_phy); 373 debugfs_remove(sc->debug.debugfs_dma);
261 debugfs_remove(sc->sc_debug.debugfs_root); 374 debugfs_remove(sc->debug.debugfs_phy);
375 debugfs_remove(sc->debug.debugfs_root);
262} 376}
diff --git a/drivers/net/wireless/ath9k/debug.h b/drivers/net/wireless/ath9k/debug.h
new file mode 100644
index 00000000000..61e969894c0
--- /dev/null
+++ b/drivers/net/wireless/ath9k/debug.h
@@ -0,0 +1,153 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef DEBUG_H
18#define DEBUG_H
19
20enum ATH_DEBUG {
21 ATH_DBG_RESET = 0x00000001,
22 ATH_DBG_REG_IO = 0x00000002,
23 ATH_DBG_QUEUE = 0x00000004,
24 ATH_DBG_EEPROM = 0x00000008,
25 ATH_DBG_CALIBRATE = 0x00000010,
26 ATH_DBG_CHANNEL = 0x00000020,
27 ATH_DBG_INTERRUPT = 0x00000040,
28 ATH_DBG_REGULATORY = 0x00000080,
29 ATH_DBG_ANI = 0x00000100,
30 ATH_DBG_POWER_MGMT = 0x00000200,
31 ATH_DBG_XMIT = 0x00000400,
32 ATH_DBG_BEACON = 0x00001000,
33 ATH_DBG_CONFIG = 0x00002000,
34 ATH_DBG_KEYCACHE = 0x00004000,
35 ATH_DBG_FATAL = 0x00008000,
36 ATH_DBG_ANY = 0xffffffff
37};
38
39#define DBG_DEFAULT (ATH_DBG_FATAL)
40
41#ifdef CONFIG_ATH9K_DEBUG
42
43/**
44 * struct ath_interrupt_stats - Contains statistics about interrupts
45 * @total: Total no. of interrupts generated so far
46 * @rxok: RX with no errors
47 * @rxeol: RX with no more RXDESC available
48 * @rxorn: RX FIFO overrun
49 * @txok: TX completed at the requested rate
50 * @txurn: TX FIFO underrun
 51 * @mib: MIB regs reaching their threshold
52 * @rxphyerr: RX with phy errors
53 * @rx_keycache_miss: RX with key cache misses
54 * @swba: Software Beacon Alert
55 * @bmiss: Beacon Miss
56 * @bnr: Beacon Not Ready
 57 * @cst: Carrier Sense Timeout
58 * @gtt: Global TX Timeout
59 * @tim: RX beacon TIM occurrence
60 * @cabend: RX End of CAB traffic
61 * @dtimsync: DTIM sync lossage
62 * @dtim: RX Beacon with DTIM
63 */
64struct ath_interrupt_stats {
65 u32 total;
66 u32 rxok;
67 u32 rxeol;
68 u32 rxorn;
69 u32 txok;
70 u32 txeol;
71 u32 txurn;
72 u32 mib;
73 u32 rxphyerr;
74 u32 rx_keycache_miss;
75 u32 swba;
76 u32 bmiss;
77 u32 bnr;
78 u32 cst;
79 u32 gtt;
80 u32 tim;
81 u32 cabend;
82 u32 dtimsync;
83 u32 dtim;
84};
85
86struct ath_legacy_rc_stats {
87 u32 success;
88};
89
90struct ath_11n_rc_stats {
91 u32 success;
92 u32 retries;
93 u32 xretries;
94};
95
96struct ath_stats {
97 struct ath_interrupt_stats istats;
98 struct ath_legacy_rc_stats legacy_rcstats[12]; /* max(11a,11b,11g) */
99 struct ath_11n_rc_stats n_rcstats[16]; /* 0..15 MCS rates */
100};
101
102struct ath9k_debug {
103 int debug_mask;
104 struct dentry *debugfs_root;
105 struct dentry *debugfs_phy;
106 struct dentry *debugfs_dma;
107 struct dentry *debugfs_interrupt;
108 struct dentry *debugfs_rcstat;
109 struct ath_stats stats;
110};
111
112void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...);
113int ath9k_init_debug(struct ath_softc *sc);
114void ath9k_exit_debug(struct ath_softc *sc);
115void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
116void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb);
117void ath_debug_stat_retries(struct ath_softc *sc, int rix,
118 int xretries, int retries);
119
120#else
121
122static inline void DPRINTF(struct ath_softc *sc, int dbg_mask,
123 const char *fmt, ...)
124{
125}
126
127static inline int ath9k_init_debug(struct ath_softc *sc)
128{
129 return 0;
130}
131
132static inline void ath9k_exit_debug(struct ath_softc *sc)
133{
134}
135
136static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
137 enum ath9k_int status)
138{
139}
140
141static inline void ath_debug_stat_rc(struct ath_softc *sc,
142 struct sk_buff *skb)
143{
144}
145
146static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
147 int xretries, int retries)
148{
149}
150
151#endif /* CONFIG_ATH9K_DEBUG */
152
153#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c
index acd6c5374d4..c0359ad2bc7 100644
--- a/drivers/net/wireless/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath9k/eeprom.c
@@ -14,12 +14,9 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21 18
22static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah, 19static void ath9k_hw_analog_shift_rmw(struct ath_hw *ah,
23 u32 reg, u32 mask, 20 u32 reg, u32 mask,
24 u32 shift, u32 val) 21 u32 shift, u32 val)
25{ 22{
@@ -30,7 +27,7 @@ static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
30 27
31 REG_WRITE(ah, reg, regVal); 28 REG_WRITE(ah, reg, regVal);
32 29
33 if (ah->ah_config.analog_shiftreg) 30 if (ah->config.analog_shiftreg)
34 udelay(100); 31 udelay(100);
35 32
36 return; 33 return;
@@ -91,254 +88,227 @@ static inline bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList,
91 return false; 88 return false;
92} 89}
93 90
94static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off, u16 *data) 91static inline bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
95{ 92{
96 (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); 93 struct ath_softc *sc = ah->ah_sc;
97 94
98 if (!ath9k_hw_wait(ah, 95 return sc->bus_ops->eeprom_read(ah, off, data);
99 AR_EEPROM_STATUS_DATA,
100 AR_EEPROM_STATUS_DATA_BUSY |
101 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
102 return false;
103 }
104
105 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
106 AR_EEPROM_STATUS_DATA_VAL);
107
108 return true;
109} 96}
110 97
111static int ath9k_hw_flash_map(struct ath_hal *ah) 98static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
99 u8 *pVpdList, u16 numIntercepts,
100 u8 *pRetVpdList)
112{ 101{
113 struct ath_hal_5416 *ahp = AH5416(ah); 102 u16 i, k;
114 103 u8 currPwr = pwrMin;
115 ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX); 104 u16 idxL = 0, idxR = 0;
116 105
117 if (!ahp->ah_cal_mem) { 106 for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
118 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 107 ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
119 "cannot remap eeprom region \n"); 108 numIntercepts, &(idxL),
120 return -EIO; 109 &(idxR));
110 if (idxR < 1)
111 idxR = 1;
112 if (idxL == numIntercepts - 1)
113 idxL = (u16) (numIntercepts - 2);
114 if (pPwrList[idxL] == pPwrList[idxR])
115 k = pVpdList[idxL];
116 else
117 k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] +
118 (pPwrList[idxR] - currPwr) * pVpdList[idxL]) /
119 (pPwrList[idxR] - pPwrList[idxL]));
120 pRetVpdList[i] = (u8) k;
121 currPwr += 2;
121 } 122 }
122 123
123 return 0;
124}
125
126static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off, u16 *data)
127{
128 struct ath_hal_5416 *ahp = AH5416(ah);
129
130 *data = ioread16(ahp->ah_cal_mem + off);
131
132 return true; 124 return true;
133} 125}
134 126
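ath9k_hw_fill_vpd_table() above resamples the measured (power, VPD) calibration pairs onto a regular grid, stepping the target power by 2 (quarter-dB units) and linearly interpolating the VPD value between the two bounding measurements. The standalone sketch below shows the same piecewise-linear interpolation; interp_vpd() and the sample data are hypothetical, only the interpolation formula mirrors the driver code.

#include <stdio.h>
#include <stdint.h>

static uint8_t interp_vpd(uint8_t pwr, const uint8_t *pwr_list,
			  const uint8_t *vpd_list, int n)
{
	int i;

	if (pwr <= pwr_list[0])
		return vpd_list[0];
	if (pwr >= pwr_list[n - 1])
		return vpd_list[n - 1];

	for (i = 1; i < n; i++) {
		if (pwr <= pwr_list[i])
			/* Linear interpolation between points i-1 and i. */
			return (uint8_t)(((pwr - pwr_list[i - 1]) * vpd_list[i] +
					  (pwr_list[i] - pwr) * vpd_list[i - 1]) /
					 (pwr_list[i] - pwr_list[i - 1]));
	}

	return vpd_list[n - 1];
}

int main(void)
{
	/* Made-up calibration points: power in quarter-dB steps, raw VPD. */
	const uint8_t pwr_list[] = { 0, 8, 16, 24 };
	const uint8_t vpd_list[] = { 10, 40, 90, 120 };
	int p;

	for (p = 0; p <= 24; p += 2)
		printf("pwr %2d -> vpd %d\n", p,
		       interp_vpd((uint8_t)p, pwr_list, vpd_list, 4));

	return 0;
}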
135static inline bool ath9k_hw_nvram_read(struct ath_hal *ah, u32 off, u16 *data) 127static void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah,
136{ 128 struct ath9k_channel *chan,
137 if (ath9k_hw_use_flash(ah)) 129 struct cal_target_power_leg *powInfo,
138 return ath9k_hw_flash_read(ah, off, data); 130 u16 numChannels,
139 else 131 struct cal_target_power_leg *pNewPower,
140 return ath9k_hw_eeprom_read(ah, off, data); 132 u16 numRates, bool isExtTarget)
141}
142
143static bool ath9k_hw_fill_4k_eeprom(struct ath_hal *ah)
144{ 133{
145#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 134 struct chan_centers centers;
146 struct ath_hal_5416 *ahp = AH5416(ah); 135 u16 clo, chi;
147 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k; 136 int i;
148 u16 *eep_data; 137 int matchIndex = -1, lowIndex = -1;
149 int addr, eep_start_loc = 0; 138 u16 freq;
150 139
151 eep_start_loc = 64; 140 ath9k_hw_get_channel_centers(ah, chan, &centers);
141 freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;
152 142
153 if (!ath9k_hw_use_flash(ah)) { 143 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
154 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 144 IS_CHAN_2GHZ(chan))) {
155 "Reading from EEPROM, not flash\n"); 145 matchIndex = 0;
146 } else {
147 for (i = 0; (i < numChannels) &&
148 (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
149 if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
150 IS_CHAN_2GHZ(chan))) {
151 matchIndex = i;
152 break;
153 } else if ((freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
154 IS_CHAN_2GHZ(chan))) &&
155 (freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
156 IS_CHAN_2GHZ(chan)))) {
157 lowIndex = i - 1;
158 break;
159 }
160 }
161 if ((matchIndex == -1) && (lowIndex == -1))
162 matchIndex = i - 1;
156 } 163 }
157 164
158 eep_data = (u16 *)eep; 165 if (matchIndex != -1) {
166 *pNewPower = powInfo[matchIndex];
167 } else {
168 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
169 IS_CHAN_2GHZ(chan));
170 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
171 IS_CHAN_2GHZ(chan));
159 172
160 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 173 for (i = 0; i < numRates; i++) {
161 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { 174 pNewPower->tPow2x[i] =
162 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 175 (u8)ath9k_hw_interpolate(freq, clo, chi,
163 "Unable to read eeprom region \n"); 176 powInfo[lowIndex].tPow2x[i],
164 return false; 177 powInfo[lowIndex + 1].tPow2x[i]);
165 } 178 }
166 eep_data++;
167 } 179 }
168 return true;
169#undef SIZE_EEPROM_4K
170} 180}
171 181
172static bool ath9k_hw_fill_def_eeprom(struct ath_hal *ah) 182static void ath9k_hw_get_target_powers(struct ath_hw *ah,
183 struct ath9k_channel *chan,
184 struct cal_target_power_ht *powInfo,
185 u16 numChannels,
186 struct cal_target_power_ht *pNewPower,
187 u16 numRates, bool isHt40Target)
173{ 188{
174#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) 189 struct chan_centers centers;
175 struct ath_hal_5416 *ahp = AH5416(ah); 190 u16 clo, chi;
176 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def; 191 int i;
177 u16 *eep_data; 192 int matchIndex = -1, lowIndex = -1;
178 int addr, ar5416_eep_start_loc = 0x100; 193 u16 freq;
179 194
180 eep_data = (u16 *)eep; 195 ath9k_hw_get_channel_centers(ah, chan, &centers);
196 freq = isHt40Target ? centers.synth_center : centers.ctl_center;
181 197
182 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { 198 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
183 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, 199 matchIndex = 0;
184 eep_data)) { 200 } else {
185 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 201 for (i = 0; (i < numChannels) &&
186 "Unable to read eeprom region\n"); 202 (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
187 return false; 203 if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
204 IS_CHAN_2GHZ(chan))) {
205 matchIndex = i;
206 break;
207 } else
208 if ((freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
209 IS_CHAN_2GHZ(chan))) &&
210 (freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
211 IS_CHAN_2GHZ(chan)))) {
212 lowIndex = i - 1;
213 break;
214 }
188 } 215 }
189 eep_data++; 216 if ((matchIndex == -1) && (lowIndex == -1))
217 matchIndex = i - 1;
190 } 218 }
191 return true;
192#undef SIZE_EEPROM_DEF
193}
194
195static bool (*ath9k_fill_eeprom[]) (struct ath_hal *) = {
196 ath9k_hw_fill_def_eeprom,
197 ath9k_hw_fill_4k_eeprom
198};
199 219
200static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah) 220 if (matchIndex != -1) {
201{ 221 *pNewPower = powInfo[matchIndex];
202 struct ath_hal_5416 *ahp = AH5416(ah); 222 } else {
223 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
224 IS_CHAN_2GHZ(chan));
225 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
226 IS_CHAN_2GHZ(chan));
203 227
204 return ath9k_fill_eeprom[ahp->ah_eep_map](ah); 228 for (i = 0; i < numRates; i++) {
229 pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq,
230 clo, chi,
231 powInfo[lowIndex].tPow2x[i],
232 powInfo[lowIndex + 1].tPow2x[i]);
233 }
234 }
205} 235}
206 236
207static int ath9k_hw_check_def_eeprom(struct ath_hal *ah) 237static u16 ath9k_hw_get_max_edge_power(u16 freq,
238 struct cal_ctl_edges *pRdEdgesPower,
239 bool is2GHz, int num_band_edges)
208{ 240{
209 struct ath_hal_5416 *ahp = AH5416(ah); 241 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
210 struct ar5416_eeprom_def *eep = 242 int i;
211 (struct ar5416_eeprom_def *) &ahp->ah_eeprom.def;
212 u16 *eepdata, temp, magic, magic2;
213 u32 sum = 0, el;
214 bool need_swap = false;
215 int i, addr, size;
216
217 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
218 &magic)) {
219 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
220 "Reading Magic # failed\n");
221 return false;
222 }
223
224 if (!ath9k_hw_use_flash(ah)) {
225
226 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
227 "Read Magic = 0x%04X\n", magic);
228
229 if (magic != AR5416_EEPROM_MAGIC) {
230 magic2 = swab16(magic);
231
232 if (magic2 == AR5416_EEPROM_MAGIC) {
233 size = sizeof(struct ar5416_eeprom_def);
234 need_swap = true;
235 eepdata = (u16 *) (&ahp->ah_eeprom);
236
237 for (addr = 0; addr < size / sizeof(u16); addr++) {
238 temp = swab16(*eepdata);
239 *eepdata = temp;
240 eepdata++;
241
242 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
243 "0x%04X ", *eepdata);
244 243
245 if (((addr + 1) % 6) == 0) 244 for (i = 0; (i < num_band_edges) &&
246 DPRINTF(ah->ah_sc, 245 (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
247 ATH_DBG_EEPROM, "\n"); 246 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
248 } 247 twiceMaxEdgePower = pRdEdgesPower[i].tPower;
249 } else { 248 break;
250 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 249 } else if ((i > 0) &&
251 "Invalid EEPROM Magic. " 250 (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
252 "endianness mismatch.\n"); 251 is2GHz))) {
253 return -EINVAL; 252 if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
253 is2GHz) < freq &&
254 pRdEdgesPower[i - 1].flag) {
255 twiceMaxEdgePower =
256 pRdEdgesPower[i - 1].tPower;
254 } 257 }
258 break;
255 } 259 }
256 } 260 }
257 261
258 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", 262 return twiceMaxEdgePower;
259 need_swap ? "True" : "False"); 263}
260 264
261 if (need_swap) 265/****************************************/
262 el = swab16(ahp->ah_eeprom.def.baseEepHeader.length); 266/* EEPROM Operations for 4K sized cards */
263 else 267/****************************************/
264 el = ahp->ah_eeprom.def.baseEepHeader.length;
265 268
266 if (el > sizeof(struct ar5416_eeprom_def)) 269static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
267 el = sizeof(struct ar5416_eeprom_def) / sizeof(u16); 270{
268 else 271 return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF);
269 el = el / sizeof(u16); 272}
270 273
271 eepdata = (u16 *)(&ahp->ah_eeprom); 274static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
275{
276 return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
277}
272 278
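The two accessors above show how the 4K EEPROM packs the header version and revision into one 16-bit word: the version lives in the top nibble and the revision in the low 12 bits. A tiny standalone illustration; the sample value is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t version_field = 0xE00E;	/* hypothetical baseEepHeader.version */

	printf("eeprom version = %d\n", (version_field >> 12) & 0xF);	/* 14 */
	printf("eeprom rev     = %d\n", version_field & 0xFFF);	/* 14 */

	return 0;
}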
273 for (i = 0; i < el; i++) 279static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
274 sum ^= *eepdata++; 280{
281#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
282 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
283 u16 *eep_data;
284 int addr, eep_start_loc = 0;
275 285
276 if (need_swap) { 286 eep_start_loc = 64;
277 u32 integer, j;
278 u16 word;
279 287
288 if (!ath9k_hw_use_flash(ah)) {
280 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 289 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
281 "EEPROM Endianness is not native.. Changing \n"); 290 "Reading from EEPROM, not flash\n");
282 291 }
283 word = swab16(eep->baseEepHeader.length);
284 eep->baseEepHeader.length = word;
285
286 word = swab16(eep->baseEepHeader.checksum);
287 eep->baseEepHeader.checksum = word;
288
289 word = swab16(eep->baseEepHeader.version);
290 eep->baseEepHeader.version = word;
291
292 word = swab16(eep->baseEepHeader.regDmn[0]);
293 eep->baseEepHeader.regDmn[0] = word;
294
295 word = swab16(eep->baseEepHeader.regDmn[1]);
296 eep->baseEepHeader.regDmn[1] = word;
297
298 word = swab16(eep->baseEepHeader.rfSilent);
299 eep->baseEepHeader.rfSilent = word;
300
301 word = swab16(eep->baseEepHeader.blueToothOptions);
302 eep->baseEepHeader.blueToothOptions = word;
303
304 word = swab16(eep->baseEepHeader.deviceCap);
305 eep->baseEepHeader.deviceCap = word;
306
307 for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
308 struct modal_eep_header *pModal =
309 &eep->modalHeader[j];
310 integer = swab32(pModal->antCtrlCommon);
311 pModal->antCtrlCommon = integer;
312
313		for (i = 0; i < AR5416_MAX_CHAINS; i++) {
314			integer = swab32(pModal->antCtrlChain[i]);
315			pModal->antCtrlChain[i] = integer;
316		}
317
318		for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
319			word = swab16(pModal->spurChans[i].spurChan);
320			pModal->spurChans[i].spurChan = word;
321		}
322		}
323	}
324
325	if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER ||
326	    ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
327		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
328			"Bad EEPROM checksum 0x%x or revision 0x%04x\n",
329			sum, ar5416_get_eep_ver(ahp));
330		return -EINVAL;
331	}
332
333	return 0;
334}
292
293	eep_data = (u16 *)eep;
294
295	for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
296		if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) {
297			DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
298				"Unable to read eeprom region \n");
299			return false;
300		}
301		eep_data++;
302	}
303	return true;
304#undef SIZE_EEPROM_4K
305}
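The checksum test above expects the EEPROM image to XOR-sum to 0xffff across all of its 16-bit words. Below is a small self-contained sketch of that check; the four-word image is fabricated so that its words XOR to 0xffff and is not real EEPROM data.

/* Standalone sketch (not driver code): the XOR-to-0xffff self-check
 * used by the EEPROM validation above, on a fabricated 4-word image. */
#include <stdio.h>

int main(void)
{
	unsigned short image[4] = { 0xa55a, 0x1234, 0x0f0f, 0x479e };
	unsigned short sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(image) / sizeof(image[0]); i++)
		sum ^= image[i];

	printf("checksum %s (0x%04x)\n",
	       sum == 0xffff ? "ok" : "bad", sum);
	return 0;
}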
335 306
307static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
308{
309#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
310	struct ar5416_eeprom_4k *eep =
311		(struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
312	u16 *eepdata, temp, magic, magic2;
313	u32 sum = 0, el;
314	bool need_swap = false;
336static int ath9k_hw_check_4k_eeprom(struct ath_hal *ah)
337{
338#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
339	struct ath_hal_5416 *ahp = AH5416(ah);
340	struct ar5416_eeprom_4k *eep =
341		(struct ar5416_eeprom_4k *) &ahp->ah_eeprom.map4k;
342	u16 *eepdata, temp, magic, magic2;
343	u32 sum = 0, el;
344	bool need_swap = false;
@@ -362,7 +332,7 @@ static int ath9k_hw_check_4k_eeprom(struct ath_hal *ah)
362 332
363 if (magic2 == AR5416_EEPROM_MAGIC) { 333 if (magic2 == AR5416_EEPROM_MAGIC) {
364 need_swap = true; 334 need_swap = true;
365 eepdata = (u16 *) (&ahp->ah_eeprom); 335 eepdata = (u16 *) (&ah->eeprom);
366 336
367 for (addr = 0; addr < EEPROM_4K_SIZE; addr++) { 337 for (addr = 0; addr < EEPROM_4K_SIZE; addr++) {
368 temp = swab16(*eepdata); 338 temp = swab16(*eepdata);
@@ -389,16 +359,16 @@ static int ath9k_hw_check_4k_eeprom(struct ath_hal *ah)
389 need_swap ? "True" : "False"); 359 need_swap ? "True" : "False");
390 360
391 if (need_swap) 361 if (need_swap)
392 el = swab16(ahp->ah_eeprom.map4k.baseEepHeader.length); 362 el = swab16(ah->eeprom.map4k.baseEepHeader.length);
393 else 363 else
394 el = ahp->ah_eeprom.map4k.baseEepHeader.length; 364 el = ah->eeprom.map4k.baseEepHeader.length;
395 365
396 if (el > sizeof(struct ar5416_eeprom_def)) 366 if (el > sizeof(struct ar5416_eeprom_def))
397 el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); 367 el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16);
398 else 368 else
399 el = el / sizeof(u16); 369 el = el / sizeof(u16);
400 370
401 eepdata = (u16 *)(&ahp->ah_eeprom); 371 eepdata = (u16 *)(&ah->eeprom);
402 372
403 for (i = 0; i < el; i++) 373 for (i = 0; i < el; i++)
404 sum ^= *eepdata++; 374 sum ^= *eepdata++;
@@ -448,11 +418,11 @@ static int ath9k_hw_check_4k_eeprom(struct ath_hal *ah)
448 } 418 }
449 } 419 }
450 420
451 if (sum != 0xffff || ar5416_get_eep4k_ver(ahp) != AR5416_EEP_VER || 421 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
452 ar5416_get_eep4k_rev(ahp) < AR5416_EEP_NO_BACK_VER) { 422 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
453 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 423 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
454 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 424 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
455 sum, ar5416_get_eep4k_ver(ahp)); 425 sum, ah->eep_ops->get_eeprom_ver(ah));
456 return -EINVAL; 426 return -EINVAL;
457 } 427 }
458 428
@@ -460,48 +430,48 @@ static int ath9k_hw_check_4k_eeprom(struct ath_hal *ah)
460#undef EEPROM_4K_SIZE 430#undef EEPROM_4K_SIZE
461} 431}
462 432
463static int (*ath9k_check_eeprom[]) (struct ath_hal *) = {
464	ath9k_hw_check_def_eeprom,
465	ath9k_hw_check_4k_eeprom
466};
467
468static inline int ath9k_hw_check_eeprom(struct ath_hal *ah)
469{
470	struct ath_hal_5416 *ahp = AH5416(ah);
471
472	return ath9k_check_eeprom[ahp->ah_eep_map](ah);
473}
474
475static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
476					   u8 *pVpdList, u16 numIntercepts,
477					   u8 *pRetVpdList)
478{
479	u16 i, k;
480	u8 currPwr = pwrMin;
481	u16 idxL = 0, idxR = 0;
482
483	for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
484		ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
485					       numIntercepts, &(idxL),
486					       &(idxR));
487		if (idxR < 1)
488			idxR = 1;
489		if (idxL == numIntercepts - 1)
490			idxL = (u16) (numIntercepts - 2);
491		if (pPwrList[idxL] == pPwrList[idxR])
492			k = pVpdList[idxL];
493		else
494			k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] +
495				   (pPwrList[idxR] - currPwr) * pVpdList[idxL]) /
496				  (pPwrList[idxR] - pPwrList[idxL]));
497		pRetVpdList[i] = (u8) k;
498		currPwr += 2;
499	}
500
501	return true;
502}
433static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
434				  enum eeprom_param param)
435{
436	struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
437	struct modal_eep_4k_header *pModal = &eep->modalHeader;
438	struct base_eep_header_4k *pBase = &eep->baseEepHeader;
439
440	switch (param) {
441	case EEP_NFTHRESH_2:
442		return pModal[1].noiseFloorThreshCh[0];
443	case AR_EEPROM_MAC(0):
444		return pBase->macAddr[0] << 8 | pBase->macAddr[1];
445	case AR_EEPROM_MAC(1):
446		return pBase->macAddr[2] << 8 | pBase->macAddr[3];
447	case AR_EEPROM_MAC(2):
448		return pBase->macAddr[4] << 8 | pBase->macAddr[5];
449	case EEP_REG_0:
450		return pBase->regDmn[0];
451	case EEP_REG_1:
452		return pBase->regDmn[1];
453	case EEP_OP_CAP:
454		return pBase->deviceCap;
455	case EEP_OP_MODE:
456		return pBase->opCapFlags;
457	case EEP_RF_SILENT:
458		return pBase->rfSilent;
459	case EEP_OB_2:
460		return pModal->ob_01;
461	case EEP_DB_2:
462		return pModal->db1_01;
463	case EEP_MINOR_REV:
464		return pBase->version & AR5416_EEP_VER_MINOR_MASK;
465	case EEP_TX_MASK:
466		return pBase->txMask;
467	case EEP_RX_MASK:
468		return pBase->rxMask;
469	default:
470		return 0;
471	}
472}
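In the new ath9k_hw_4k_get_eeprom() above, each AR_EEPROM_MAC(n) case returns two MAC-address bytes packed high-byte-first into one 16-bit word. The standalone sketch below shows how a caller could reassemble the six-byte address from those three words; the sample word values are made up for illustration and are not from any real card.

/* Standalone sketch (assumption, not driver code): rebuild the 6-byte MAC
 * from the three 16-bit words returned for AR_EEPROM_MAC(0..2). */
#include <stdio.h>

int main(void)
{
	/* hypothetical values as returned by the AR_EEPROM_MAC(i) cases */
	unsigned int words[3] = { 0x0003, 0x7f11, 0x2233 };
	unsigned char mac[6];
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = (words[i] >> 8) & 0xff;	/* high byte first */
		mac[2 * i + 1] = words[i] & 0xff;
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}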
503 473
504static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hal *ah, 474static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
505 struct ath9k_channel *chan, 475 struct ath9k_channel *chan,
506 struct cal_data_per_freq_4k *pRawDataSet, 476 struct cal_data_per_freq_4k *pRawDataSet,
507 u8 *bChans, u16 availPiers, 477 u8 *bChans, u16 availPiers,
@@ -669,442 +639,11 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hal *ah,
669#undef TMP_VAL_VPD_TABLE 639#undef TMP_VAL_VPD_TABLE
670} 640}
671 641
642static bool ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
672static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hal *ah,
673 struct ath9k_channel *chan,
674 struct cal_data_per_freq *pRawDataSet,
675 u8 *bChans, u16 availPiers,
676 u16 tPdGainOverlap, int16_t *pMinCalPower,
677 u16 *pPdGainBoundaries, u8 *pPDADCValues,
678 u16 numXpdGains)
679{
680 int i, j, k;
681 int16_t ss;
682 u16 idxL = 0, idxR = 0, numPiers;
683 static u8 vpdTableL[AR5416_NUM_PD_GAINS]
684 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
685 static u8 vpdTableR[AR5416_NUM_PD_GAINS]
686 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
687 static u8 vpdTableI[AR5416_NUM_PD_GAINS]
688 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
689
690 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
691 u8 minPwrT4[AR5416_NUM_PD_GAINS];
692 u8 maxPwrT4[AR5416_NUM_PD_GAINS];
693 int16_t vpdStep;
694 int16_t tmpVal;
695 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
696 bool match;
697 int16_t minDelta = 0;
698 struct chan_centers centers;
699
700 ath9k_hw_get_channel_centers(ah, chan, &centers);
701
702 for (numPiers = 0; numPiers < availPiers; numPiers++) {
703 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
704 break;
705 }
706
707 match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
708 IS_CHAN_2GHZ(chan)),
709 bChans, numPiers, &idxL, &idxR);
710
711 if (match) {
712 for (i = 0; i < numXpdGains; i++) {
713 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
714 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
715 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
716 pRawDataSet[idxL].pwrPdg[i],
717 pRawDataSet[idxL].vpdPdg[i],
718 AR5416_PD_GAIN_ICEPTS,
719 vpdTableI[i]);
720 }
721 } else {
722 for (i = 0; i < numXpdGains; i++) {
723 pVpdL = pRawDataSet[idxL].vpdPdg[i];
724 pPwrL = pRawDataSet[idxL].pwrPdg[i];
725 pVpdR = pRawDataSet[idxR].vpdPdg[i];
726 pPwrR = pRawDataSet[idxR].pwrPdg[i];
727
728 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
729
730 maxPwrT4[i] =
731 min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
732 pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
733
734
735 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
736 pPwrL, pVpdL,
737 AR5416_PD_GAIN_ICEPTS,
738 vpdTableL[i]);
739 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
740 pPwrR, pVpdR,
741 AR5416_PD_GAIN_ICEPTS,
742 vpdTableR[i]);
743
744 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
745 vpdTableI[i][j] =
746 (u8)(ath9k_hw_interpolate((u16)
747 FREQ2FBIN(centers.
748 synth_center,
749 IS_CHAN_2GHZ
750 (chan)),
751 bChans[idxL], bChans[idxR],
752 vpdTableL[i][j], vpdTableR[i][j]));
753 }
754 }
755 }
756
757 *pMinCalPower = (int16_t)(minPwrT4[0] / 2);
758
759 k = 0;
760
761 for (i = 0; i < numXpdGains; i++) {
762 if (i == (numXpdGains - 1))
763 pPdGainBoundaries[i] =
764 (u16)(maxPwrT4[i] / 2);
765 else
766 pPdGainBoundaries[i] =
767 (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
768
769 pPdGainBoundaries[i] =
770 min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);
771
772 if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
773 minDelta = pPdGainBoundaries[0] - 23;
774 pPdGainBoundaries[0] = 23;
775 } else {
776 minDelta = 0;
777 }
778
779 if (i == 0) {
780 if (AR_SREV_9280_10_OR_LATER(ah))
781 ss = (int16_t)(0 - (minPwrT4[i] / 2));
782 else
783 ss = 0;
784 } else {
785 ss = (int16_t)((pPdGainBoundaries[i - 1] -
786 (minPwrT4[i] / 2)) -
787 tPdGainOverlap + 1 + minDelta);
788 }
789 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
790 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
791
792 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
793 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
794 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
795 ss++;
796 }
797
798 sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
799 tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
800 (minPwrT4[i] / 2));
801 maxIndex = (tgtIndex < sizeCurrVpdTable) ?
802 tgtIndex : sizeCurrVpdTable;
803
804 while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
805 pPDADCValues[k++] = vpdTableI[i][ss++];
806 }
807
808 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
809 vpdTableI[i][sizeCurrVpdTable - 2]);
810 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
811
812 if (tgtIndex > maxIndex) {
813 while ((ss <= tgtIndex) &&
814 (k < (AR5416_NUM_PDADC_VALUES - 1))) {
815 tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
816 (ss - maxIndex + 1) * vpdStep));
817 pPDADCValues[k++] = (u8)((tmpVal > 255) ?
818 255 : tmpVal);
819 ss++;
820 }
821 }
822 }
823
824 while (i < AR5416_PD_GAINS_IN_MASK) {
825 pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
826 i++;
827 }
828
829 while (k < AR5416_NUM_PDADC_VALUES) {
830 pPDADCValues[k] = pPDADCValues[k - 1];
831 k++;
832 }
833
834 return;
835}
836
837static void ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
838 struct ath9k_channel *chan,
839 struct cal_target_power_leg *powInfo,
840 u16 numChannels,
841 struct cal_target_power_leg *pNewPower,
842 u16 numRates, bool isExtTarget)
843{
844 struct chan_centers centers;
845 u16 clo, chi;
846 int i;
847 int matchIndex = -1, lowIndex = -1;
848 u16 freq;
849
850 ath9k_hw_get_channel_centers(ah, chan, &centers);
851 freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;
852
853 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
854 IS_CHAN_2GHZ(chan))) {
855 matchIndex = 0;
856 } else {
857 for (i = 0; (i < numChannels) &&
858 (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
859 if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
860 IS_CHAN_2GHZ(chan))) {
861 matchIndex = i;
862 break;
863 } else if ((freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
864 IS_CHAN_2GHZ(chan))) &&
865 (freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
866 IS_CHAN_2GHZ(chan)))) {
867 lowIndex = i - 1;
868 break;
869 }
870 }
871 if ((matchIndex == -1) && (lowIndex == -1))
872 matchIndex = i - 1;
873 }
874
875 if (matchIndex != -1) {
876 *pNewPower = powInfo[matchIndex];
877 } else {
878 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
879 IS_CHAN_2GHZ(chan));
880 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
881 IS_CHAN_2GHZ(chan));
882
883 for (i = 0; i < numRates; i++) {
884 pNewPower->tPow2x[i] =
885 (u8)ath9k_hw_interpolate(freq, clo, chi,
886 powInfo[lowIndex].tPow2x[i],
887 powInfo[lowIndex + 1].tPow2x[i]);
888 }
889 }
890}
891
892static void ath9k_hw_get_target_powers(struct ath_hal *ah,
893 struct ath9k_channel *chan,
894 struct cal_target_power_ht *powInfo,
895 u16 numChannels,
896 struct cal_target_power_ht *pNewPower,
897 u16 numRates, bool isHt40Target)
898{
899 struct chan_centers centers;
900 u16 clo, chi;
901 int i;
902 int matchIndex = -1, lowIndex = -1;
903 u16 freq;
904
905 ath9k_hw_get_channel_centers(ah, chan, &centers);
906 freq = isHt40Target ? centers.synth_center : centers.ctl_center;
907
908 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
909 matchIndex = 0;
910 } else {
911 for (i = 0; (i < numChannels) &&
912 (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
913 if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
914 IS_CHAN_2GHZ(chan))) {
915 matchIndex = i;
916 break;
917 } else
918 if ((freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
919 IS_CHAN_2GHZ(chan))) &&
920 (freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
921 IS_CHAN_2GHZ(chan)))) {
922 lowIndex = i - 1;
923 break;
924 }
925 }
926 if ((matchIndex == -1) && (lowIndex == -1))
927 matchIndex = i - 1;
928 }
929
930 if (matchIndex != -1) {
931 *pNewPower = powInfo[matchIndex];
932 } else {
933 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
934 IS_CHAN_2GHZ(chan));
935 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
936 IS_CHAN_2GHZ(chan));
937
938 for (i = 0; i < numRates; i++) {
939 pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq,
940 clo, chi,
941 powInfo[lowIndex].tPow2x[i],
942 powInfo[lowIndex + 1].tPow2x[i]);
943 }
944 }
945}
946
947static u16 ath9k_hw_get_max_edge_power(u16 freq,
948 struct cal_ctl_edges *pRdEdgesPower,
949 bool is2GHz, int num_band_edges)
950{
951 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
952 int i;
953
954 for (i = 0; (i < num_band_edges) &&
955 (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
956 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
957 twiceMaxEdgePower = pRdEdgesPower[i].tPower;
958 break;
959 } else if ((i > 0) &&
960 (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
961 is2GHz))) {
962 if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
963 is2GHz) < freq &&
964 pRdEdgesPower[i - 1].flag) {
965 twiceMaxEdgePower =
966 pRdEdgesPower[i - 1].tPower;
967 }
968 break;
969 }
970 }
971
972 return twiceMaxEdgePower;
973}
974
975static bool ath9k_hw_set_def_power_cal_table(struct ath_hal *ah,
976 struct ath9k_channel *chan, 643 struct ath9k_channel *chan,
977 int16_t *pTxPowerIndexOffset) 644 int16_t *pTxPowerIndexOffset)
978{ 645{
979 struct ath_hal_5416 *ahp = AH5416(ah); 646 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
980 struct ar5416_eeprom_def *pEepData = &ahp->ah_eeprom.def;
981 struct cal_data_per_freq *pRawDataset;
982 u8 *pCalBChans = NULL;
983 u16 pdGainOverlap_t2;
984 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
985 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
986 u16 numPiers, i, j;
987 int16_t tMinCalPower;
988 u16 numXpdGain, xpdMask;
989 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
990 u32 reg32, regOffset, regChainOffset;
991 int16_t modalIdx;
992
993 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
994 xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
995
996 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
997 AR5416_EEP_MINOR_VER_2) {
998 pdGainOverlap_t2 =
999 pEepData->modalHeader[modalIdx].pdGainOverlap;
1000 } else {
1001 pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
1002 AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
1003 }
1004
1005 if (IS_CHAN_2GHZ(chan)) {
1006 pCalBChans = pEepData->calFreqPier2G;
1007 numPiers = AR5416_NUM_2G_CAL_PIERS;
1008 } else {
1009 pCalBChans = pEepData->calFreqPier5G;
1010 numPiers = AR5416_NUM_5G_CAL_PIERS;
1011 }
1012
1013 numXpdGain = 0;
1014
1015 for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
1016 if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
1017 if (numXpdGain >= AR5416_NUM_PD_GAINS)
1018 break;
1019 xpdGainValues[numXpdGain] =
1020 (u16)(AR5416_PD_GAINS_IN_MASK - i);
1021 numXpdGain++;
1022 }
1023 }
1024
1025 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
1026 (numXpdGain - 1) & 0x3);
1027 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
1028 xpdGainValues[0]);
1029 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
1030 xpdGainValues[1]);
1031 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
1032 xpdGainValues[2]);
1033
1034 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
1035 if (AR_SREV_5416_V20_OR_LATER(ah) &&
1036 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) &&
1037 (i != 0)) {
1038 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
1039 } else
1040 regChainOffset = i * 0x1000;
1041
1042 if (pEepData->baseEepHeader.txMask & (1 << i)) {
1043 if (IS_CHAN_2GHZ(chan))
1044 pRawDataset = pEepData->calPierData2G[i];
1045 else
1046 pRawDataset = pEepData->calPierData5G[i];
1047
1048 ath9k_hw_get_def_gain_boundaries_pdadcs(ah, chan,
1049 pRawDataset, pCalBChans,
1050 numPiers, pdGainOverlap_t2,
1051 &tMinCalPower, gainBoundaries,
1052 pdadcValues, numXpdGain);
1053
1054 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
1055 REG_WRITE(ah,
1056 AR_PHY_TPCRG5 + regChainOffset,
1057 SM(pdGainOverlap_t2,
1058 AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
1059 | SM(gainBoundaries[0],
1060 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
1061 | SM(gainBoundaries[1],
1062 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
1063 | SM(gainBoundaries[2],
1064 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
1065 | SM(gainBoundaries[3],
1066 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
1067 }
1068
1069 regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
1070 for (j = 0; j < 32; j++) {
1071 reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
1072 ((pdadcValues[4 * j + 1] & 0xFF) << 8) |
1073 ((pdadcValues[4 * j + 2] & 0xFF) << 16)|
1074 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
1075 REG_WRITE(ah, regOffset, reg32);
1076
1077 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1078 "PDADC (%d,%4x): %4.4x %8.8x\n",
1079 i, regChainOffset, regOffset,
1080 reg32);
1081 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1082 "PDADC: Chain %d | PDADC %3d "
1083 "Value %3d | PDADC %3d Value %3d | "
1084 "PDADC %3d Value %3d | PDADC %3d "
1085 "Value %3d |\n",
1086 i, 4 * j, pdadcValues[4 * j],
1087 4 * j + 1, pdadcValues[4 * j + 1],
1088 4 * j + 2, pdadcValues[4 * j + 2],
1089 4 * j + 3,
1090 pdadcValues[4 * j + 3]);
1091
1092 regOffset += 4;
1093 }
1094 }
1095 }
1096
1097 *pTxPowerIndexOffset = 0;
1098
1099 return true;
1100}
1101
1102static bool ath9k_hw_set_4k_power_cal_table(struct ath_hal *ah,
1103 struct ath9k_channel *chan,
1104 int16_t *pTxPowerIndexOffset)
1105{
1106 struct ath_hal_5416 *ahp = AH5416(ah);
1107 struct ar5416_eeprom_4k *pEepData = &ahp->ah_eeprom.map4k;
1108 struct cal_data_per_freq_4k *pRawDataset; 647 struct cal_data_per_freq_4k *pRawDataset;
1109 u8 *pCalBChans = NULL; 648 u8 *pCalBChans = NULL;
1110 u16 pdGainOverlap_t2; 649 u16 pdGainOverlap_t2;
@@ -1153,7 +692,7 @@ static bool ath9k_hw_set_4k_power_cal_table(struct ath_hal *ah,
1153 692
1154 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 693 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
1155 if (AR_SREV_5416_V20_OR_LATER(ah) && 694 if (AR_SREV_5416_V20_OR_LATER(ah) &&
1156 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) && 695 (ah->rxchainmask == 5 || ah->txchainmask == 5) &&
1157 (i != 0)) { 696 (i != 0)) {
1158 regChainOffset = (i == 1) ? 0x2000 : 0x1000; 697 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
1159 } else 698 } else
@@ -1216,298 +755,7 @@ static bool ath9k_hw_set_4k_power_cal_table(struct ath_hal *ah,
1216 return true; 755 return true;
1217} 756}
1218 757
758static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
1219static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hal *ah,
1220 struct ath9k_channel *chan,
1221 int16_t *ratesArray,
1222 u16 cfgCtl,
1223 u16 AntennaReduction,
1224 u16 twiceMaxRegulatoryPower,
1225 u16 powerLimit)
1226{
1227#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
1228#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */
1229
1230 struct ath_hal_5416 *ahp = AH5416(ah);
1231 struct ar5416_eeprom_def *pEepData = &ahp->ah_eeprom.def;
1232 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
1233 static const u16 tpScaleReductionTable[5] =
1234 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
1235
1236 int i;
1237 int16_t twiceLargestAntenna;
1238 struct cal_ctl_data *rep;
1239 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
1240 0, { 0, 0, 0, 0}
1241 };
1242 struct cal_target_power_leg targetPowerOfdmExt = {
1243 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
1244 0, { 0, 0, 0, 0 }
1245 };
1246 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
1247 0, {0, 0, 0, 0}
1248 };
1249 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
1250 u16 ctlModesFor11a[] =
1251 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
1252 u16 ctlModesFor11g[] =
1253 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
1254 CTL_2GHT40
1255 };
1256 u16 numCtlModes, *pCtlMode, ctlMode, freq;
1257 struct chan_centers centers;
1258 int tx_chainmask;
1259 u16 twiceMinEdgePower;
1260
1261 tx_chainmask = ahp->ah_txchainmask;
1262
1263 ath9k_hw_get_channel_centers(ah, chan, &centers);
1264
1265 twiceLargestAntenna = max(
1266 pEepData->modalHeader
1267 [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
1268 pEepData->modalHeader
1269 [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
1270
1271 twiceLargestAntenna = max((u8)twiceLargestAntenna,
1272 pEepData->modalHeader
1273 [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
1274
1275 twiceLargestAntenna = (int16_t)min(AntennaReduction -
1276 twiceLargestAntenna, 0);
1277
1278 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
1279
1280 if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) {
1281 maxRegAllowedPower -=
1282 (tpScaleReductionTable[(ah->ah_tpScale)] * 2);
1283 }
1284
1285 scaledPower = min(powerLimit, maxRegAllowedPower);
1286
1287 switch (ar5416_get_ntxchains(tx_chainmask)) {
1288 case 1:
1289 break;
1290 case 2:
1291 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
1292 break;
1293 case 3:
1294 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
1295 break;
1296 }
1297
1298 scaledPower = max((u16)0, scaledPower);
1299
1300 if (IS_CHAN_2GHZ(chan)) {
1301 numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
1302 SUB_NUM_CTL_MODES_AT_2G_40;
1303 pCtlMode = ctlModesFor11g;
1304
1305 ath9k_hw_get_legacy_target_powers(ah, chan,
1306 pEepData->calTargetPowerCck,
1307 AR5416_NUM_2G_CCK_TARGET_POWERS,
1308 &targetPowerCck, 4, false);
1309 ath9k_hw_get_legacy_target_powers(ah, chan,
1310 pEepData->calTargetPower2G,
1311 AR5416_NUM_2G_20_TARGET_POWERS,
1312 &targetPowerOfdm, 4, false);
1313 ath9k_hw_get_target_powers(ah, chan,
1314 pEepData->calTargetPower2GHT20,
1315 AR5416_NUM_2G_20_TARGET_POWERS,
1316 &targetPowerHt20, 8, false);
1317
1318 if (IS_CHAN_HT40(chan)) {
1319 numCtlModes = ARRAY_SIZE(ctlModesFor11g);
1320 ath9k_hw_get_target_powers(ah, chan,
1321 pEepData->calTargetPower2GHT40,
1322 AR5416_NUM_2G_40_TARGET_POWERS,
1323 &targetPowerHt40, 8, true);
1324 ath9k_hw_get_legacy_target_powers(ah, chan,
1325 pEepData->calTargetPowerCck,
1326 AR5416_NUM_2G_CCK_TARGET_POWERS,
1327 &targetPowerCckExt, 4, true);
1328 ath9k_hw_get_legacy_target_powers(ah, chan,
1329 pEepData->calTargetPower2G,
1330 AR5416_NUM_2G_20_TARGET_POWERS,
1331 &targetPowerOfdmExt, 4, true);
1332 }
1333 } else {
1334 numCtlModes = ARRAY_SIZE(ctlModesFor11a) -
1335 SUB_NUM_CTL_MODES_AT_5G_40;
1336 pCtlMode = ctlModesFor11a;
1337
1338 ath9k_hw_get_legacy_target_powers(ah, chan,
1339 pEepData->calTargetPower5G,
1340 AR5416_NUM_5G_20_TARGET_POWERS,
1341 &targetPowerOfdm, 4, false);
1342 ath9k_hw_get_target_powers(ah, chan,
1343 pEepData->calTargetPower5GHT20,
1344 AR5416_NUM_5G_20_TARGET_POWERS,
1345 &targetPowerHt20, 8, false);
1346
1347 if (IS_CHAN_HT40(chan)) {
1348 numCtlModes = ARRAY_SIZE(ctlModesFor11a);
1349 ath9k_hw_get_target_powers(ah, chan,
1350 pEepData->calTargetPower5GHT40,
1351 AR5416_NUM_5G_40_TARGET_POWERS,
1352 &targetPowerHt40, 8, true);
1353 ath9k_hw_get_legacy_target_powers(ah, chan,
1354 pEepData->calTargetPower5G,
1355 AR5416_NUM_5G_20_TARGET_POWERS,
1356 &targetPowerOfdmExt, 4, true);
1357 }
1358 }
1359
1360 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
1361 bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
1362 (pCtlMode[ctlMode] == CTL_2GHT40);
1363 if (isHt40CtlMode)
1364 freq = centers.synth_center;
1365 else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
1366 freq = centers.ext_center;
1367 else
1368 freq = centers.ctl_center;
1369
1370 if (ar5416_get_eep_ver(ahp) == 14 && ar5416_get_eep_rev(ahp) <= 2)
1371 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
1372
1373 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1374 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
1375 "EXT_ADDITIVE %d\n",
1376 ctlMode, numCtlModes, isHt40CtlMode,
1377 (pCtlMode[ctlMode] & EXT_ADDITIVE));
1378
1379 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
1380 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1381 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
1382 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
1383 "chan %d\n",
1384 i, cfgCtl, pCtlMode[ctlMode],
1385 pEepData->ctlIndex[i], chan->channel);
1386
1387 if ((((cfgCtl & ~CTL_MODE_M) |
1388 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
1389 pEepData->ctlIndex[i]) ||
1390 (((cfgCtl & ~CTL_MODE_M) |
1391 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
1392 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
1393 rep = &(pEepData->ctlData[i]);
1394
1395 twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
1396 rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1],
1397 IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES);
1398
1399 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1400 " MATCH-EE_IDX %d: ch %d is2 %d "
1401 "2xMinEdge %d chainmask %d chains %d\n",
1402 i, freq, IS_CHAN_2GHZ(chan),
1403 twiceMinEdgePower, tx_chainmask,
1404 ar5416_get_ntxchains
1405 (tx_chainmask));
1406 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
1407 twiceMaxEdgePower = min(twiceMaxEdgePower,
1408 twiceMinEdgePower);
1409 } else {
1410 twiceMaxEdgePower = twiceMinEdgePower;
1411 break;
1412 }
1413 }
1414 }
1415
1416 minCtlPower = min(twiceMaxEdgePower, scaledPower);
1417
1418 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1419 " SEL-Min ctlMode %d pCtlMode %d "
1420 "2xMaxEdge %d sP %d minCtlPwr %d\n",
1421 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
1422 scaledPower, minCtlPower);
1423
1424 switch (pCtlMode[ctlMode]) {
1425 case CTL_11B:
1426 for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
1427 targetPowerCck.tPow2x[i] =
1428 min((u16)targetPowerCck.tPow2x[i],
1429 minCtlPower);
1430 }
1431 break;
1432 case CTL_11A:
1433 case CTL_11G:
1434 for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
1435 targetPowerOfdm.tPow2x[i] =
1436 min((u16)targetPowerOfdm.tPow2x[i],
1437 minCtlPower);
1438 }
1439 break;
1440 case CTL_5GHT20:
1441 case CTL_2GHT20:
1442 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
1443 targetPowerHt20.tPow2x[i] =
1444 min((u16)targetPowerHt20.tPow2x[i],
1445 minCtlPower);
1446 }
1447 break;
1448 case CTL_11B_EXT:
1449 targetPowerCckExt.tPow2x[0] = min((u16)
1450 targetPowerCckExt.tPow2x[0],
1451 minCtlPower);
1452 break;
1453 case CTL_11A_EXT:
1454 case CTL_11G_EXT:
1455 targetPowerOfdmExt.tPow2x[0] = min((u16)
1456 targetPowerOfdmExt.tPow2x[0],
1457 minCtlPower);
1458 break;
1459 case CTL_5GHT40:
1460 case CTL_2GHT40:
1461 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
1462 targetPowerHt40.tPow2x[i] =
1463 min((u16)targetPowerHt40.tPow2x[i],
1464 minCtlPower);
1465 }
1466 break;
1467 default:
1468 break;
1469 }
1470 }
1471
1472 ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
1473 ratesArray[rate18mb] = ratesArray[rate24mb] =
1474 targetPowerOfdm.tPow2x[0];
1475 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
1476 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
1477 ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
1478 ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
1479
1480 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
1481 ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
1482
1483 if (IS_CHAN_2GHZ(chan)) {
1484 ratesArray[rate1l] = targetPowerCck.tPow2x[0];
1485 ratesArray[rate2s] = ratesArray[rate2l] =
1486 targetPowerCck.tPow2x[1];
1487 ratesArray[rate5_5s] = ratesArray[rate5_5l] =
1488 targetPowerCck.tPow2x[2];
1489 ;
1490 ratesArray[rate11s] = ratesArray[rate11l] =
1491 targetPowerCck.tPow2x[3];
1492 ;
1493 }
1494 if (IS_CHAN_HT40(chan)) {
1495 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
1496 ratesArray[rateHt40_0 + i] =
1497 targetPowerHt40.tPow2x[i];
1498 }
1499 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
1500 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
1501 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
1502 if (IS_CHAN_2GHZ(chan)) {
1503 ratesArray[rateExtCck] =
1504 targetPowerCckExt.tPow2x[0];
1505 }
1506 }
1507 return true;
1508}
1509
1510static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hal *ah,
1511 struct ath9k_channel *chan, 759 struct ath9k_channel *chan,
1512 int16_t *ratesArray, 760 int16_t *ratesArray,
1513 u16 cfgCtl, 761 u16 cfgCtl,
@@ -1515,8 +763,7 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hal *ah,
1515 u16 twiceMaxRegulatoryPower, 763 u16 twiceMaxRegulatoryPower,
1516 u16 powerLimit) 764 u16 powerLimit)
1517{ 765{
1518 struct ath_hal_5416 *ahp = AH5416(ah); 766 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
1519 struct ar5416_eeprom_4k *pEepData = &ahp->ah_eeprom.map4k;
1520 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 767 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
1521 static const u16 tpScaleReductionTable[5] = 768 static const u16 tpScaleReductionTable[5] =
1522 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; 769 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
@@ -1544,7 +791,7 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hal *ah,
1544 int tx_chainmask; 791 int tx_chainmask;
1545 u16 twiceMinEdgePower; 792 u16 twiceMinEdgePower;
1546 793
1547 tx_chainmask = ahp->ah_txchainmask; 794 tx_chainmask = ah->txchainmask;
1548 795
1549 ath9k_hw_get_channel_centers(ah, chan, &centers); 796 ath9k_hw_get_channel_centers(ah, chan, &centers);
1550 797
@@ -1555,9 +802,9 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hal *ah,
1555 802
1556 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; 803 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
1557 804
1558 if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) { 805 if (ah->regulatory.tp_scale != ATH9K_TP_SCALE_MAX) {
1559 maxRegAllowedPower -= 806 maxRegAllowedPower -=
1560 (tpScaleReductionTable[(ah->ah_tpScale)] * 2); 807 (tpScaleReductionTable[(ah->regulatory.tp_scale)] * 2);
1561 } 808 }
1562 809
1563 scaledPower = min(powerLimit, maxRegAllowedPower); 810 scaledPower = min(powerLimit, maxRegAllowedPower);
@@ -1605,8 +852,8 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hal *ah,
1605 else 852 else
1606 freq = centers.ctl_center; 853 freq = centers.ctl_center;
1607 854
1608 if (ar5416_get_eep_ver(ahp) == 14 && 855 if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
1609 ar5416_get_eep_rev(ahp) <= 2) 856 ah->eep_ops->get_eeprom_rev(ah) <= 2)
1610 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 857 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
1611 858
1612 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, 859 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
@@ -1743,17 +990,15 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hal *ah,
1743 return true; 990 return true;
1744} 991}
1745 992
1746static int ath9k_hw_def_set_txpower(struct ath_hal *ah,
1747				    struct ath9k_channel *chan,
1748				    u16 cfgCtl,
1749				    u8 twiceAntennaReduction,
1750				    u8 twiceMaxRegulatoryPower,
1751				    u8 powerLimit)
1752{
1753	struct ath_hal_5416 *ahp = AH5416(ah);
1754	struct ar5416_eeprom_def *pEepData = &ahp->ah_eeprom.def;
1755	struct modal_eep_header *pModal =
1756		&(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
1757	int16_t ratesArray[Ar5416RateSize];
1758	int16_t txPowerIndexOffset = 0;
1759	u8 ht40PowerIncForPdadc = 2;
993static int ath9k_hw_4k_set_txpower(struct ath_hw *ah,
994				    struct ath9k_channel *chan,
995				    u16 cfgCtl,
996				    u8 twiceAntennaReduction,
997				    u8 twiceMaxRegulatoryPower,
998				    u8 powerLimit)
999{
1000	struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
1001	struct modal_eep_4k_header *pModal = &pEepData->modalHeader;
1002	int16_t ratesArray[Ar5416RateSize];
1003	int16_t txPowerIndexOffset = 0;
1004	u8 ht40PowerIncForPdadc = 2;
@@ -1766,7 +1011,7 @@ static int ath9k_hw_def_set_txpower(struct ath_hal *ah,
1766 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; 1011 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
1767 } 1012 }
1768 1013
1769 if (!ath9k_hw_set_def_power_per_rate_table(ah, chan, 1014 if (!ath9k_hw_set_4k_power_per_rate_table(ah, chan,
1770 &ratesArray[0], cfgCtl, 1015 &ratesArray[0], cfgCtl,
1771 twiceAntennaReduction, 1016 twiceAntennaReduction,
1772 twiceMaxRegulatoryPower, 1017 twiceMaxRegulatoryPower,
@@ -1777,7 +1022,7 @@ static int ath9k_hw_def_set_txpower(struct ath_hal *ah,
1777 return -EIO; 1022 return -EIO;
1778 } 1023 }
1779 1024
1780 if (!ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset)) { 1025 if (!ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset)) {
1781 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1026 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1782 "ath9k_hw_set_txpower: unable to set power table\n"); 1027 "ath9k_hw_set_txpower: unable to set power table\n");
1783 return -EIO; 1028 return -EIO;
@@ -1856,10 +1101,6 @@ static int ath9k_hw_def_set_txpower(struct ath_hal *ah,
1856 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 1101 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
1857 } 1102 }
1858 1103
1859 REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
1860 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
1861 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
1862
1863 i = rate6mb; 1104 i = rate6mb;
1864 1105
1865 if (IS_CHAN_HT40(chan)) 1106 if (IS_CHAN_HT40(chan))
@@ -1868,272 +1109,518 @@ static int ath9k_hw_def_set_txpower(struct ath_hal *ah,
1868 i = rateHt20_0; 1109 i = rateHt20_0;
1869 1110
1870 if (AR_SREV_9280_10_OR_LATER(ah)) 1111 if (AR_SREV_9280_10_OR_LATER(ah))
1871 ah->ah_maxPowerLevel = 1112 ah->regulatory.max_power_level =
1872 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2; 1113 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
1873 else 1114 else
1874 ah->ah_maxPowerLevel = ratesArray[i]; 1115 ah->regulatory.max_power_level = ratesArray[i];
1875 1116
1876 return 0; 1117 return 0;
1877} 1118}
1878 1119
1120static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
1121				  struct ath9k_channel *chan)
1879static int ath9k_hw_4k_set_txpower(struct ath_hal *ah,
1880				   struct ath9k_channel *chan,
1881 u16 cfgCtl,
1882 u8 twiceAntennaReduction,
1883 u8 twiceMaxRegulatoryPower,
1884 u8 powerLimit)
1885{ 1122{
1886 struct ath_hal_5416 *ahp = AH5416(ah); 1123 struct modal_eep_4k_header *pModal;
1887 struct ar5416_eeprom_4k *pEepData = &ahp->ah_eeprom.map4k; 1124 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1888 struct modal_eep_4k_header *pModal = &pEepData->modalHeader; 1125 u8 biaslevel;
1889 int16_t ratesArray[Ar5416RateSize];
1890 int16_t txPowerIndexOffset = 0;
1891 u8 ht40PowerIncForPdadc = 2;
1892 int i;
1893 1126
1894 memset(ratesArray, 0, sizeof(ratesArray)); 1127 if (ah->hw_version.macVersion != AR_SREV_VERSION_9160)
1128 return;
1895 1129
1896 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 1130 if (ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_MINOR_VER_7)
1897 AR5416_EEP_MINOR_VER_2) { 1131 return;
1898 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; 1132
1133 pModal = &eep->modalHeader;
1134
1135 if (pModal->xpaBiasLvl != 0xff) {
1136 biaslevel = pModal->xpaBiasLvl;
1137 INI_RA(&ah->iniAddac, 7, 1) =
1138 (INI_RA(&ah->iniAddac, 7, 1) & (~0x18)) | biaslevel << 3;
1899 } 1139 }
1140}
1900 1141
1901 if (!ath9k_hw_set_4k_power_per_rate_table(ah, chan, 1142static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1902 &ratesArray[0], cfgCtl, 1143 struct ath9k_channel *chan)
1903 twiceAntennaReduction, 1144{
1904 twiceMaxRegulatoryPower, 1145 struct modal_eep_4k_header *pModal;
1905 powerLimit)) { 1146 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1906 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1147 int regChainOffset;
1907 "ath9k_hw_set_txpower: unable to set " 1148 u8 txRxAttenLocal;
1908 "tx power per rate table\n"); 1149 u8 ob[5], db1[5], db2[5];
1909 return -EIO; 1150 u8 ant_div_control1, ant_div_control2;
1151 u32 regVal;
1152
1153
1154 pModal = &eep->modalHeader;
1155
1156 txRxAttenLocal = 23;
1157
1158 REG_WRITE(ah, AR_PHY_SWITCH_COM,
1159 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
1160
1161 regChainOffset = 0;
1162 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
1163 pModal->antCtrlChain[0]);
1164
1165 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
1166 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
1167 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
1168 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
1169 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
1170 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
1171
1172 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
1173 AR5416_EEP_MINOR_VER_3) {
1174 txRxAttenLocal = pModal->txRxAttenCh[0];
1175 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1176 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
1177 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1178 AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
1179 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1180 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
1181 pModal->xatten2Margin[0]);
1182 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1183 AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
1910 } 1184 }
1911 1185
1912 if (!ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset)) { 1186 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
1187 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
1188 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
1189 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
1190
1191 if (AR_SREV_9285_11(ah))
1192 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
1193
1194 /* Initialize Ant Diversity settings from EEPROM */
1195 if (pModal->version == 3) {
1196 ant_div_control1 = ((pModal->ob_234 >> 12) & 0xf);
1197 ant_div_control2 = ((pModal->db1_234 >> 12) & 0xf);
1198 regVal = REG_READ(ah, 0x99ac);
1199 regVal &= (~(0x7f000000));
1200 regVal |= ((ant_div_control1 & 0x1) << 24);
1201 regVal |= (((ant_div_control1 >> 1) & 0x1) << 29);
1202 regVal |= (((ant_div_control1 >> 2) & 0x1) << 30);
1203 regVal |= ((ant_div_control2 & 0x3) << 25);
1204 regVal |= (((ant_div_control2 >> 2) & 0x3) << 27);
1205 REG_WRITE(ah, 0x99ac, regVal);
1206 regVal = REG_READ(ah, 0x99ac);
1207 regVal = REG_READ(ah, 0xa208);
1208 regVal &= (~(0x1 << 13));
1209 regVal |= (((ant_div_control1 >> 3) & 0x1) << 13);
1210 REG_WRITE(ah, 0xa208, regVal);
1211 regVal = REG_READ(ah, 0xa208);
1212 }
1213
1214 if (pModal->version >= 2) {
1215 ob[0] = (pModal->ob_01 & 0xf);
1216 ob[1] = (pModal->ob_01 >> 4) & 0xf;
1217 ob[2] = (pModal->ob_234 & 0xf);
1218 ob[3] = ((pModal->ob_234 >> 4) & 0xf);
1219 ob[4] = ((pModal->ob_234 >> 8) & 0xf);
1220
1221 db1[0] = (pModal->db1_01 & 0xf);
1222 db1[1] = ((pModal->db1_01 >> 4) & 0xf);
1223 db1[2] = (pModal->db1_234 & 0xf);
1224 db1[3] = ((pModal->db1_234 >> 4) & 0xf);
1225 db1[4] = ((pModal->db1_234 >> 8) & 0xf);
1226
1227 db2[0] = (pModal->db2_01 & 0xf);
1228 db2[1] = ((pModal->db2_01 >> 4) & 0xf);
1229 db2[2] = (pModal->db2_234 & 0xf);
1230 db2[3] = ((pModal->db2_234 >> 4) & 0xf);
1231 db2[4] = ((pModal->db2_234 >> 8) & 0xf);
1232
1233 } else if (pModal->version == 1) {
1234
1913 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1235 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1914 "ath9k_hw_set_txpower: unable to set power table\n"); 1236 "EEPROM Model version is set to 1 \n");
1915 return -EIO; 1237 ob[0] = (pModal->ob_01 & 0xf);
1238 ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf;
1239 db1[0] = (pModal->db1_01 & 0xf);
1240 db1[1] = db1[2] = db1[3] =
1241 db1[4] = ((pModal->db1_01 >> 4) & 0xf);
1242 db2[0] = (pModal->db2_01 & 0xf);
1243 db2[1] = db2[2] = db2[3] =
1244 db2[4] = ((pModal->db2_01 >> 4) & 0xf);
1245 } else {
1246 int i;
1247 for (i = 0; i < 5; i++) {
1248 ob[i] = pModal->ob_01;
1249 db1[i] = pModal->db1_01;
1250 db2[i] = pModal->db1_01;
1251 }
1916 } 1252 }
1917 1253
1918 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 1254 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1919 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 1255 AR9285_AN_RF2G3_OB_0, AR9285_AN_RF2G3_OB_0_S, ob[0]);
1920 if (ratesArray[i] > AR5416_MAX_RATE_POWER) 1256 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1921 ratesArray[i] = AR5416_MAX_RATE_POWER; 1257 AR9285_AN_RF2G3_OB_1, AR9285_AN_RF2G3_OB_1_S, ob[1]);
1258 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1259 AR9285_AN_RF2G3_OB_2, AR9285_AN_RF2G3_OB_2_S, ob[2]);
1260 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1261 AR9285_AN_RF2G3_OB_3, AR9285_AN_RF2G3_OB_3_S, ob[3]);
1262 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1263 AR9285_AN_RF2G3_OB_4, AR9285_AN_RF2G3_OB_4_S, ob[4]);
1264
1265 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1266 AR9285_AN_RF2G3_DB1_0, AR9285_AN_RF2G3_DB1_0_S, db1[0]);
1267 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1268 AR9285_AN_RF2G3_DB1_1, AR9285_AN_RF2G3_DB1_1_S, db1[1]);
1269 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
1270 AR9285_AN_RF2G3_DB1_2, AR9285_AN_RF2G3_DB1_2_S, db1[2]);
1271 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
1272 AR9285_AN_RF2G4_DB1_3, AR9285_AN_RF2G4_DB1_3_S, db1[3]);
1273 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
1274 AR9285_AN_RF2G4_DB1_4, AR9285_AN_RF2G4_DB1_4_S, db1[4]);
1275
1276 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
1277 AR9285_AN_RF2G4_DB2_0, AR9285_AN_RF2G4_DB2_0_S, db2[0]);
1278 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
1279 AR9285_AN_RF2G4_DB2_1, AR9285_AN_RF2G4_DB2_1_S, db2[1]);
1280 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
1281 AR9285_AN_RF2G4_DB2_2, AR9285_AN_RF2G4_DB2_2_S, db2[2]);
1282 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
1283 AR9285_AN_RF2G4_DB2_3, AR9285_AN_RF2G4_DB2_3_S, db2[3]);
1284 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
1285 AR9285_AN_RF2G4_DB2_4, AR9285_AN_RF2G4_DB2_4_S, db2[4]);
1286
1287
1288 if (AR_SREV_9285_11(ah))
1289 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
1290
1291 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
1292 pModal->switchSettling);
1293 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
1294 pModal->adcDesiredSize);
1295
1296 REG_WRITE(ah, AR_PHY_RF_CTL4,
1297 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
1298 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
1299 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
1300 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
1301
1302 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1303 pModal->txEndToRxOn);
1304 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
1305 pModal->thresh62);
1306 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
1307 pModal->thresh62);
1308
1309 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
1310 AR5416_EEP_MINOR_VER_2) {
1311 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START,
1312 pModal->txFrameToDataStart);
1313 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
1314 pModal->txFrameToPaOn);
1922 } 1315 }
1923 1316
1924 if (AR_SREV_9280_10_OR_LATER(ah)) { 1317 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
1925 for (i = 0; i < Ar5416RateSize; i++) 1318 AR5416_EEP_MINOR_VER_3) {
1926 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2; 1319 if (IS_CHAN_HT40(chan))
1320 REG_RMW_FIELD(ah, AR_PHY_SETTLING,
1321 AR_PHY_SETTLING_SWITCH,
1322 pModal->swSettleHt40);
1927 } 1323 }
1928 1324
1929 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 1325 return true;
1930 ATH9K_POW_SM(ratesArray[rate18mb], 24) 1326}
1931 | ATH9K_POW_SM(ratesArray[rate12mb], 16)
1932 | ATH9K_POW_SM(ratesArray[rate9mb], 8)
1933 | ATH9K_POW_SM(ratesArray[rate6mb], 0));
1934 REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
1935 ATH9K_POW_SM(ratesArray[rate54mb], 24)
1936 | ATH9K_POW_SM(ratesArray[rate48mb], 16)
1937 | ATH9K_POW_SM(ratesArray[rate36mb], 8)
1938 | ATH9K_POW_SM(ratesArray[rate24mb], 0));
1939 1327
1940 if (IS_CHAN_2GHZ(chan)) { 1328static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
1941 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, 1329 struct ath9k_channel *chan)
1942 ATH9K_POW_SM(ratesArray[rate2s], 24) 1330{
1943 | ATH9K_POW_SM(ratesArray[rate2l], 16) 1331 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1944 | ATH9K_POW_SM(ratesArray[rateXr], 8) 1332 struct modal_eep_4k_header *pModal = &eep->modalHeader;
1945 | ATH9K_POW_SM(ratesArray[rate1l], 0));
1946 REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
1947 ATH9K_POW_SM(ratesArray[rate11s], 24)
1948 | ATH9K_POW_SM(ratesArray[rate11l], 16)
1949 | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
1950 | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
1951 }
1952 1333
1953 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, 1334 return pModal->antCtrlCommon & 0xFFFF;
1954 ATH9K_POW_SM(ratesArray[rateHt20_3], 24) 1335}
1955 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
1956 | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
1957 | ATH9K_POW_SM(ratesArray[rateHt20_0], 0));
1958 REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
1959 ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
1960 | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
1961 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
1962 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
1963 1336
1964 if (IS_CHAN_HT40(chan)) { 1337static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
1965 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, 1338 enum ieee80211_band freq_band)
1966 ATH9K_POW_SM(ratesArray[rateHt40_3] + 1339{
1967 ht40PowerIncForPdadc, 24) 1340 return 1;
1968 | ATH9K_POW_SM(ratesArray[rateHt40_2] + 1341}
1969 ht40PowerIncForPdadc, 16)
1970 | ATH9K_POW_SM(ratesArray[rateHt40_1] +
1971 ht40PowerIncForPdadc, 8)
1972 | ATH9K_POW_SM(ratesArray[rateHt40_0] +
1973 ht40PowerIncForPdadc, 0));
1974 REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
1975 ATH9K_POW_SM(ratesArray[rateHt40_7] +
1976 ht40PowerIncForPdadc, 24)
1977 | ATH9K_POW_SM(ratesArray[rateHt40_6] +
1978 ht40PowerIncForPdadc, 16)
1979 | ATH9K_POW_SM(ratesArray[rateHt40_5] +
1980 ht40PowerIncForPdadc, 8)
1981 | ATH9K_POW_SM(ratesArray[rateHt40_4] +
1982 ht40PowerIncForPdadc, 0));
1983 1342
1984 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, 1343u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1985 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) 1344{
1986 | ATH9K_POW_SM(ratesArray[rateExtCck], 16) 1345#define EEP_MAP4K_SPURCHAN \
1987 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 1346 (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
1988 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
1989 }
1990 1347
1991 i = rate6mb; 1348 u16 spur_val = AR_NO_SPUR;
1992 1349
1993 if (IS_CHAN_HT40(chan)) 1350 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1994 i = rateHt40_0; 1351 "Getting spur idx %d is2Ghz. %d val %x\n",
1995 else if (IS_CHAN_HT20(chan)) 1352 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1996 i = rateHt20_0;
1997 1353
1998 if (AR_SREV_9280_10_OR_LATER(ah)) 1354 switch (ah->config.spurmode) {
1999 ah->ah_maxPowerLevel = 1355 case SPUR_DISABLE:
2000 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2; 1356 break;
2001 else 1357 case SPUR_ENABLE_IOCTL:
2002 ah->ah_maxPowerLevel = ratesArray[i]; 1358 spur_val = ah->config.spurchans[i][is2GHz];
1359 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1360 "Getting spur val from new loc. %d\n", spur_val);
1361 break;
1362 case SPUR_ENABLE_EEPROM:
1363 spur_val = EEP_MAP4K_SPURCHAN;
1364 break;
1365 }
2003 1366
2004 return 0; 1367 return spur_val;
1368
1369#undef EEP_MAP4K_SPURCHAN
2005} 1370}
2006 1371
2007static int (*ath9k_set_txpower[]) (struct ath_hal *,
2008				   struct ath9k_channel *,
2009				   u16, u8, u8, u8) = {
2010	ath9k_hw_def_set_txpower,
2011	ath9k_hw_4k_set_txpower
2012};
1372struct eeprom_ops eep_4k_ops = {
1373	.check_eeprom = ath9k_hw_4k_check_eeprom,
1374	.get_eeprom = ath9k_hw_4k_get_eeprom,
1375	.fill_eeprom = ath9k_hw_4k_fill_eeprom,
1376	.get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver,
1377	.get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev,
1378	.get_num_ant_config = ath9k_hw_4k_get_num_ant_config,
1379	.get_eeprom_antenna_cfg = ath9k_hw_4k_get_eeprom_antenna_cfg,
1380	.set_board_values = ath9k_hw_4k_set_board_values,
1381	.set_addac = ath9k_hw_4k_set_addac,
1382	.set_txpower = ath9k_hw_4k_set_txpower,
1383	.get_spur_channel = ath9k_hw_4k_get_spur_channel
1384};
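The per-map function-pointer arrays indexed by ah_eep_map (ath9k_set_txpower[] above, and ath9k_check_eeprom[]/ath9k_set_addac[] earlier in this file) give way to a single eeprom_ops table reached through ah->eep_ops, as eep_4k_ops above shows. The self-contained sketch below illustrates that dispatch pattern with simplified stand-in types; fake_hw, fake_eeprom_ops and the stub getter are hypothetical and are not the driver's definitions.

/* Standalone sketch (assumption, not driver code): ops-table dispatch in
 * the style the patch moves to, instead of indexing arrays by map type. */
#include <stdio.h>

struct fake_hw;

struct fake_eeprom_ops {
	int (*get_eeprom_ver)(struct fake_hw *ah);
};

struct fake_hw {
	const struct fake_eeprom_ops *eep_ops;
};

static int fake_4k_get_eeprom_ver(struct fake_hw *ah)
{
	(void)ah;
	return 14;	/* made-up version value */
}

static const struct fake_eeprom_ops fake_4k_ops = {
	.get_eeprom_ver = fake_4k_get_eeprom_ver,
};

int main(void)
{
	struct fake_hw hw = { .eep_ops = &fake_4k_ops };

	/* mirrors calls such as ah->eep_ops->get_eeprom_ver(ah) in the patch */
	printf("ver %d\n", hw.eep_ops->get_eeprom_ver(&hw));
	return 0;
}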
2013 1385
2014int ath9k_hw_set_txpower(struct ath_hal *ah,
2015			 struct ath9k_channel *chan,
2016			 u16 cfgCtl,
2017			 u8 twiceAntennaReduction,
2018			 u8 twiceMaxRegulatoryPower,
2019			 u8 powerLimit)
2020{
2021	struct ath_hal_5416 *ahp = AH5416(ah);
2022
2023	return ath9k_set_txpower[ahp->ah_eep_map](ah, chan, cfgCtl,
2024		twiceAntennaReduction, twiceMaxRegulatoryPower,
2025		powerLimit);
2026}
2027
1386/************************************************/
1387/* EEPROM Operations for non-4K (Default) cards */
1388/************************************************/
1389
1390static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah)
1391{
1392	return ((ah->eeprom.def.baseEepHeader.version >> 12) & 0xF);
1393}
1394
1395static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
1396{
1397	return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
1398}
1399
2028static void ath9k_hw_set_def_addac(struct ath_hal *ah, 1400static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
2029 struct ath9k_channel *chan)
2030{ 1401{
2031#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt]) 1402#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
2032 struct modal_eep_header *pModal; 1403 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
2033 struct ath_hal_5416 *ahp = AH5416(ah); 1404 u16 *eep_data;
2034 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def; 1405 int addr, ar5416_eep_start_loc = 0x100;
2035 u8 biaslevel;
2036 1406
2037 if (ah->ah_macVersion != AR_SREV_VERSION_9160) 1407 eep_data = (u16 *)eep;
2038 return;
2039 1408
2040 if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7) 1409 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
2041 return; 1410 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
1411 eep_data)) {
1412 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1413 "Unable to read eeprom region\n");
1414 return false;
1415 }
1416 eep_data++;
1417 }
1418 return true;
1419#undef SIZE_EEPROM_DEF
1420}
2042 1421
2043 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); 1422static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1423{
1424 struct ar5416_eeprom_def *eep =
1425 (struct ar5416_eeprom_def *) &ah->eeprom.def;
1426 u16 *eepdata, temp, magic, magic2;
1427 u32 sum = 0, el;
1428 bool need_swap = false;
1429 int i, addr, size;
2044 1430
2045 if (pModal->xpaBiasLvl != 0xff) { 1431 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
2046 biaslevel = pModal->xpaBiasLvl; 1432 &magic)) {
2047 } else { 1433 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2048 u16 resetFreqBin, freqBin, freqCount = 0; 1434 "Reading Magic # failed\n");
2049 struct chan_centers centers; 1435 return false;
1436 }
2050 1437
2051 ath9k_hw_get_channel_centers(ah, chan, &centers); 1438 if (!ath9k_hw_use_flash(ah)) {
2052 1439
2053 resetFreqBin = FREQ2FBIN(centers.synth_center, 1440 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2054 IS_CHAN_2GHZ(chan)); 1441 "Read Magic = 0x%04X\n", magic);
2055 freqBin = XPA_LVL_FREQ(0) & 0xff;
2056 biaslevel = (u8) (XPA_LVL_FREQ(0) >> 14);
2057 1442
2058 freqCount++; 1443 if (magic != AR5416_EEPROM_MAGIC) {
1444 magic2 = swab16(magic);
2059 1445
2060 while (freqCount < 3) { 1446 if (magic2 == AR5416_EEPROM_MAGIC) {
2061 if (XPA_LVL_FREQ(freqCount) == 0x0) 1447 size = sizeof(struct ar5416_eeprom_def);
2062 break; 1448 need_swap = true;
1449 eepdata = (u16 *) (&ah->eeprom);
2063 1450
2064 freqBin = XPA_LVL_FREQ(freqCount) & 0xff; 1451 for (addr = 0; addr < size / sizeof(u16); addr++) {
2065 if (resetFreqBin >= freqBin) 1452 temp = swab16(*eepdata);
2066 biaslevel = (u8)(XPA_LVL_FREQ(freqCount) >> 14); 1453 *eepdata = temp;
2067 else 1454 eepdata++;
2068 break; 1455
2069 freqCount++; 1456 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1457 "0x%04X ", *eepdata);
1458
1459 if (((addr + 1) % 6) == 0)
1460 DPRINTF(ah->ah_sc,
1461 ATH_DBG_EEPROM, "\n");
1462 }
1463 } else {
1464 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1465 "Invalid EEPROM Magic. "
1466 "endianness mismatch.\n");
1467 return -EINVAL;
1468 }
2070 } 1469 }
2071 } 1470 }
2072 1471
2073 if (IS_CHAN_2GHZ(chan)) { 1472 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
2074 INI_RA(&ahp->ah_iniAddac, 7, 1) = (INI_RA(&ahp->ah_iniAddac, 1473 need_swap ? "True" : "False");
2075 7, 1) & (~0x18)) | biaslevel << 3;
2076 } else {
2077 INI_RA(&ahp->ah_iniAddac, 6, 1) = (INI_RA(&ahp->ah_iniAddac,
2078 6, 1) & (~0xc0)) | biaslevel << 6;
2079 }
2080#undef XPA_LVL_FREQ
2081}
2082 1474
2083static void ath9k_hw_set_4k_addac(struct ath_hal *ah, 1475 if (need_swap)
2084 struct ath9k_channel *chan) 1476 el = swab16(ah->eeprom.def.baseEepHeader.length);
2085{ 1477 else
2086 struct modal_eep_4k_header *pModal; 1478 el = ah->eeprom.def.baseEepHeader.length;
2087 struct ath_hal_5416 *ahp = AH5416(ah);
2088 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k;
2089 u8 biaslevel;
2090 1479
2091 if (ah->ah_macVersion != AR_SREV_VERSION_9160) 1480 if (el > sizeof(struct ar5416_eeprom_def))
2092 return; 1481 el = sizeof(struct ar5416_eeprom_def) / sizeof(u16);
1482 else
1483 el = el / sizeof(u16);
2093 1484
2094 if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7) 1485 eepdata = (u16 *)(&ah->eeprom);
2095 return;
2096 1486
2097 pModal = &eep->modalHeader; 1487 for (i = 0; i < el; i++)
1488 sum ^= *eepdata++;
2098 1489
2099 if (pModal->xpaBiasLvl != 0xff) { 1490 if (need_swap) {
2100 biaslevel = pModal->xpaBiasLvl; 1491 u32 integer, j;
2101 INI_RA(&ahp->ah_iniAddac, 7, 1) = 1492 u16 word;
2102 (INI_RA(&ahp->ah_iniAddac, 7, 1) & (~0x18)) | biaslevel << 3;
2103 }
2104}
2105 1493
2106static void (*ath9k_set_addac[]) (struct ath_hal *, struct ath9k_channel *) = { 1494 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2107 ath9k_hw_set_def_addac, 1495 "EEPROM Endianness is not native.. Changing \n");
2108 ath9k_hw_set_4k_addac
2109};
2110 1496
2111void ath9k_hw_set_addac(struct ath_hal *ah, struct ath9k_channel *chan) 1497 word = swab16(eep->baseEepHeader.length);
2112{ 1498 eep->baseEepHeader.length = word;
2113 struct ath_hal_5416 *ahp = AH5416(ah); 1499
1500 word = swab16(eep->baseEepHeader.checksum);
1501 eep->baseEepHeader.checksum = word;
1502
1503 word = swab16(eep->baseEepHeader.version);
1504 eep->baseEepHeader.version = word;
1505
1506 word = swab16(eep->baseEepHeader.regDmn[0]);
1507 eep->baseEepHeader.regDmn[0] = word;
1508
1509 word = swab16(eep->baseEepHeader.regDmn[1]);
1510 eep->baseEepHeader.regDmn[1] = word;
1511
1512 word = swab16(eep->baseEepHeader.rfSilent);
1513 eep->baseEepHeader.rfSilent = word;
1514
1515 word = swab16(eep->baseEepHeader.blueToothOptions);
1516 eep->baseEepHeader.blueToothOptions = word;
1517
1518 word = swab16(eep->baseEepHeader.deviceCap);
1519 eep->baseEepHeader.deviceCap = word;
1520
1521 for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
1522 struct modal_eep_header *pModal =
1523 &eep->modalHeader[j];
1524 integer = swab32(pModal->antCtrlCommon);
1525 pModal->antCtrlCommon = integer;
1526
1527 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
1528 integer = swab32(pModal->antCtrlChain[i]);
1529 pModal->antCtrlChain[i] = integer;
1530 }
1531
1532 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
1533 word = swab16(pModal->spurChans[i].spurChan);
1534 pModal->spurChans[i].spurChan = word;
1535 }
1536 }
1537 }
1538
1539 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
1540 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
1541 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1542 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
1543 sum, ah->eep_ops->get_eeprom_ver(ah));
1544 return -EINVAL;
1545 }
2114 1546
2115 ath9k_set_addac[ahp->ah_eep_map](ah, chan); 1547 return 0;
2116} 1548}
2117 1549
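For reference, the endianness/checksum validation above reduces to a simple rule: after optionally byte-swapping the image (when the magic word reads back swapped), every 16-bit word of the EEPROM is XORed together and the result must be 0xffff. A minimal, self-contained sketch of that rule (hypothetical names, not the driver code itself):

/* Sketch only: the XOR-checksum rule applied by ath9k_hw_def_check_eeprom(). */
static int eeprom_image_checksum_ok(const u16 *image, int nwords)
{
	u16 sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum ^= image[i];	/* a valid image XORs to 0xffff */

	return sum == 0xffff;
}
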
1550static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
1551 enum eeprom_param param)
1552{
1553#define AR5416_VER_MASK (pBase->version & AR5416_EEP_VER_MINOR_MASK)
1554 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1555 struct modal_eep_header *pModal = eep->modalHeader;
1556 struct base_eep_header *pBase = &eep->baseEepHeader;
2118 1557
1558 switch (param) {
1559 case EEP_NFTHRESH_5:
1560 return pModal[0].noiseFloorThreshCh[0];
1561 case EEP_NFTHRESH_2:
1562 return pModal[1].noiseFloorThreshCh[0];
1563 case AR_EEPROM_MAC(0):
1564 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
1565 case AR_EEPROM_MAC(1):
1566 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
1567 case AR_EEPROM_MAC(2):
1568 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
1569 case EEP_REG_0:
1570 return pBase->regDmn[0];
1571 case EEP_REG_1:
1572 return pBase->regDmn[1];
1573 case EEP_OP_CAP:
1574 return pBase->deviceCap;
1575 case EEP_OP_MODE:
1576 return pBase->opCapFlags;
1577 case EEP_RF_SILENT:
1578 return pBase->rfSilent;
1579 case EEP_OB_5:
1580 return pModal[0].ob;
1581 case EEP_DB_5:
1582 return pModal[0].db;
1583 case EEP_OB_2:
1584 return pModal[1].ob;
1585 case EEP_DB_2:
1586 return pModal[1].db;
1587 case EEP_MINOR_REV:
1588 return AR5416_VER_MASK;
1589 case EEP_TX_MASK:
1590 return pBase->txMask;
1591 case EEP_RX_MASK:
1592 return pBase->rxMask;
1593 case EEP_RXGAIN_TYPE:
1594 return pBase->rxGainType;
1595 case EEP_TXGAIN_TYPE:
1596 return pBase->txGainType;
1597 case EEP_DAC_HPWR_5G:
1598 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20)
1599 return pBase->dacHiPwrMode_5G;
1600 else
1601 return 0;
1602 default:
1603 return 0;
1604 }
1605#undef AR5416_VER_MASK
1606}
2119 1607
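As a usage note on the getter above: AR_EEPROM_MAC(0), (1) and (2) each return two adjacent MAC-address bytes packed high-byte-first into one 16-bit value, so a caller can rebuild the full 6-byte address word by word. A hedged sketch of that unpacking (helper name is hypothetical):

/* Sketch: rebuild a 6-byte MAC from the three 16-bit words returned
 * for AR_EEPROM_MAC(0), AR_EEPROM_MAC(1) and AR_EEPROM_MAC(2). */
static void eeprom_words_to_mac(const u16 word[3], u8 mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = word[i] >> 8;	/* high byte first */
		mac[2 * i + 1] = word[i] & 0xff;
	}
}
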
2120/* XXX: Clean me up, make me more legible */ 1608/* XXX: Clean me up, make me more legible */
2121static bool ath9k_hw_eeprom_set_def_board_values(struct ath_hal *ah, 1609static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
2122 struct ath9k_channel *chan) 1610 struct ath9k_channel *chan)
2123{ 1611{
1612#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
2124 struct modal_eep_header *pModal; 1613 struct modal_eep_header *pModal;
2125 struct ath_hal_5416 *ahp = AH5416(ah); 1614 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
2126 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
2127 int i, regChainOffset; 1615 int i, regChainOffset;
2128 u8 txRxAttenLocal; 1616 u8 txRxAttenLocal;
2129 u16 ant_config;
2130 1617
2131 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); 1618 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
2132 1619
2133 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; 1620 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
2134 1621
2135 ath9k_hw_get_eeprom_antenna_cfg(ah, chan, 0, &ant_config); 1622 REG_WRITE(ah, AR_PHY_SWITCH_COM,
2136 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config); 1623 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
2137 1624
2138 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 1625 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
2139 if (AR_SREV_9280(ah)) { 1626 if (AR_SREV_9280(ah)) {
@@ -2142,7 +1629,7 @@ static bool ath9k_hw_eeprom_set_def_board_values(struct ath_hal *ah,
2142 } 1629 }
2143 1630
2144 if (AR_SREV_5416_V20_OR_LATER(ah) && 1631 if (AR_SREV_5416_V20_OR_LATER(ah) &&
2145 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) 1632 (ah->rxchainmask == 5 || ah->txchainmask == 5)
2146 && (i != 0)) 1633 && (i != 0))
2147 regChainOffset = (i == 1) ? 0x2000 : 0x1000; 1634 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
2148 else 1635 else
@@ -2163,9 +1650,7 @@ static bool ath9k_hw_eeprom_set_def_board_values(struct ath_hal *ah,
2163 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); 1650 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
2164 1651
2165 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) { 1652 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
2166 if ((eep->baseEepHeader.version & 1653 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
2167 AR5416_EEP_VER_MINOR_MASK) >=
2168 AR5416_EEP_MINOR_VER_3) {
2169 txRxAttenLocal = pModal->txRxAttenCh[i]; 1654 txRxAttenLocal = pModal->txRxAttenCh[i];
2170 if (AR_SREV_9280_10_OR_LATER(ah)) { 1655 if (AR_SREV_9280_10_OR_LATER(ah)) {
2171 REG_RMW_FIELD(ah, 1656 REG_RMW_FIELD(ah,
@@ -2332,8 +1817,7 @@ static bool ath9k_hw_eeprom_set_def_board_values(struct ath_hal *ah,
2332 pModal->thresh62); 1817 pModal->thresh62);
2333 } 1818 }
2334 1819
2335 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 1820 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
2336 AR5416_EEP_MINOR_VER_2) {
2337 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, 1821 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
2338 AR_PHY_TX_END_DATA_START, 1822 AR_PHY_TX_END_DATA_START,
2339 pModal->txFrameToDataStart); 1823 pModal->txFrameToDataStart);
@@ -2341,296 +1825,803 @@ static bool ath9k_hw_eeprom_set_def_board_values(struct ath_hal *ah,
2341 pModal->txFrameToPaOn); 1825 pModal->txFrameToPaOn);
2342 } 1826 }
2343 1827
2344 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 1828 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
2345 AR5416_EEP_MINOR_VER_3) {
2346 if (IS_CHAN_HT40(chan)) 1829 if (IS_CHAN_HT40(chan))
2347 REG_RMW_FIELD(ah, AR_PHY_SETTLING, 1830 REG_RMW_FIELD(ah, AR_PHY_SETTLING,
2348 AR_PHY_SETTLING_SWITCH, 1831 AR_PHY_SETTLING_SWITCH,
2349 pModal->swSettleHt40); 1832 pModal->swSettleHt40);
2350 } 1833 }
2351 1834
1835 if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) {
1836 if (IS_CHAN_HT20(chan))
1837 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
1838 eep->baseEepHeader.dacLpMode);
1839 else if (eep->baseEepHeader.dacHiPwrMode_5G)
1840 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0);
1841 else
1842 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
1843 eep->baseEepHeader.dacLpMode);
1844
1845 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
1846 pModal->miscBits >> 2);
1847 }
1848
2352 return true; 1849 return true;
1850#undef AR5416_VER_MASK
2353} 1851}
2354 1852
2355static bool ath9k_hw_eeprom_set_4k_board_values(struct ath_hal *ah, 1853static void ath9k_hw_def_set_addac(struct ath_hw *ah,
2356 struct ath9k_channel *chan) 1854 struct ath9k_channel *chan)
2357{ 1855{
2358 struct modal_eep_4k_header *pModal; 1856#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt])
2359 struct ath_hal_5416 *ahp = AH5416(ah); 1857 struct modal_eep_header *pModal;
2360 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k; 1858 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
2361 int regChainOffset; 1859 u8 biaslevel;
2362 u8 txRxAttenLocal;
2363 u16 ant_config = 0;
2364 u8 ob[5], db1[5], db2[5];
2365 u8 ant_div_control1, ant_div_control2;
2366 u32 regVal;
2367 1860
1861 if (ah->hw_version.macVersion != AR_SREV_VERSION_9160)
1862 return;
2368 1863
2369 pModal = &eep->modalHeader; 1864 if (ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_MINOR_VER_7)
1865 return;
2370 1866
2371 txRxAttenLocal = 23; 1867 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
2372 1868
2373 ath9k_hw_get_eeprom_antenna_cfg(ah, chan, 0, &ant_config); 1869 if (pModal->xpaBiasLvl != 0xff) {
2374 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config); 1870 biaslevel = pModal->xpaBiasLvl;
1871 } else {
1872 u16 resetFreqBin, freqBin, freqCount = 0;
1873 struct chan_centers centers;
2375 1874
2376 regChainOffset = 0; 1875 ath9k_hw_get_channel_centers(ah, chan, &centers);
2377 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
2378 pModal->antCtrlChain[0]);
2379 1876
2380 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, 1877 resetFreqBin = FREQ2FBIN(centers.synth_center,
2381 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & 1878 IS_CHAN_2GHZ(chan));
2382 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | 1879 freqBin = XPA_LVL_FREQ(0) & 0xff;
2383 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | 1880 biaslevel = (u8) (XPA_LVL_FREQ(0) >> 14);
2384 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
2385 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
2386 1881
2387 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 1882 freqCount++;
2388 AR5416_EEP_MINOR_VER_3) {
2389 txRxAttenLocal = pModal->txRxAttenCh[0];
2390 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2391 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
2392 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2393 AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
2394 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2395 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
2396 pModal->xatten2Margin[0]);
2397 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2398 AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
2399 }
2400 1883
2401 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, 1884 while (freqCount < 3) {
2402 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); 1885 if (XPA_LVL_FREQ(freqCount) == 0x0)
2403 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, 1886 break;
2404 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
2405 1887
2406 if (AR_SREV_9285_11(ah)) 1888 freqBin = XPA_LVL_FREQ(freqCount) & 0xff;
2407 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14)); 1889 if (resetFreqBin >= freqBin)
1890 biaslevel = (u8)(XPA_LVL_FREQ(freqCount) >> 14);
1891 else
1892 break;
1893 freqCount++;
1894 }
1895 }
2408 1896
2409 /* Initialize Ant Diversity settings from EEPROM */ 1897 if (IS_CHAN_2GHZ(chan)) {
2410 if (pModal->version == 3) { 1898 INI_RA(&ah->iniAddac, 7, 1) = (INI_RA(&ah->iniAddac,
2411 ant_div_control1 = ((pModal->ob_234 >> 12) & 0xf); 1899 7, 1) & (~0x18)) | biaslevel << 3;
2412 ant_div_control2 = ((pModal->db1_234 >> 12) & 0xf); 1900 } else {
2413 regVal = REG_READ(ah, 0x99ac); 1901 INI_RA(&ah->iniAddac, 6, 1) = (INI_RA(&ah->iniAddac,
2414 regVal &= (~(0x7f000000)); 1902 6, 1) & (~0xc0)) | biaslevel << 6;
2415 regVal |= ((ant_div_control1 & 0x1) << 24);
2416 regVal |= (((ant_div_control1 >> 1) & 0x1) << 29);
2417 regVal |= (((ant_div_control1 >> 2) & 0x1) << 30);
2418 regVal |= ((ant_div_control2 & 0x3) << 25);
2419 regVal |= (((ant_div_control2 >> 2) & 0x3) << 27);
2420 REG_WRITE(ah, 0x99ac, regVal);
2421 regVal = REG_READ(ah, 0x99ac);
2422 regVal = REG_READ(ah, 0xa208);
2423 regVal &= (~(0x1 << 13));
2424 regVal |= (((ant_div_control1 >> 3) & 0x1) << 13);
2425 REG_WRITE(ah, 0xa208, regVal);
2426 regVal = REG_READ(ah, 0xa208);
2427 } 1903 }
1904#undef XPA_LVL_FREQ
1905}
2428 1906
2429 if (pModal->version >= 2) { 1907static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
2430 ob[0] = (pModal->ob_01 & 0xf); 1908 struct ath9k_channel *chan,
2431 ob[1] = (pModal->ob_01 >> 4) & 0xf; 1909 struct cal_data_per_freq *pRawDataSet,
2432 ob[2] = (pModal->ob_234 & 0xf); 1910 u8 *bChans, u16 availPiers,
2433 ob[3] = ((pModal->ob_234 >> 4) & 0xf); 1911 u16 tPdGainOverlap, int16_t *pMinCalPower,
2434 ob[4] = ((pModal->ob_234 >> 8) & 0xf); 1912 u16 *pPdGainBoundaries, u8 *pPDADCValues,
1913 u16 numXpdGains)
1914{
1915 int i, j, k;
1916 int16_t ss;
1917 u16 idxL = 0, idxR = 0, numPiers;
1918 static u8 vpdTableL[AR5416_NUM_PD_GAINS]
1919 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
1920 static u8 vpdTableR[AR5416_NUM_PD_GAINS]
1921 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
1922 static u8 vpdTableI[AR5416_NUM_PD_GAINS]
1923 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
2435 1924
2436 db1[0] = (pModal->db1_01 & 0xf); 1925 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
2437 db1[1] = ((pModal->db1_01 >> 4) & 0xf); 1926 u8 minPwrT4[AR5416_NUM_PD_GAINS];
2438 db1[2] = (pModal->db1_234 & 0xf); 1927 u8 maxPwrT4[AR5416_NUM_PD_GAINS];
2439 db1[3] = ((pModal->db1_234 >> 4) & 0xf); 1928 int16_t vpdStep;
2440 db1[4] = ((pModal->db1_234 >> 8) & 0xf); 1929 int16_t tmpVal;
1930 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
1931 bool match;
1932 int16_t minDelta = 0;
1933 struct chan_centers centers;
2441 1934
2442 db2[0] = (pModal->db2_01 & 0xf); 1935 ath9k_hw_get_channel_centers(ah, chan, &centers);
2443 db2[1] = ((pModal->db2_01 >> 4) & 0xf);
2444 db2[2] = (pModal->db2_234 & 0xf);
2445 db2[3] = ((pModal->db2_234 >> 4) & 0xf);
2446 db2[4] = ((pModal->db2_234 >> 8) & 0xf);
2447 1936
2448 } else if (pModal->version == 1) { 1937 for (numPiers = 0; numPiers < availPiers; numPiers++) {
1938 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
1939 break;
1940 }
2449 1941
2450 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1942 match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
2451 "EEPROM Model version is set to 1 \n"); 1943 IS_CHAN_2GHZ(chan)),
2452 ob[0] = (pModal->ob_01 & 0xf); 1944 bChans, numPiers, &idxL, &idxR);
2453 ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf; 1945
2454 db1[0] = (pModal->db1_01 & 0xf); 1946 if (match) {
2455 db1[1] = db1[2] = db1[3] = 1947 for (i = 0; i < numXpdGains; i++) {
2456 db1[4] = ((pModal->db1_01 >> 4) & 0xf); 1948 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
2457 db2[0] = (pModal->db2_01 & 0xf); 1949 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
2458 db2[1] = db2[2] = db2[3] = 1950 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
2459 db2[4] = ((pModal->db2_01 >> 4) & 0xf); 1951 pRawDataSet[idxL].pwrPdg[i],
1952 pRawDataSet[idxL].vpdPdg[i],
1953 AR5416_PD_GAIN_ICEPTS,
1954 vpdTableI[i]);
1955 }
2460 } else { 1956 } else {
2461 int i; 1957 for (i = 0; i < numXpdGains; i++) {
2462 for (i = 0; i < 5; i++) { 1958 pVpdL = pRawDataSet[idxL].vpdPdg[i];
2463 ob[i] = pModal->ob_01; 1959 pPwrL = pRawDataSet[idxL].pwrPdg[i];
2464 db1[i] = pModal->db1_01; 1960 pVpdR = pRawDataSet[idxR].vpdPdg[i];
2465 db2[i] = pModal->db1_01; 1961 pPwrR = pRawDataSet[idxR].pwrPdg[i];
1962
1963 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
1964
1965 maxPwrT4[i] =
1966 min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
1967 pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
1968
1969
1970 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
1971 pPwrL, pVpdL,
1972 AR5416_PD_GAIN_ICEPTS,
1973 vpdTableL[i]);
1974 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
1975 pPwrR, pVpdR,
1976 AR5416_PD_GAIN_ICEPTS,
1977 vpdTableR[i]);
1978
1979 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
1980 vpdTableI[i][j] =
1981 (u8)(ath9k_hw_interpolate((u16)
1982 FREQ2FBIN(centers.
1983 synth_center,
1984 IS_CHAN_2GHZ
1985 (chan)),
1986 bChans[idxL], bChans[idxR],
1987 vpdTableL[i][j], vpdTableR[i][j]));
1988 }
2466 } 1989 }
2467 } 1990 }
2468 1991
2469 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, 1992 *pMinCalPower = (int16_t)(minPwrT4[0] / 2);
2470 AR9285_AN_RF2G3_OB_0, AR9285_AN_RF2G3_OB_0_S, ob[0]);
2471 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2472 AR9285_AN_RF2G3_OB_1, AR9285_AN_RF2G3_OB_1_S, ob[1]);
2473 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2474 AR9285_AN_RF2G3_OB_2, AR9285_AN_RF2G3_OB_2_S, ob[2]);
2475 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2476 AR9285_AN_RF2G3_OB_3, AR9285_AN_RF2G3_OB_3_S, ob[3]);
2477 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2478 AR9285_AN_RF2G3_OB_4, AR9285_AN_RF2G3_OB_4_S, ob[4]);
2479 1993
2480 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, 1994 k = 0;
2481 AR9285_AN_RF2G3_DB1_0, AR9285_AN_RF2G3_DB1_0_S, db1[0]);
2482 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2483 AR9285_AN_RF2G3_DB1_1, AR9285_AN_RF2G3_DB1_1_S, db1[1]);
2484 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2485 AR9285_AN_RF2G3_DB1_2, AR9285_AN_RF2G3_DB1_2_S, db1[2]);
2486 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2487 AR9285_AN_RF2G4_DB1_3, AR9285_AN_RF2G4_DB1_3_S, db1[3]);
2488 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2489 AR9285_AN_RF2G4_DB1_4, AR9285_AN_RF2G4_DB1_4_S, db1[4]);
2490 1995
2491 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, 1996 for (i = 0; i < numXpdGains; i++) {
2492 AR9285_AN_RF2G4_DB2_0, AR9285_AN_RF2G4_DB2_0_S, db2[0]); 1997 if (i == (numXpdGains - 1))
2493 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, 1998 pPdGainBoundaries[i] =
2494 AR9285_AN_RF2G4_DB2_1, AR9285_AN_RF2G4_DB2_1_S, db2[1]); 1999 (u16)(maxPwrT4[i] / 2);
2495 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, 2000 else
2496 AR9285_AN_RF2G4_DB2_2, AR9285_AN_RF2G4_DB2_2_S, db2[2]); 2001 pPdGainBoundaries[i] =
2497 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, 2002 (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
2498 AR9285_AN_RF2G4_DB2_3, AR9285_AN_RF2G4_DB2_3_S, db2[3]); 2003
2499 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, 2004 pPdGainBoundaries[i] =
2500 AR9285_AN_RF2G4_DB2_4, AR9285_AN_RF2G4_DB2_4_S, db2[4]); 2005 min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);
2501 2006
2007 if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
2008 minDelta = pPdGainBoundaries[0] - 23;
2009 pPdGainBoundaries[0] = 23;
2010 } else {
2011 minDelta = 0;
2012 }
2502 2013
2503 if (AR_SREV_9285_11(ah)) 2014 if (i == 0) {
2504 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT); 2015 if (AR_SREV_9280_10_OR_LATER(ah))
2016 ss = (int16_t)(0 - (minPwrT4[i] / 2));
2017 else
2018 ss = 0;
2019 } else {
2020 ss = (int16_t)((pPdGainBoundaries[i - 1] -
2021 (minPwrT4[i] / 2)) -
2022 tPdGainOverlap + 1 + minDelta);
2023 }
2024 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
2025 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
2505 2026
2506 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, 2027 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
2507 pModal->switchSettling); 2028 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
2508 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, 2029 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
2509 pModal->adcDesiredSize); 2030 ss++;
2031 }
2510 2032
2511 REG_WRITE(ah, AR_PHY_RF_CTL4, 2033 sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
2512 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) | 2034 tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
2513 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) | 2035 (minPwrT4[i] / 2));
2514 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) | 2036 maxIndex = (tgtIndex < sizeCurrVpdTable) ?
2515 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON)); 2037 tgtIndex : sizeCurrVpdTable;
2516 2038
2517 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 2039 while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
2518 pModal->txEndToRxOn); 2040 pPDADCValues[k++] = vpdTableI[i][ss++];
2519 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, 2041 }
2520 pModal->thresh62);
2521 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
2522 pModal->thresh62);
2523 2042
2524 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 2043 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
2525 AR5416_EEP_MINOR_VER_2) { 2044 vpdTableI[i][sizeCurrVpdTable - 2]);
2526 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, 2045 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
2527 pModal->txFrameToDataStart); 2046
2528 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, 2047 if (tgtIndex > maxIndex) {
2529 pModal->txFrameToPaOn); 2048 while ((ss <= tgtIndex) &&
2049 (k < (AR5416_NUM_PDADC_VALUES - 1))) {
2050 tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
2051 (ss - maxIndex + 1) * vpdStep));
2052 pPDADCValues[k++] = (u8)((tmpVal > 255) ?
2053 255 : tmpVal);
2054 ss++;
2055 }
2056 }
2530 } 2057 }
2531 2058
2532 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 2059 while (i < AR5416_PD_GAINS_IN_MASK) {
2533 AR5416_EEP_MINOR_VER_3) { 2060 pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
2534 if (IS_CHAN_HT40(chan)) 2061 i++;
2535 REG_RMW_FIELD(ah, AR_PHY_SETTLING,
2536 AR_PHY_SETTLING_SWITCH,
2537 pModal->swSettleHt40);
2538 } 2062 }
2539 2063
2540 return true; 2064 while (k < AR5416_NUM_PDADC_VALUES) {
2541} 2065 pPDADCValues[k] = pPDADCValues[k - 1];
2066 k++;
2067 }
2542 2068
2543static bool (*ath9k_eeprom_set_board_values[])(struct ath_hal *, 2069 return;
2544 struct ath9k_channel *) = { 2070}
2545 ath9k_hw_eeprom_set_def_board_values,
2546 ath9k_hw_eeprom_set_4k_board_values
2547};
2548 2071
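The per-channel vpd tables in the function above are built by linearly interpolating, point by point, between the two calibration piers that bracket the target channel (via ath9k_hw_get_lower_upper_index() and ath9k_hw_interpolate()). A hedged sketch of that interpolation step, with hypothetical names and no claim to match the real helper's signature:

/* Sketch: linear interpolation between the bracketing piers xl..xr. */
static u8 interp_between_piers(u16 x, u16 xl, u16 xr, u8 yl, u8 yr)
{
	if (xr == xl)
		return yl;

	return (u8)(yl + ((yr - yl) * (x - xl)) / (xr - xl));
}
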
2549bool ath9k_hw_eeprom_set_board_values(struct ath_hal *ah, 2072static bool ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
2550 struct ath9k_channel *chan) 2073 struct ath9k_channel *chan,
2074 int16_t *pTxPowerIndexOffset)
2551{ 2075{
2552 struct ath_hal_5416 *ahp = AH5416(ah); 2076 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
2077 struct cal_data_per_freq *pRawDataset;
2078 u8 *pCalBChans = NULL;
2079 u16 pdGainOverlap_t2;
2080 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
2081 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
2082 u16 numPiers, i, j;
2083 int16_t tMinCalPower;
2084 u16 numXpdGain, xpdMask;
2085 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
2086 u32 reg32, regOffset, regChainOffset;
2087 int16_t modalIdx;
2553 2088
2554 return ath9k_eeprom_set_board_values[ahp->ah_eep_map](ah, chan); 2089 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
2090 xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
2091
2092 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
2093 AR5416_EEP_MINOR_VER_2) {
2094 pdGainOverlap_t2 =
2095 pEepData->modalHeader[modalIdx].pdGainOverlap;
2096 } else {
2097 pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
2098 AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
2099 }
2100
2101 if (IS_CHAN_2GHZ(chan)) {
2102 pCalBChans = pEepData->calFreqPier2G;
2103 numPiers = AR5416_NUM_2G_CAL_PIERS;
2104 } else {
2105 pCalBChans = pEepData->calFreqPier5G;
2106 numPiers = AR5416_NUM_5G_CAL_PIERS;
2107 }
2108
2109 numXpdGain = 0;
2110
2111 for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
2112 if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
2113 if (numXpdGain >= AR5416_NUM_PD_GAINS)
2114 break;
2115 xpdGainValues[numXpdGain] =
2116 (u16)(AR5416_PD_GAINS_IN_MASK - i);
2117 numXpdGain++;
2118 }
2119 }
2120
2121 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
2122 (numXpdGain - 1) & 0x3);
2123 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
2124 xpdGainValues[0]);
2125 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
2126 xpdGainValues[1]);
2127 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
2128 xpdGainValues[2]);
2129
2130 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
2131 if (AR_SREV_5416_V20_OR_LATER(ah) &&
2132 (ah->rxchainmask == 5 || ah->txchainmask == 5) &&
2133 (i != 0)) {
2134 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
2135 } else
2136 regChainOffset = i * 0x1000;
2137
2138 if (pEepData->baseEepHeader.txMask & (1 << i)) {
2139 if (IS_CHAN_2GHZ(chan))
2140 pRawDataset = pEepData->calPierData2G[i];
2141 else
2142 pRawDataset = pEepData->calPierData5G[i];
2143
2144 ath9k_hw_get_def_gain_boundaries_pdadcs(ah, chan,
2145 pRawDataset, pCalBChans,
2146 numPiers, pdGainOverlap_t2,
2147 &tMinCalPower, gainBoundaries,
2148 pdadcValues, numXpdGain);
2149
2150 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
2151 REG_WRITE(ah,
2152 AR_PHY_TPCRG5 + regChainOffset,
2153 SM(pdGainOverlap_t2,
2154 AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
2155 | SM(gainBoundaries[0],
2156 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
2157 | SM(gainBoundaries[1],
2158 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
2159 | SM(gainBoundaries[2],
2160 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
2161 | SM(gainBoundaries[3],
2162 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
2163 }
2164
2165 regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
2166 for (j = 0; j < 32; j++) {
2167 reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
2168 ((pdadcValues[4 * j + 1] & 0xFF) << 8) |
2169 ((pdadcValues[4 * j + 2] & 0xFF) << 16)|
2170 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
2171 REG_WRITE(ah, regOffset, reg32);
2172
2173 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
2174 "PDADC (%d,%4x): %4.4x %8.8x\n",
2175 i, regChainOffset, regOffset,
2176 reg32);
2177 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
2178 "PDADC: Chain %d | PDADC %3d "
2179 "Value %3d | PDADC %3d Value %3d | "
2180 "PDADC %3d Value %3d | PDADC %3d "
2181 "Value %3d |\n",
2182 i, 4 * j, pdadcValues[4 * j],
2183 4 * j + 1, pdadcValues[4 * j + 1],
2184 4 * j + 2, pdadcValues[4 * j + 2],
2185 4 * j + 3,
2186 pdadcValues[4 * j + 3]);
2187
2188 regOffset += 4;
2189 }
2190 }
2191 }
2192
2193 *pTxPowerIndexOffset = 0;
2194
2195 return true;
2555} 2196}
2556 2197
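The register loop above packs the 128 computed PDADC entries four at a time, least-significant byte first, into 32 consecutive 32-bit writes starting at AR_PHY_BASE + (672 << 2). Stripped of the register I/O, the packing step alone amounts to (hypothetical helper name):

/* Sketch: pack four 8-bit PDADC values into one 32-bit word, LSB first. */
static u32 pdadc_pack4(const u8 *v)
{
	return ((u32)v[0]) |
	       ((u32)v[1] << 8) |
	       ((u32)v[2] << 16) |
	       ((u32)v[3] << 24);
}
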
2557static int ath9k_hw_get_def_eeprom_antenna_cfg(struct ath_hal *ah, 2198static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
2558 struct ath9k_channel *chan, 2199 struct ath9k_channel *chan,
2559 u8 index, u16 *config) 2200 int16_t *ratesArray,
2201 u16 cfgCtl,
2202 u16 AntennaReduction,
2203 u16 twiceMaxRegulatoryPower,
2204 u16 powerLimit)
2560{ 2205{
2561 struct ath_hal_5416 *ahp = AH5416(ah); 2206#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
2562 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def; 2207#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */
2563 struct modal_eep_header *pModal =
2564 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
2565 struct base_eep_header *pBase = &eep->baseEepHeader;
2566 2208
2567 switch (index) { 2209 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
2568 case 0: 2210 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
2569 *config = pModal->antCtrlCommon & 0xFFFF; 2211 static const u16 tpScaleReductionTable[5] =
2570 return 0; 2212 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
2213
2214 int i;
2215 int16_t twiceLargestAntenna;
2216 struct cal_ctl_data *rep;
2217 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
2218 0, { 0, 0, 0, 0}
2219 };
2220 struct cal_target_power_leg targetPowerOfdmExt = {
2221 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
2222 0, { 0, 0, 0, 0 }
2223 };
2224 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
2225 0, {0, 0, 0, 0}
2226 };
2227 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
2228 u16 ctlModesFor11a[] =
2229 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
2230 u16 ctlModesFor11g[] =
2231 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
2232 CTL_2GHT40
2233 };
2234 u16 numCtlModes, *pCtlMode, ctlMode, freq;
2235 struct chan_centers centers;
2236 int tx_chainmask;
2237 u16 twiceMinEdgePower;
2238
2239 tx_chainmask = ah->txchainmask;
2240
2241 ath9k_hw_get_channel_centers(ah, chan, &centers);
2242
2243 twiceLargestAntenna = max(
2244 pEepData->modalHeader
2245 [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
2246 pEepData->modalHeader
2247 [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
2248
2249 twiceLargestAntenna = max((u8)twiceLargestAntenna,
2250 pEepData->modalHeader
2251 [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
2252
2253 twiceLargestAntenna = (int16_t)min(AntennaReduction -
2254 twiceLargestAntenna, 0);
2255
2256 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
2257
2258 if (ah->regulatory.tp_scale != ATH9K_TP_SCALE_MAX) {
2259 maxRegAllowedPower -=
2260 (tpScaleReductionTable[(ah->regulatory.tp_scale)] * 2);
2261 }
2262
2263 scaledPower = min(powerLimit, maxRegAllowedPower);
2264
2265 switch (ar5416_get_ntxchains(tx_chainmask)) {
2571 case 1: 2266 case 1:
2572 if (pBase->version >= 0x0E0D) {
2573 if (pModal->useAnt1) {
2574 *config =
2575 ((pModal->antCtrlCommon & 0xFFFF0000) >> 16);
2576 return 0;
2577 }
2578 }
2579 break; 2267 break;
2580 default: 2268 case 2:
2269 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
2581 break; 2270 break;
2271 case 3:
2272 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
2273 break;
2274 }
2275
2276 scaledPower = max((u16)0, scaledPower);
2277
2278 if (IS_CHAN_2GHZ(chan)) {
2279 numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
2280 SUB_NUM_CTL_MODES_AT_2G_40;
2281 pCtlMode = ctlModesFor11g;
2282
2283 ath9k_hw_get_legacy_target_powers(ah, chan,
2284 pEepData->calTargetPowerCck,
2285 AR5416_NUM_2G_CCK_TARGET_POWERS,
2286 &targetPowerCck, 4, false);
2287 ath9k_hw_get_legacy_target_powers(ah, chan,
2288 pEepData->calTargetPower2G,
2289 AR5416_NUM_2G_20_TARGET_POWERS,
2290 &targetPowerOfdm, 4, false);
2291 ath9k_hw_get_target_powers(ah, chan,
2292 pEepData->calTargetPower2GHT20,
2293 AR5416_NUM_2G_20_TARGET_POWERS,
2294 &targetPowerHt20, 8, false);
2295
2296 if (IS_CHAN_HT40(chan)) {
2297 numCtlModes = ARRAY_SIZE(ctlModesFor11g);
2298 ath9k_hw_get_target_powers(ah, chan,
2299 pEepData->calTargetPower2GHT40,
2300 AR5416_NUM_2G_40_TARGET_POWERS,
2301 &targetPowerHt40, 8, true);
2302 ath9k_hw_get_legacy_target_powers(ah, chan,
2303 pEepData->calTargetPowerCck,
2304 AR5416_NUM_2G_CCK_TARGET_POWERS,
2305 &targetPowerCckExt, 4, true);
2306 ath9k_hw_get_legacy_target_powers(ah, chan,
2307 pEepData->calTargetPower2G,
2308 AR5416_NUM_2G_20_TARGET_POWERS,
2309 &targetPowerOfdmExt, 4, true);
2310 }
2311 } else {
2312 numCtlModes = ARRAY_SIZE(ctlModesFor11a) -
2313 SUB_NUM_CTL_MODES_AT_5G_40;
2314 pCtlMode = ctlModesFor11a;
2315
2316 ath9k_hw_get_legacy_target_powers(ah, chan,
2317 pEepData->calTargetPower5G,
2318 AR5416_NUM_5G_20_TARGET_POWERS,
2319 &targetPowerOfdm, 4, false);
2320 ath9k_hw_get_target_powers(ah, chan,
2321 pEepData->calTargetPower5GHT20,
2322 AR5416_NUM_5G_20_TARGET_POWERS,
2323 &targetPowerHt20, 8, false);
2324
2325 if (IS_CHAN_HT40(chan)) {
2326 numCtlModes = ARRAY_SIZE(ctlModesFor11a);
2327 ath9k_hw_get_target_powers(ah, chan,
2328 pEepData->calTargetPower5GHT40,
2329 AR5416_NUM_5G_40_TARGET_POWERS,
2330 &targetPowerHt40, 8, true);
2331 ath9k_hw_get_legacy_target_powers(ah, chan,
2332 pEepData->calTargetPower5G,
2333 AR5416_NUM_5G_20_TARGET_POWERS,
2334 &targetPowerOfdmExt, 4, true);
2335 }
2582 } 2336 }
2583 2337
2584 return -EINVAL; 2338 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
2339 bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
2340 (pCtlMode[ctlMode] == CTL_2GHT40);
2341 if (isHt40CtlMode)
2342 freq = centers.synth_center;
2343 else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
2344 freq = centers.ext_center;
2345 else
2346 freq = centers.ctl_center;
2347
2348 if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
2349 ah->eep_ops->get_eeprom_rev(ah) <= 2)
2350 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
2351
2352 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
2353 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
2354 "EXT_ADDITIVE %d\n",
2355 ctlMode, numCtlModes, isHt40CtlMode,
2356 (pCtlMode[ctlMode] & EXT_ADDITIVE));
2357
2358 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
2359 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
2360 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
2361 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
2362 "chan %d\n",
2363 i, cfgCtl, pCtlMode[ctlMode],
2364 pEepData->ctlIndex[i], chan->channel);
2365
2366 if ((((cfgCtl & ~CTL_MODE_M) |
2367 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
2368 pEepData->ctlIndex[i]) ||
2369 (((cfgCtl & ~CTL_MODE_M) |
2370 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
2371 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
2372 rep = &(pEepData->ctlData[i]);
2373
2374 twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
2375 rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1],
2376 IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES);
2377
2378 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
2379 " MATCH-EE_IDX %d: ch %d is2 %d "
2380 "2xMinEdge %d chainmask %d chains %d\n",
2381 i, freq, IS_CHAN_2GHZ(chan),
2382 twiceMinEdgePower, tx_chainmask,
2383 ar5416_get_ntxchains
2384 (tx_chainmask));
2385 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
2386 twiceMaxEdgePower = min(twiceMaxEdgePower,
2387 twiceMinEdgePower);
2388 } else {
2389 twiceMaxEdgePower = twiceMinEdgePower;
2390 break;
2391 }
2392 }
2393 }
2394
2395 minCtlPower = min(twiceMaxEdgePower, scaledPower);
2396
2397 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
2398 " SEL-Min ctlMode %d pCtlMode %d "
2399 "2xMaxEdge %d sP %d minCtlPwr %d\n",
2400 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
2401 scaledPower, minCtlPower);
2402
2403 switch (pCtlMode[ctlMode]) {
2404 case CTL_11B:
2405 for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
2406 targetPowerCck.tPow2x[i] =
2407 min((u16)targetPowerCck.tPow2x[i],
2408 minCtlPower);
2409 }
2410 break;
2411 case CTL_11A:
2412 case CTL_11G:
2413 for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
2414 targetPowerOfdm.tPow2x[i] =
2415 min((u16)targetPowerOfdm.tPow2x[i],
2416 minCtlPower);
2417 }
2418 break;
2419 case CTL_5GHT20:
2420 case CTL_2GHT20:
2421 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
2422 targetPowerHt20.tPow2x[i] =
2423 min((u16)targetPowerHt20.tPow2x[i],
2424 minCtlPower);
2425 }
2426 break;
2427 case CTL_11B_EXT:
2428 targetPowerCckExt.tPow2x[0] = min((u16)
2429 targetPowerCckExt.tPow2x[0],
2430 minCtlPower);
2431 break;
2432 case CTL_11A_EXT:
2433 case CTL_11G_EXT:
2434 targetPowerOfdmExt.tPow2x[0] = min((u16)
2435 targetPowerOfdmExt.tPow2x[0],
2436 minCtlPower);
2437 break;
2438 case CTL_5GHT40:
2439 case CTL_2GHT40:
2440 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
2441 targetPowerHt40.tPow2x[i] =
2442 min((u16)targetPowerHt40.tPow2x[i],
2443 minCtlPower);
2444 }
2445 break;
2446 default:
2447 break;
2448 }
2449 }
2450
2451 ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
2452 ratesArray[rate18mb] = ratesArray[rate24mb] =
2453 targetPowerOfdm.tPow2x[0];
2454 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
2455 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
2456 ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
2457 ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
2458
2459 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
2460 ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
2461
2462 if (IS_CHAN_2GHZ(chan)) {
2463 ratesArray[rate1l] = targetPowerCck.tPow2x[0];
2464 ratesArray[rate2s] = ratesArray[rate2l] =
2465 targetPowerCck.tPow2x[1];
2466 ratesArray[rate5_5s] = ratesArray[rate5_5l] =
2467 targetPowerCck.tPow2x[2];
2468 ;
2469 ratesArray[rate11s] = ratesArray[rate11l] =
2470 targetPowerCck.tPow2x[3];
2471 ;
2472 }
2473 if (IS_CHAN_HT40(chan)) {
2474 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
2475 ratesArray[rateHt40_0 + i] =
2476 targetPowerHt40.tPow2x[i];
2477 }
2478 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
2479 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
2480 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
2481 if (IS_CHAN_2GHZ(chan)) {
2482 ratesArray[rateExtCck] =
2483 targetPowerCckExt.tPow2x[0];
2484 }
2485 }
2486 return true;
2585} 2487}
2586 2488
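A small arithmetic note on the two REDUCE_SCALED_POWER_BY_*_CHAIN constants used above: target powers are kept in half-dB units, and splitting transmit power across N chains costs 10*log10(N) dB per chain, so two chains give 2 * 10*log10(2), about 6.02, rounded to 6, and three chains give 2 * 10*log10(3), about 9.54, rounded to 10, matching the in-code comments.
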
2587static int ath9k_hw_get_4k_eeprom_antenna_cfg(struct ath_hal *ah, 2489static int ath9k_hw_def_set_txpower(struct ath_hw *ah,
2588 struct ath9k_channel *chan, 2490 struct ath9k_channel *chan,
2589 u8 index, u16 *config) 2491 u16 cfgCtl,
2492 u8 twiceAntennaReduction,
2493 u8 twiceMaxRegulatoryPower,
2494 u8 powerLimit)
2590{ 2495{
2591 struct ath_hal_5416 *ahp = AH5416(ah); 2496 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
2592 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k; 2497 struct modal_eep_header *pModal =
2593 struct modal_eep_4k_header *pModal = &eep->modalHeader; 2498 &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
2499 int16_t ratesArray[Ar5416RateSize];
2500 int16_t txPowerIndexOffset = 0;
2501 u8 ht40PowerIncForPdadc = 2;
2502 int i;
2594 2503
2595 switch (index) { 2504 memset(ratesArray, 0, sizeof(ratesArray));
2596 case 0: 2505
2597 *config = pModal->antCtrlCommon & 0xFFFF; 2506 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
2598 return 0; 2507 AR5416_EEP_MINOR_VER_2) {
2599 default: 2508 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
2600 break;
2601 } 2509 }
2602 2510
2603 return -EINVAL; 2511 if (!ath9k_hw_set_def_power_per_rate_table(ah, chan,
2604} 2512 &ratesArray[0], cfgCtl,
2513 twiceAntennaReduction,
2514 twiceMaxRegulatoryPower,
2515 powerLimit)) {
2516 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2517 "ath9k_hw_set_txpower: unable to set "
2518 "tx power per rate table\n");
2519 return -EIO;
2520 }
2605 2521
2606static int (*ath9k_get_eeprom_antenna_cfg[])(struct ath_hal *, 2522 if (!ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset)) {
2607 struct ath9k_channel *, 2523 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2608 u8, u16 *) = { 2524 "ath9k_hw_set_txpower: unable to set power table\n");
2609 ath9k_hw_get_def_eeprom_antenna_cfg, 2525 return -EIO;
2610 ath9k_hw_get_4k_eeprom_antenna_cfg 2526 }
2611};
2612 2527
2613int ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal *ah, 2528 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
2614 struct ath9k_channel *chan, 2529 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
2615 u8 index, u16 *config) 2530 if (ratesArray[i] > AR5416_MAX_RATE_POWER)
2616{ 2531 ratesArray[i] = AR5416_MAX_RATE_POWER;
2617 struct ath_hal_5416 *ahp = AH5416(ah); 2532 }
2618 2533
2619 return ath9k_get_eeprom_antenna_cfg[ahp->ah_eep_map](ah, chan, 2534 if (AR_SREV_9280_10_OR_LATER(ah)) {
2620 index, config); 2535 for (i = 0; i < Ar5416RateSize; i++)
2621} 2536 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
2537 }
2622 2538
2623static u8 ath9k_hw_get_4k_num_ant_config(struct ath_hal *ah, 2539 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
2624 enum ieee80211_band freq_band) 2540 ATH9K_POW_SM(ratesArray[rate18mb], 24)
2625{ 2541 | ATH9K_POW_SM(ratesArray[rate12mb], 16)
2626 return 1; 2542 | ATH9K_POW_SM(ratesArray[rate9mb], 8)
2543 | ATH9K_POW_SM(ratesArray[rate6mb], 0));
2544 REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
2545 ATH9K_POW_SM(ratesArray[rate54mb], 24)
2546 | ATH9K_POW_SM(ratesArray[rate48mb], 16)
2547 | ATH9K_POW_SM(ratesArray[rate36mb], 8)
2548 | ATH9K_POW_SM(ratesArray[rate24mb], 0));
2549
2550 if (IS_CHAN_2GHZ(chan)) {
2551 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
2552 ATH9K_POW_SM(ratesArray[rate2s], 24)
2553 | ATH9K_POW_SM(ratesArray[rate2l], 16)
2554 | ATH9K_POW_SM(ratesArray[rateXr], 8)
2555 | ATH9K_POW_SM(ratesArray[rate1l], 0));
2556 REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
2557 ATH9K_POW_SM(ratesArray[rate11s], 24)
2558 | ATH9K_POW_SM(ratesArray[rate11l], 16)
2559 | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
2560 | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
2561 }
2562
2563 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
2564 ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
2565 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
2566 | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
2567 | ATH9K_POW_SM(ratesArray[rateHt20_0], 0));
2568 REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
2569 ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
2570 | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
2571 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
2572 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
2573
2574 if (IS_CHAN_HT40(chan)) {
2575 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
2576 ATH9K_POW_SM(ratesArray[rateHt40_3] +
2577 ht40PowerIncForPdadc, 24)
2578 | ATH9K_POW_SM(ratesArray[rateHt40_2] +
2579 ht40PowerIncForPdadc, 16)
2580 | ATH9K_POW_SM(ratesArray[rateHt40_1] +
2581 ht40PowerIncForPdadc, 8)
2582 | ATH9K_POW_SM(ratesArray[rateHt40_0] +
2583 ht40PowerIncForPdadc, 0));
2584 REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
2585 ATH9K_POW_SM(ratesArray[rateHt40_7] +
2586 ht40PowerIncForPdadc, 24)
2587 | ATH9K_POW_SM(ratesArray[rateHt40_6] +
2588 ht40PowerIncForPdadc, 16)
2589 | ATH9K_POW_SM(ratesArray[rateHt40_5] +
2590 ht40PowerIncForPdadc, 8)
2591 | ATH9K_POW_SM(ratesArray[rateHt40_4] +
2592 ht40PowerIncForPdadc, 0));
2593
2594 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
2595 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
2596 | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
2597 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
2598 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
2599 }
2600
2601 REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
2602 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
2603 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
2604
2605 i = rate6mb;
2606
2607 if (IS_CHAN_HT40(chan))
2608 i = rateHt40_0;
2609 else if (IS_CHAN_HT20(chan))
2610 i = rateHt20_0;
2611
2612 if (AR_SREV_9280_10_OR_LATER(ah))
2613 ah->regulatory.max_power_level =
2614 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
2615 else
2616 ah->regulatory.max_power_level = ratesArray[i];
2617
2618 return 0;
2627} 2619}
2628 2620
2629static u8 ath9k_hw_get_def_num_ant_config(struct ath_hal *ah, 2621static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
2630 enum ieee80211_band freq_band) 2622 enum ieee80211_band freq_band)
2631{ 2623{
2632 struct ath_hal_5416 *ahp = AH5416(ah); 2624 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
2633 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
2634 struct modal_eep_header *pModal = 2625 struct modal_eep_header *pModal =
2635 &(eep->modalHeader[ATH9K_HAL_FREQ_BAND_2GHZ == freq_band]); 2626 &(eep->modalHeader[ATH9K_HAL_FREQ_BAND_2GHZ == freq_band]);
2636 struct base_eep_header *pBase = &eep->baseEepHeader; 2627 struct base_eep_header *pBase = &eep->baseEepHeader;
@@ -2645,180 +2636,75 @@ static u8 ath9k_hw_get_def_num_ant_config(struct ath_hal *ah,
2645 return num_ant_config; 2636 return num_ant_config;
2646} 2637}
2647 2638
2648static u8 (*ath9k_get_num_ant_config[])(struct ath_hal *, 2639static u16 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
2649 enum ieee80211_band) = { 2640 struct ath9k_channel *chan)
2650 ath9k_hw_get_def_num_ant_config,
2651 ath9k_hw_get_4k_num_ant_config
2652};
2653
2654u8 ath9k_hw_get_num_ant_config(struct ath_hal *ah,
2655 enum ieee80211_band freq_band)
2656{ 2641{
2657 struct ath_hal_5416 *ahp = AH5416(ah); 2642 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
2643 struct modal_eep_header *pModal =
2644 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
2658 2645
2659 return ath9k_get_num_ant_config[ahp->ah_eep_map](ah, freq_band); 2646 return pModal->antCtrlCommon & 0xFFFF;
2660} 2647}
2661 2648
2662u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah, u16 i, bool is2GHz) 2649u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
2663{ 2650{
2664#define EEP_MAP4K_SPURCHAN \
2665 (ahp->ah_eeprom.map4k.modalHeader.spurChans[i].spurChan)
2666#define EEP_DEF_SPURCHAN \ 2651#define EEP_DEF_SPURCHAN \
2667 (ahp->ah_eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan) 2652 (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
2668 struct ath_hal_5416 *ahp = AH5416(ah); 2653
2669 u16 spur_val = AR_NO_SPUR; 2654 u16 spur_val = AR_NO_SPUR;
2670 2655
2671 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 2656 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2672 "Getting spur idx %d is2Ghz. %d val %x\n", 2657 "Getting spur idx %d is2Ghz. %d val %x\n",
2673 i, is2GHz, ah->ah_config.spurchans[i][is2GHz]); 2658 i, is2GHz, ah->config.spurchans[i][is2GHz]);
2674 2659
2675 switch (ah->ah_config.spurmode) { 2660 switch (ah->config.spurmode) {
2676 case SPUR_DISABLE: 2661 case SPUR_DISABLE:
2677 break; 2662 break;
2678 case SPUR_ENABLE_IOCTL: 2663 case SPUR_ENABLE_IOCTL:
2679 spur_val = ah->ah_config.spurchans[i][is2GHz]; 2664 spur_val = ah->config.spurchans[i][is2GHz];
2680 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 2665 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2681 "Getting spur val from new loc. %d\n", spur_val); 2666 "Getting spur val from new loc. %d\n", spur_val);
2682 break; 2667 break;
2683 case SPUR_ENABLE_EEPROM: 2668 case SPUR_ENABLE_EEPROM:
2684 if (ahp->ah_eep_map == EEP_MAP_4KBITS) 2669 spur_val = EEP_DEF_SPURCHAN;
2685 spur_val = EEP_MAP4K_SPURCHAN;
2686 else
2687 spur_val = EEP_DEF_SPURCHAN;
2688 break; 2670 break;
2689
2690 } 2671 }
2691 2672
2692 return spur_val; 2673 return spur_val;
2693#undef EEP_DEF_SPURCHAN
2694#undef EEP_MAP4K_SPURCHAN
2695}
2696
2697static u32 ath9k_hw_get_eeprom_4k(struct ath_hal *ah,
2698 enum eeprom_param param)
2699{
2700 struct ath_hal_5416 *ahp = AH5416(ah);
2701 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k;
2702 struct modal_eep_4k_header *pModal = &eep->modalHeader;
2703 struct base_eep_header_4k *pBase = &eep->baseEepHeader;
2704
2705 switch (param) {
2706 case EEP_NFTHRESH_2:
2707 return pModal[1].noiseFloorThreshCh[0];
2708 case AR_EEPROM_MAC(0):
2709 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
2710 case AR_EEPROM_MAC(1):
2711 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
2712 case AR_EEPROM_MAC(2):
2713 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
2714 case EEP_REG_0:
2715 return pBase->regDmn[0];
2716 case EEP_REG_1:
2717 return pBase->regDmn[1];
2718 case EEP_OP_CAP:
2719 return pBase->deviceCap;
2720 case EEP_OP_MODE:
2721 return pBase->opCapFlags;
2722 case EEP_RF_SILENT:
2723 return pBase->rfSilent;
2724 case EEP_OB_2:
2725 return pModal->ob_01;
2726 case EEP_DB_2:
2727 return pModal->db1_01;
2728 case EEP_MINOR_REV:
2729 return pBase->version & AR5416_EEP_VER_MINOR_MASK;
2730 case EEP_TX_MASK:
2731 return pBase->txMask;
2732 case EEP_RX_MASK:
2733 return pBase->rxMask;
2734 default:
2735 return 0;
2736 }
2737}
2738
2739static u32 ath9k_hw_get_eeprom_def(struct ath_hal *ah,
2740 enum eeprom_param param)
2741{
2742 struct ath_hal_5416 *ahp = AH5416(ah);
2743 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
2744 struct modal_eep_header *pModal = eep->modalHeader;
2745 struct base_eep_header *pBase = &eep->baseEepHeader;
2746
2747 switch (param) {
2748 case EEP_NFTHRESH_5:
2749 return pModal[0].noiseFloorThreshCh[0];
2750 case EEP_NFTHRESH_2:
2751 return pModal[1].noiseFloorThreshCh[0];
2752 case AR_EEPROM_MAC(0):
2753 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
2754 case AR_EEPROM_MAC(1):
2755 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
2756 case AR_EEPROM_MAC(2):
2757 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
2758 case EEP_REG_0:
2759 return pBase->regDmn[0];
2760 case EEP_REG_1:
2761 return pBase->regDmn[1];
2762 case EEP_OP_CAP:
2763 return pBase->deviceCap;
2764 case EEP_OP_MODE:
2765 return pBase->opCapFlags;
2766 case EEP_RF_SILENT:
2767 return pBase->rfSilent;
2768 case EEP_OB_5:
2769 return pModal[0].ob;
2770 case EEP_DB_5:
2771 return pModal[0].db;
2772 case EEP_OB_2:
2773 return pModal[1].ob;
2774 case EEP_DB_2:
2775 return pModal[1].db;
2776 case EEP_MINOR_REV:
2777 return pBase->version & AR5416_EEP_VER_MINOR_MASK;
2778 case EEP_TX_MASK:
2779 return pBase->txMask;
2780 case EEP_RX_MASK:
2781 return pBase->rxMask;
2782 case EEP_RXGAIN_TYPE:
2783 return pBase->rxGainType;
2784 case EEP_TXGAIN_TYPE:
2785 return pBase->txGainType;
2786 2674
2787 default: 2675#undef EEP_DEF_SPURCHAN
2788 return 0;
2789 }
2790} 2676}
2791 2677
2792static u32 (*ath9k_get_eeprom[])(struct ath_hal *, enum eeprom_param) = { 2678struct eeprom_ops eep_def_ops = {
2793 ath9k_hw_get_eeprom_def, 2679 .check_eeprom = ath9k_hw_def_check_eeprom,
2794 ath9k_hw_get_eeprom_4k 2680 .get_eeprom = ath9k_hw_def_get_eeprom,
2681 .fill_eeprom = ath9k_hw_def_fill_eeprom,
2682 .get_eeprom_ver = ath9k_hw_def_get_eeprom_ver,
2683 .get_eeprom_rev = ath9k_hw_def_get_eeprom_rev,
2684 .get_num_ant_config = ath9k_hw_def_get_num_ant_config,
2685 .get_eeprom_antenna_cfg = ath9k_hw_def_get_eeprom_antenna_cfg,
2686 .set_board_values = ath9k_hw_def_set_board_values,
2687 .set_addac = ath9k_hw_def_set_addac,
2688 .set_txpower = ath9k_hw_def_set_txpower,
2689 .get_spur_channel = ath9k_hw_def_get_spur_channel
2795}; 2690};
2796 2691
2797u32 ath9k_hw_get_eeprom(struct ath_hal *ah, 2692int ath9k_hw_eeprom_attach(struct ath_hw *ah)
2798 enum eeprom_param param)
2799{
2800 struct ath_hal_5416 *ahp = AH5416(ah);
2801
2802 return ath9k_get_eeprom[ahp->ah_eep_map](ah, param);
2803}
2804
2805int ath9k_hw_eeprom_attach(struct ath_hal *ah)
2806{ 2693{
2807 int status; 2694 int status;
2808 struct ath_hal_5416 *ahp = AH5416(ah);
2809
2810 if (ath9k_hw_use_flash(ah))
2811 ath9k_hw_flash_map(ah);
2812 2695
2813 if (AR_SREV_9285(ah)) 2696 if (AR_SREV_9285(ah)) {
2814 ahp->ah_eep_map = EEP_MAP_4KBITS; 2697 ah->eep_map = EEP_MAP_4KBITS;
2815 else 2698 ah->eep_ops = &eep_4k_ops;
2816 ahp->ah_eep_map = EEP_MAP_DEFAULT; 2699 } else {
2700 ah->eep_map = EEP_MAP_DEFAULT;
2701 ah->eep_ops = &eep_def_ops;
2702 }
2817 2703
2818 if (!ath9k_hw_fill_eeprom(ah)) 2704 if (!ah->eep_ops->fill_eeprom(ah))
2819 return -EIO; 2705 return -EIO;
2820 2706
2821 status = ath9k_hw_check_eeprom(ah); 2707 status = ah->eep_ops->check_eeprom(ah);
2822 2708
2823 return status; 2709 return status;
2824} 2710}
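Taken together, the eeprom.c changes replace the per-map function-pointer arrays indexed by ah_eep_map with a single eeprom_ops vtable (eep_4k_ops or eep_def_ops) selected once in ath9k_hw_eeprom_attach(); from then on callers dispatch through ah->eep_ops. A minimal caller-side sketch, with a hypothetical function name and error handling kept short:

/* Sketch only: reading one EEPROM parameter through the new ops table. */
static int example_read_regdomain(struct ath_hw *ah, u32 *regd0)
{
	int ret;

	ret = ath9k_hw_eeprom_attach(ah);	/* picks eep_4k_ops or eep_def_ops */
	if (ret)
		return ret;

	*regd0 = ah->eep_ops->get_eeprom(ah, EEP_REG_0);

	return 0;
}
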
diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath9k/eeprom.h
new file mode 100644
index 00000000000..99863b57044
--- /dev/null
+++ b/drivers/net/wireless/ath9k/eeprom.h
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef EEPROM_H
18#define EEPROM_H
19
20#define AH_USE_EEPROM 0x1
21
22#ifdef __BIG_ENDIAN
23#define AR5416_EEPROM_MAGIC 0x5aa5
24#else
25#define AR5416_EEPROM_MAGIC 0xa55a
26#endif
27
28#define CTRY_DEBUG 0x1ff
29#define CTRY_DEFAULT 0
30
31#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001
32#define AR_EEPROM_EEPCAP_AES_DIS 0x0002
33#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004
34#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008
35#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0
36#define AR_EEPROM_EEPCAP_MAXQCU_S 4
37#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200
38#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000
39#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12
40
41#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
42#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
43#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100
44#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
45#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
46#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
47
48#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000
49#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000
50
51#define AR5416_EEPROM_MAGIC_OFFSET 0x0
52#define AR5416_EEPROM_S 2
53#define AR5416_EEPROM_OFFSET 0x2000
54#define AR5416_EEPROM_MAX 0xae0
55
56#define AR5416_EEPROM_START_ADDR \
57 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
58
59#define SD_NO_CTL 0xE0
60#define NO_CTL 0xff
61#define CTL_MODE_M 7
62#define CTL_11A 0
63#define CTL_11B 1
64#define CTL_11G 2
65#define CTL_2GHT20 5
66#define CTL_5GHT20 6
67#define CTL_2GHT40 7
68#define CTL_5GHT40 8
69
70#define EXT_ADDITIVE (0x8000)
71#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
72#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
73#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
74
75#define SUB_NUM_CTL_MODES_AT_5G_40 2
76#define SUB_NUM_CTL_MODES_AT_2G_40 3
77
78#define AR_EEPROM_MAC(i) (0x1d+(i))
79#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
80#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
81#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
82
83#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
84#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
85#define AR_EEPROM_RFSILENT_POLARITY 0x0002
86#define AR_EEPROM_RFSILENT_POLARITY_S 1
87
88#define EEP_RFSILENT_ENABLED 0x0001
89#define EEP_RFSILENT_ENABLED_S 0
90#define EEP_RFSILENT_POLARITY 0x0002
91#define EEP_RFSILENT_POLARITY_S 1
92#define EEP_RFSILENT_GPIO_SEL 0x001c
93#define EEP_RFSILENT_GPIO_SEL_S 2
94
95#define AR5416_OPFLAGS_11A 0x01
96#define AR5416_OPFLAGS_11G 0x02
97#define AR5416_OPFLAGS_N_5G_HT40 0x04
98#define AR5416_OPFLAGS_N_2G_HT40 0x08
99#define AR5416_OPFLAGS_N_5G_HT20 0x10
100#define AR5416_OPFLAGS_N_2G_HT20 0x20
101
102#define AR5416_EEP_NO_BACK_VER 0x1
103#define AR5416_EEP_VER 0xE
104#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
105#define AR5416_EEP_MINOR_VER_2 0x2
106#define AR5416_EEP_MINOR_VER_3 0x3
107#define AR5416_EEP_MINOR_VER_7 0x7
108#define AR5416_EEP_MINOR_VER_9 0x9
109#define AR5416_EEP_MINOR_VER_16 0x10
110#define AR5416_EEP_MINOR_VER_17 0x11
111#define AR5416_EEP_MINOR_VER_19 0x13
112#define AR5416_EEP_MINOR_VER_20 0x14
113
114#define AR5416_NUM_5G_CAL_PIERS 8
115#define AR5416_NUM_2G_CAL_PIERS 4
116#define AR5416_NUM_5G_20_TARGET_POWERS 8
117#define AR5416_NUM_5G_40_TARGET_POWERS 8
118#define AR5416_NUM_2G_CCK_TARGET_POWERS 3
119#define AR5416_NUM_2G_20_TARGET_POWERS 4
120#define AR5416_NUM_2G_40_TARGET_POWERS 4
121#define AR5416_NUM_CTLS 24
122#define AR5416_NUM_BAND_EDGES 8
123#define AR5416_NUM_PD_GAINS 4
124#define AR5416_PD_GAINS_IN_MASK 4
125#define AR5416_PD_GAIN_ICEPTS 5
126#define AR5416_EEPROM_MODAL_SPURS 5
127#define AR5416_MAX_RATE_POWER 63
128#define AR5416_NUM_PDADC_VALUES 128
129#define AR5416_BCHAN_UNUSED 0xFF
130#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
131#define AR5416_MAX_CHAINS 3
132#define AR5416_PWR_TABLE_OFFSET -5
133
134/* Rx gain type values */
135#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0
136#define AR5416_EEP_RXGAIN_13DB_BACKOFF 1
137#define AR5416_EEP_RXGAIN_ORIG 2
138
139/* Tx gain type values */
140#define AR5416_EEP_TXGAIN_ORIGINAL 0
141#define AR5416_EEP_TXGAIN_HIGH_POWER 1
142
143#define AR5416_EEP4K_START_LOC 64
144#define AR5416_EEP4K_NUM_2G_CAL_PIERS 3
145#define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3
146#define AR5416_EEP4K_NUM_2G_20_TARGET_POWERS 3
147#define AR5416_EEP4K_NUM_2G_40_TARGET_POWERS 3
148#define AR5416_EEP4K_NUM_CTLS 12
149#define AR5416_EEP4K_NUM_BAND_EDGES 4
150#define AR5416_EEP4K_NUM_PD_GAINS 2
151#define AR5416_EEP4K_PD_GAINS_IN_MASK 4
152#define AR5416_EEP4K_PD_GAIN_ICEPTS 5
153#define AR5416_EEP4K_MAX_CHAINS 1
154
155enum eeprom_param {
156 EEP_NFTHRESH_5,
157 EEP_NFTHRESH_2,
158 EEP_MAC_MSW,
159 EEP_MAC_MID,
160 EEP_MAC_LSW,
161 EEP_REG_0,
162 EEP_REG_1,
163 EEP_OP_CAP,
164 EEP_OP_MODE,
165 EEP_RF_SILENT,
166 EEP_OB_5,
167 EEP_DB_5,
168 EEP_OB_2,
169 EEP_DB_2,
170 EEP_MINOR_REV,
171 EEP_TX_MASK,
172 EEP_RX_MASK,
173 EEP_RXGAIN_TYPE,
174 EEP_TXGAIN_TYPE,
175 EEP_DAC_HPWR_5G,
176};
177
178enum ar5416_rates {
179 rate6mb, rate9mb, rate12mb, rate18mb,
180 rate24mb, rate36mb, rate48mb, rate54mb,
181 rate1l, rate2l, rate2s, rate5_5l,
182 rate5_5s, rate11l, rate11s, rateXr,
183 rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3,
184 rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7,
185 rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3,
186 rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7,
187 rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm,
188 Ar5416RateSize
189};
190
191enum ath9k_hal_freq_band {
192 ATH9K_HAL_FREQ_BAND_5GHZ = 0,
193 ATH9K_HAL_FREQ_BAND_2GHZ = 1
194};
195
196struct base_eep_header {
197 u16 length;
198 u16 checksum;
199 u16 version;
200 u8 opCapFlags;
201 u8 eepMisc;
202 u16 regDmn[2];
203 u8 macAddr[6];
204 u8 rxMask;
205 u8 txMask;
206 u16 rfSilent;
207 u16 blueToothOptions;
208 u16 deviceCap;
209 u32 binBuildNumber;
210 u8 deviceType;
211 u8 pwdclkind;
212 u8 futureBase_1[2];
213 u8 rxGainType;
214 u8 dacHiPwrMode_5G;
215 u8 futureBase_2;
216 u8 dacLpMode;
217 u8 txGainType;
218 u8 rcChainMask;
219 u8 desiredScaleCCK;
220 u8 futureBase_3[23];
221} __packed;
222
223struct base_eep_header_4k {
224 u16 length;
225 u16 checksum;
226 u16 version;
227 u8 opCapFlags;
228 u8 eepMisc;
229 u16 regDmn[2];
230 u8 macAddr[6];
231 u8 rxMask;
232 u8 txMask;
233 u16 rfSilent;
234 u16 blueToothOptions;
235 u16 deviceCap;
236 u32 binBuildNumber;
237 u8 deviceType;
238 u8 futureBase[1];
239} __packed;
240
241
242struct spur_chan {
243 u16 spurChan;
244 u8 spurRangeLow;
245 u8 spurRangeHigh;
246} __packed;
247
248struct modal_eep_header {
249 u32 antCtrlChain[AR5416_MAX_CHAINS];
250 u32 antCtrlCommon;
251 u8 antennaGainCh[AR5416_MAX_CHAINS];
252 u8 switchSettling;
253 u8 txRxAttenCh[AR5416_MAX_CHAINS];
254 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
255 u8 adcDesiredSize;
256 u8 pgaDesiredSize;
257 u8 xlnaGainCh[AR5416_MAX_CHAINS];
258 u8 txEndToXpaOff;
259 u8 txEndToRxOn;
260 u8 txFrameToXpaOn;
261 u8 thresh62;
262 u8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
263 u8 xpdGain;
264 u8 xpd;
265 u8 iqCalICh[AR5416_MAX_CHAINS];
266 u8 iqCalQCh[AR5416_MAX_CHAINS];
267 u8 pdGainOverlap;
268 u8 ob;
269 u8 db;
270 u8 xpaBiasLvl;
271 u8 pwrDecreaseFor2Chain;
272 u8 pwrDecreaseFor3Chain;
273 u8 txFrameToDataStart;
274 u8 txFrameToPaOn;
275 u8 ht40PowerIncForPdadc;
276 u8 bswAtten[AR5416_MAX_CHAINS];
277 u8 bswMargin[AR5416_MAX_CHAINS];
278 u8 swSettleHt40;
279 u8 xatten2Db[AR5416_MAX_CHAINS];
280 u8 xatten2Margin[AR5416_MAX_CHAINS];
281 u8 ob_ch1;
282 u8 db_ch1;
283 u8 useAnt1:1,
284 force_xpaon:1,
285 local_bias:1,
286 femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
287 u8 miscBits;
288 u16 xpaBiasLvlFreq[3];
289 u8 futureModal[6];
290
291 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
292} __packed;
293
294struct modal_eep_4k_header {
295 u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
296 u32 antCtrlCommon;
297 u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS];
298 u8 switchSettling;
299 u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS];
300 u8 rxTxMarginCh[AR5416_EEP4K_MAX_CHAINS];
301 u8 adcDesiredSize;
302 u8 pgaDesiredSize;
303 u8 xlnaGainCh[AR5416_EEP4K_MAX_CHAINS];
304 u8 txEndToXpaOff;
305 u8 txEndToRxOn;
306 u8 txFrameToXpaOn;
307 u8 thresh62;
308 u8 noiseFloorThreshCh[AR5416_EEP4K_MAX_CHAINS];
309 u8 xpdGain;
310 u8 xpd;
311 u8 iqCalICh[AR5416_EEP4K_MAX_CHAINS];
312 u8 iqCalQCh[AR5416_EEP4K_MAX_CHAINS];
313 u8 pdGainOverlap;
314 u8 ob_01;
315 u8 db1_01;
316 u8 xpaBiasLvl;
317 u8 txFrameToDataStart;
318 u8 txFrameToPaOn;
319 u8 ht40PowerIncForPdadc;
320 u8 bswAtten[AR5416_EEP4K_MAX_CHAINS];
321 u8 bswMargin[AR5416_EEP4K_MAX_CHAINS];
322 u8 swSettleHt40;
323 u8 xatten2Db[AR5416_EEP4K_MAX_CHAINS];
324 u8 xatten2Margin[AR5416_EEP4K_MAX_CHAINS];
325 u8 db2_01;
326 u8 version;
327 u16 ob_234;
328 u16 db1_234;
329 u16 db2_234;
330 u8 futureModal[4];
331
332 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
333} __packed;
334
335
336struct cal_data_per_freq {
337 u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
338 u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
339} __packed;
340
341struct cal_data_per_freq_4k {
342 u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
343 u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
344} __packed;
345
346struct cal_target_power_leg {
347 u8 bChannel;
348 u8 tPow2x[4];
349} __packed;
350
351struct cal_target_power_ht {
352 u8 bChannel;
353 u8 tPow2x[8];
354} __packed;
355
356
357#ifdef __BIG_ENDIAN_BITFIELD
358struct cal_ctl_edges {
359 u8 bChannel;
360 u8 flag:2, tPower:6;
361} __packed;
362#else
363struct cal_ctl_edges {
364 u8 bChannel;
365 u8 tPower:6, flag:2;
366} __packed;
367#endif
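
The two cal_ctl_edges definitions above exist so that the 2-bit flag and the 6-bit tPower field land on the same bits of the packed EEPROM byte whether the compiler allocates bitfields from the least- or the most-significant end. A self-contained sketch of that byte layout, using explicit masks instead of bitfields (the example value is arbitrary and only illustrative):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t raw = 0xE5;                  /* arbitrary CTL edge byte */
		unsigned tPower = raw & 0x3f;        /* low 6 bits  -> 37 (0x25) */
		unsigned flag   = (raw >> 6) & 0x3;  /* high 2 bits -> 3 */

		printf("tPower=%u flag=%u\n", tPower, flag);
		return 0;
	}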
368
369struct cal_ctl_data {
370 struct cal_ctl_edges
371 ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
372} __packed;
373
374struct cal_ctl_data_4k {
375 struct cal_ctl_edges
376 ctlEdges[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_BAND_EDGES];
377} __packed;
378
379struct ar5416_eeprom_def {
380 struct base_eep_header baseEepHeader;
381 u8 custData[64];
382 struct modal_eep_header modalHeader[2];
383 u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS];
384 u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS];
385 struct cal_data_per_freq
386 calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS];
387 struct cal_data_per_freq
388 calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
389 struct cal_target_power_leg
390 calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS];
391 struct cal_target_power_ht
392 calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS];
393 struct cal_target_power_ht
394 calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS];
395 struct cal_target_power_leg
396 calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS];
397 struct cal_target_power_leg
398 calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS];
399 struct cal_target_power_ht
400 calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS];
401 struct cal_target_power_ht
402 calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS];
403 u8 ctlIndex[AR5416_NUM_CTLS];
404 struct cal_ctl_data ctlData[AR5416_NUM_CTLS];
405 u8 padding;
406} __packed;
407
408struct ar5416_eeprom_4k {
409 struct base_eep_header_4k baseEepHeader;
410 u8 custData[20];
411 struct modal_eep_4k_header modalHeader;
412 u8 calFreqPier2G[AR5416_EEP4K_NUM_2G_CAL_PIERS];
413 struct cal_data_per_freq_4k
414 calPierData2G[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_2G_CAL_PIERS];
415 struct cal_target_power_leg
416 calTargetPowerCck[AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS];
417 struct cal_target_power_leg
418 calTargetPower2G[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
419 struct cal_target_power_ht
420 calTargetPower2GHT20[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
421 struct cal_target_power_ht
422 calTargetPower2GHT40[AR5416_EEP4K_NUM_2G_40_TARGET_POWERS];
423 u8 ctlIndex[AR5416_EEP4K_NUM_CTLS];
424 struct cal_ctl_data_4k ctlData[AR5416_EEP4K_NUM_CTLS];
425 u8 padding;
426} __packed;
427
428enum reg_ext_bitmap {
429 REG_EXT_JAPAN_MIDBAND = 1,
430 REG_EXT_FCC_DFS_HT40 = 2,
431 REG_EXT_JAPAN_NONDFS_HT40 = 3,
432 REG_EXT_JAPAN_DFS_HT40 = 4
433};
434
435struct ath9k_country_entry {
436 u16 countryCode;
437 u16 regDmnEnum;
438 u16 regDmn5G;
439 u16 regDmn2G;
440 u8 isMultidomain;
441 u8 iso[3];
442};
443
444enum ath9k_eep_map {
445 EEP_MAP_DEFAULT = 0x0,
446 EEP_MAP_4KBITS,
447 EEP_MAP_MAX
448};
449
450struct eeprom_ops {
451 int (*check_eeprom)(struct ath_hw *hw);
452 u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
453 bool (*fill_eeprom)(struct ath_hw *hw);
454 int (*get_eeprom_ver)(struct ath_hw *hw);
455 int (*get_eeprom_rev)(struct ath_hw *hw);
456 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band);
457 u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
458 struct ath9k_channel *chan);
459 bool (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
460 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
461 int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
462 u16 cfgCtl, u8 twiceAntennaReduction,
463 u8 twiceMaxRegulatoryPower, u8 powerLimit);
464 u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
465};
466
467#define ar5416_get_ntxchains(_txchainmask) \
468 (((_txchainmask >> 2) & 1) + \
469 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
470
471int ath9k_hw_eeprom_attach(struct ath_hw *ah);
472
473#endif /* EEPROM_H */
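
The ar5416_get_ntxchains() macro above counts the set bits in the low three bits of the TX chain mask, i.e. the number of enabled TX chains. A standalone usage sketch, illustrative only and outside the driver:

	#include <stdio.h>

	#define ar5416_get_ntxchains(_txchainmask) \
		(((_txchainmask >> 2) & 1) + \
		 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))

	int main(void)
	{
		/* 0x7 -> chains 0,1,2 enabled; 0x5 -> chains 0 and 2; 0x1 -> chain 0 */
		printf("%d %d %d\n",
		       ar5416_get_ntxchains(0x7),   /* 3 */
		       ar5416_get_ntxchains(0x5),   /* 2 */
		       ar5416_get_ntxchains(0x1));  /* 1 */
		return 0;
	}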
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 34474edefc9..cad8e39c201 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -17,84 +17,74 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <asm/unaligned.h> 18#include <asm/unaligned.h>
19 19
20#include "core.h" 20#include "ath9k.h"
21#include "hw.h"
22#include "reg.h"
23#include "phy.h"
24#include "initvals.h" 21#include "initvals.h"
25 22
26static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 }; 23static int btcoex_enable;
24module_param(btcoex_enable, bool, 0);
25MODULE_PARM_DESC(btcoex_enable, "Enable Bluetooth coexistence support");
27 26
28extern struct hal_percal_data iq_cal_multi_sample; 27#define ATH9K_CLOCK_RATE_CCK 22
29extern struct hal_percal_data iq_cal_single_sample; 28#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
30extern struct hal_percal_data adc_gain_cal_multi_sample; 29#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
31extern struct hal_percal_data adc_gain_cal_single_sample;
32extern struct hal_percal_data adc_dc_cal_multi_sample;
33extern struct hal_percal_data adc_dc_cal_single_sample;
34extern struct hal_percal_data adc_init_dc_cal;
35 30
36static bool ath9k_hw_set_reset_reg(struct ath_hal *ah, u32 type); 31static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
37static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan, 32static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
38 enum ath9k_ht_macmode macmode); 33 enum ath9k_ht_macmode macmode);
39static u32 ath9k_hw_ini_fixup(struct ath_hal *ah, 34static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
40 struct ar5416_eeprom_def *pEepData, 35 struct ar5416_eeprom_def *pEepData,
41 u32 reg, u32 value); 36 u32 reg, u32 value);
42static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan); 37static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
43static void ath9k_hw_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan); 38static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
44 39
45/********************/ 40/********************/
46/* Helper Functions */ 41/* Helper Functions */
47/********************/ 42/********************/
48 43
49static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks) 44static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
50{ 45{
51 if (ah->ah_curchan != NULL) 46 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
52 return clks / CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)]; 47
53 else 48 if (!ah->curchan) /* should really check for CCK instead */
54 return clks / CLOCK_RATE[ATH9K_MODE_11B]; 49 return clks / ATH9K_CLOCK_RATE_CCK;
50 if (conf->channel->band == IEEE80211_BAND_2GHZ)
51 return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
52
53 return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
55} 54}
56 55
57static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks) 56static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
58{ 57{
59 struct ath9k_channel *chan = ah->ah_curchan; 58 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
60 59
61 if (chan && IS_CHAN_HT40(chan)) 60 if (conf_is_ht40(conf))
62 return ath9k_hw_mac_usec(ah, clks) / 2; 61 return ath9k_hw_mac_usec(ah, clks) / 2;
63 else 62 else
64 return ath9k_hw_mac_usec(ah, clks); 63 return ath9k_hw_mac_usec(ah, clks);
65} 64}
66 65
67static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs) 66static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
68{ 67{
69 if (ah->ah_curchan != NULL) 68 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
70 return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah, 69
71 ah->ah_curchan)]; 70 if (!ah->curchan) /* should really check for CCK instead */
72 else 71 return usecs *ATH9K_CLOCK_RATE_CCK;
73 return usecs * CLOCK_RATE[ATH9K_MODE_11B]; 72 if (conf->channel->band == IEEE80211_BAND_2GHZ)
73 return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
74 return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM;
74} 75}
75 76
76static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs) 77static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
77{ 78{
78 struct ath9k_channel *chan = ah->ah_curchan; 79 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
79 80
80 if (chan && IS_CHAN_HT40(chan)) 81 if (conf_is_ht40(conf))
81 return ath9k_hw_mac_clks(ah, usecs) * 2; 82 return ath9k_hw_mac_clks(ah, usecs) * 2;
82 else 83 else
83 return ath9k_hw_mac_clks(ah, usecs); 84 return ath9k_hw_mac_clks(ah, usecs);
84} 85}
85 86
86enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah, 87bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val)
87 const struct ath9k_channel *chan)
88{
89 if (IS_CHAN_B(chan))
90 return ATH9K_MODE_11B;
91 if (IS_CHAN_G(chan))
92 return ATH9K_MODE_11G;
93
94 return ATH9K_MODE_11A;
95}
96
97bool ath9k_hw_wait(struct ath_hal *ah, u32 reg, u32 mask, u32 val)
98{ 88{
99 int i; 89 int i;
100 90
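
The hunk above drops the old CLOCK_RATE[] table in favour of per-band constants (22 MHz for CCK, 44 MHz for 2 GHz OFDM, 40 MHz for 5 GHz OFDM) and scales by two on HT40 channels. A self-contained sketch of that clock/microsecond conversion; the helper names and the enum are illustrative, not part of the driver:

	#include <stdio.h>

	enum band { BAND_CCK, BAND_2GHZ_OFDM, BAND_5GHZ_OFDM };

	static unsigned int clock_rate_mhz(enum band b)
	{
		switch (b) {
		case BAND_CCK:       return 22;
		case BAND_2GHZ_OFDM: return 44;
		default:             return 40;   /* 5 GHz OFDM */
		}
	}

	/* clocks -> microseconds; an HT40 channel clocks twice as fast, so the
	 * same clock count spans half as many microseconds */
	static unsigned int clks_to_usec(unsigned int clks, enum band b, int ht40)
	{
		unsigned int us = clks / clock_rate_mhz(b);
		return ht40 ? us / 2 : us;
	}

	int main(void)
	{
		printf("%u\n", clks_to_usec(880, BAND_2GHZ_OFDM, 0));  /* 20 */
		printf("%u\n", clks_to_usec(880, BAND_2GHZ_OFDM, 1));  /* 10 */
		return 0;
	}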
@@ -124,11 +114,11 @@ u32 ath9k_hw_reverse_bits(u32 val, u32 n)
124 return retval; 114 return retval;
125} 115}
126 116
127bool ath9k_get_channel_edges(struct ath_hal *ah, 117bool ath9k_get_channel_edges(struct ath_hw *ah,
128 u16 flags, u16 *low, 118 u16 flags, u16 *low,
129 u16 *high) 119 u16 *high)
130{ 120{
131 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 121 struct ath9k_hw_capabilities *pCap = &ah->caps;
132 122
133 if (flags & CHANNEL_5GHZ) { 123 if (flags & CHANNEL_5GHZ) {
134 *low = pCap->low_5ghz_chan; 124 *low = pCap->low_5ghz_chan;
@@ -143,7 +133,7 @@ bool ath9k_get_channel_edges(struct ath_hal *ah,
143 return false; 133 return false;
144} 134}
145 135
146u16 ath9k_hw_computetxtime(struct ath_hal *ah, 136u16 ath9k_hw_computetxtime(struct ath_hw *ah,
147 struct ath_rate_table *rates, 137 struct ath_rate_table *rates,
148 u32 frameLen, u16 rateix, 138 u32 frameLen, u16 rateix,
149 bool shortPreamble) 139 bool shortPreamble)
@@ -165,15 +155,15 @@ u16 ath9k_hw_computetxtime(struct ath_hal *ah,
165 txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps); 155 txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps);
166 break; 156 break;
167 case WLAN_RC_PHY_OFDM: 157 case WLAN_RC_PHY_OFDM:
168 if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) { 158 if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) {
169 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; 159 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
170 numBits = OFDM_PLCP_BITS + (frameLen << 3); 160 numBits = OFDM_PLCP_BITS + (frameLen << 3);
171 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); 161 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
172 txTime = OFDM_SIFS_TIME_QUARTER 162 txTime = OFDM_SIFS_TIME_QUARTER
173 + OFDM_PREAMBLE_TIME_QUARTER 163 + OFDM_PREAMBLE_TIME_QUARTER
174 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); 164 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
175 } else if (ah->ah_curchan && 165 } else if (ah->curchan &&
176 IS_CHAN_HALF_RATE(ah->ah_curchan)) { 166 IS_CHAN_HALF_RATE(ah->curchan)) {
177 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; 167 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
178 numBits = OFDM_PLCP_BITS + (frameLen << 3); 168 numBits = OFDM_PLCP_BITS + (frameLen << 3);
179 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); 169 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
@@ -199,52 +189,11 @@ u16 ath9k_hw_computetxtime(struct ath_hal *ah,
199 return txTime; 189 return txTime;
200} 190}
201 191
202u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags) 192void ath9k_hw_get_channel_centers(struct ath_hw *ah,
203{
204 if (flags & CHANNEL_2GHZ) {
205 if (freq == 2484)
206 return 14;
207 if (freq < 2484)
208 return (freq - 2407) / 5;
209 else
210 return 15 + ((freq - 2512) / 20);
211 } else if (flags & CHANNEL_5GHZ) {
212 if (ath9k_regd_is_public_safety_sku(ah) &&
213 IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
214 return ((freq * 10) +
215 (((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
216 } else if ((flags & CHANNEL_A) && (freq <= 5000)) {
217 return (freq - 4000) / 5;
218 } else {
219 return (freq - 5000) / 5;
220 }
221 } else {
222 if (freq == 2484)
223 return 14;
224 if (freq < 2484)
225 return (freq - 2407) / 5;
226 if (freq < 5000) {
227 if (ath9k_regd_is_public_safety_sku(ah)
228 && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
229 return ((freq * 10) +
230 (((freq % 5) ==
231 2) ? 5 : 0) - 49400) / 5;
232 } else if (freq > 4900) {
233 return (freq - 4000) / 5;
234 } else {
235 return 15 + ((freq - 2512) / 20);
236 }
237 }
238 return (freq - 5000) / 5;
239 }
240}
241
242void ath9k_hw_get_channel_centers(struct ath_hal *ah,
243 struct ath9k_channel *chan, 193 struct ath9k_channel *chan,
244 struct chan_centers *centers) 194 struct chan_centers *centers)
245{ 195{
246 int8_t extoff; 196 int8_t extoff;
247 struct ath_hal_5416 *ahp = AH5416(ah);
248 197
249 if (!IS_CHAN_HT40(chan)) { 198 if (!IS_CHAN_HT40(chan)) {
250 centers->ctl_center = centers->ext_center = 199 centers->ctl_center = centers->ext_center =
@@ -267,16 +216,15 @@ void ath9k_hw_get_channel_centers(struct ath_hal *ah,
267 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); 216 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
268 centers->ext_center = 217 centers->ext_center =
269 centers->synth_center + (extoff * 218 centers->synth_center + (extoff *
270 ((ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ? 219 ((ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ?
271 HT40_CHANNEL_CENTER_SHIFT : 15)); 220 HT40_CHANNEL_CENTER_SHIFT : 15));
272
273} 221}
274 222
275/******************/ 223/******************/
276/* Chip Revisions */ 224/* Chip Revisions */
277/******************/ 225/******************/
278 226
279static void ath9k_hw_read_revisions(struct ath_hal *ah) 227static void ath9k_hw_read_revisions(struct ath_hw *ah)
280{ 228{
281 u32 val; 229 u32 val;
282 230
@@ -284,21 +232,22 @@ static void ath9k_hw_read_revisions(struct ath_hal *ah)
284 232
285 if (val == 0xFF) { 233 if (val == 0xFF) {
286 val = REG_READ(ah, AR_SREV); 234 val = REG_READ(ah, AR_SREV);
287 ah->ah_macVersion = (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; 235 ah->hw_version.macVersion =
288 ah->ah_macRev = MS(val, AR_SREV_REVISION2); 236 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
289 ah->ah_isPciExpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1; 237 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
238 ah->is_pciexpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
290 } else { 239 } else {
291 if (!AR_SREV_9100(ah)) 240 if (!AR_SREV_9100(ah))
292 ah->ah_macVersion = MS(val, AR_SREV_VERSION); 241 ah->hw_version.macVersion = MS(val, AR_SREV_VERSION);
293 242
294 ah->ah_macRev = val & AR_SREV_REVISION; 243 ah->hw_version.macRev = val & AR_SREV_REVISION;
295 244
296 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) 245 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
297 ah->ah_isPciExpress = true; 246 ah->is_pciexpress = true;
298 } 247 }
299} 248}
300 249
301static int ath9k_hw_get_radiorev(struct ath_hal *ah) 250static int ath9k_hw_get_radiorev(struct ath_hw *ah)
302{ 251{
303 u32 val; 252 u32 val;
304 int i; 253 int i;
@@ -317,9 +266,9 @@ static int ath9k_hw_get_radiorev(struct ath_hal *ah)
317/* HW Attach, Detach, Init Routines */ 266/* HW Attach, Detach, Init Routines */
318/************************************/ 267/************************************/
319 268
320static void ath9k_hw_disablepcie(struct ath_hal *ah) 269static void ath9k_hw_disablepcie(struct ath_hw *ah)
321{ 270{
322 if (!AR_SREV_9100(ah)) 271 if (AR_SREV_9100(ah))
323 return; 272 return;
324 273
325 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 274 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
@@ -335,7 +284,7 @@ static void ath9k_hw_disablepcie(struct ath_hal *ah)
335 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 284 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
336} 285}
337 286
338static bool ath9k_hw_chip_test(struct ath_hal *ah) 287static bool ath9k_hw_chip_test(struct ath_hw *ah)
339{ 288{
340 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; 289 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
341 u32 regHold[2]; 290 u32 regHold[2];
@@ -377,6 +326,7 @@ static bool ath9k_hw_chip_test(struct ath_hal *ah)
377 REG_WRITE(ah, regAddr[i], regHold[i]); 326 REG_WRITE(ah, regAddr[i], regHold[i]);
378 } 327 }
379 udelay(100); 328 udelay(100);
329
380 return true; 330 return true;
381} 331}
382 332
@@ -389,6 +339,8 @@ static const char *ath9k_hw_devname(u16 devid)
389 return "Atheros 5418"; 339 return "Atheros 5418";
390 case AR9160_DEVID_PCI: 340 case AR9160_DEVID_PCI:
391 return "Atheros 9160"; 341 return "Atheros 9160";
342 case AR5416_AR9100_DEVID:
343 return "Atheros 9100";
392 case AR9280_DEVID_PCI: 344 case AR9280_DEVID_PCI:
393 case AR9280_DEVID_PCIE: 345 case AR9280_DEVID_PCIE:
394 return "Atheros 9280"; 346 return "Atheros 9280";
@@ -399,99 +351,91 @@ static const char *ath9k_hw_devname(u16 devid)
399 return NULL; 351 return NULL;
400} 352}
401 353
402static void ath9k_hw_set_defaults(struct ath_hal *ah) 354static void ath9k_hw_set_defaults(struct ath_hw *ah)
403{ 355{
404 int i; 356 int i;
405 357
406 ah->ah_config.dma_beacon_response_time = 2; 358 ah->config.dma_beacon_response_time = 2;
407 ah->ah_config.sw_beacon_response_time = 10; 359 ah->config.sw_beacon_response_time = 10;
408 ah->ah_config.additional_swba_backoff = 0; 360 ah->config.additional_swba_backoff = 0;
409 ah->ah_config.ack_6mb = 0x0; 361 ah->config.ack_6mb = 0x0;
410 ah->ah_config.cwm_ignore_extcca = 0; 362 ah->config.cwm_ignore_extcca = 0;
411 ah->ah_config.pcie_powersave_enable = 0; 363 ah->config.pcie_powersave_enable = 0;
412 ah->ah_config.pcie_l1skp_enable = 0; 364 ah->config.pcie_l1skp_enable = 0;
413 ah->ah_config.pcie_clock_req = 0; 365 ah->config.pcie_clock_req = 0;
414 ah->ah_config.pcie_power_reset = 0x100; 366 ah->config.pcie_power_reset = 0x100;
415 ah->ah_config.pcie_restore = 0; 367 ah->config.pcie_restore = 0;
416 ah->ah_config.pcie_waen = 0; 368 ah->config.pcie_waen = 0;
417 ah->ah_config.analog_shiftreg = 1; 369 ah->config.analog_shiftreg = 1;
418 ah->ah_config.ht_enable = 1; 370 ah->config.ht_enable = 1;
419 ah->ah_config.ofdm_trig_low = 200; 371 ah->config.ofdm_trig_low = 200;
420 ah->ah_config.ofdm_trig_high = 500; 372 ah->config.ofdm_trig_high = 500;
421 ah->ah_config.cck_trig_high = 200; 373 ah->config.cck_trig_high = 200;
422 ah->ah_config.cck_trig_low = 100; 374 ah->config.cck_trig_low = 100;
423 ah->ah_config.enable_ani = 1; 375 ah->config.enable_ani = 1;
424 ah->ah_config.noise_immunity_level = 4; 376 ah->config.noise_immunity_level = 4;
425 ah->ah_config.ofdm_weaksignal_det = 1; 377 ah->config.ofdm_weaksignal_det = 1;
426 ah->ah_config.cck_weaksignal_thr = 0; 378 ah->config.cck_weaksignal_thr = 0;
427 ah->ah_config.spur_immunity_level = 2; 379 ah->config.spur_immunity_level = 2;
428 ah->ah_config.firstep_level = 0; 380 ah->config.firstep_level = 0;
429 ah->ah_config.rssi_thr_high = 40; 381 ah->config.rssi_thr_high = 40;
430 ah->ah_config.rssi_thr_low = 7; 382 ah->config.rssi_thr_low = 7;
431 ah->ah_config.diversity_control = 0; 383 ah->config.diversity_control = 0;
432 ah->ah_config.antenna_switch_swap = 0; 384 ah->config.antenna_switch_swap = 0;
433 385
434 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 386 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
435 ah->ah_config.spurchans[i][0] = AR_NO_SPUR; 387 ah->config.spurchans[i][0] = AR_NO_SPUR;
436 ah->ah_config.spurchans[i][1] = AR_NO_SPUR; 388 ah->config.spurchans[i][1] = AR_NO_SPUR;
437 } 389 }
438 390
439 ah->ah_config.intr_mitigation = 1; 391 ah->config.intr_mitigation = 1;
440} 392}
441 393
442static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid, 394static struct ath_hw *ath9k_hw_newstate(u16 devid, struct ath_softc *sc,
443 struct ath_softc *sc, 395 int *status)
444 void __iomem *mem,
445 int *status)
446{ 396{
447 static const u8 defbssidmask[ETH_ALEN] = 397 struct ath_hw *ah;
448 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
449 struct ath_hal_5416 *ahp;
450 struct ath_hal *ah;
451 398
452 ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL); 399 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
453 if (ahp == NULL) { 400 if (ah == NULL) {
454 DPRINTF(sc, ATH_DBG_FATAL, 401 DPRINTF(sc, ATH_DBG_FATAL,
455 "Cannot allocate memory for state block\n"); 402 "Cannot allocate memory for state block\n");
456 *status = -ENOMEM; 403 *status = -ENOMEM;
457 return NULL; 404 return NULL;
458 } 405 }
459 406
460 ah = &ahp->ah;
461 ah->ah_sc = sc; 407 ah->ah_sc = sc;
462 ah->ah_sh = mem; 408 ah->hw_version.magic = AR5416_MAGIC;
463 ah->ah_magic = AR5416_MAGIC; 409 ah->regulatory.country_code = CTRY_DEFAULT;
464 ah->ah_countryCode = CTRY_DEFAULT; 410 ah->hw_version.devid = devid;
465 ah->ah_devid = devid; 411 ah->hw_version.subvendorid = 0;
466 ah->ah_subvendorid = 0;
467 412
468 ah->ah_flags = 0; 413 ah->ah_flags = 0;
469 if ((devid == AR5416_AR9100_DEVID)) 414 if ((devid == AR5416_AR9100_DEVID))
470 ah->ah_macVersion = AR_SREV_VERSION_9100; 415 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
471 if (!AR_SREV_9100(ah)) 416 if (!AR_SREV_9100(ah))
472 ah->ah_flags = AH_USE_EEPROM; 417 ah->ah_flags = AH_USE_EEPROM;
473 418
474 ah->ah_powerLimit = MAX_RATE_POWER; 419 ah->regulatory.power_limit = MAX_RATE_POWER;
475 ah->ah_tpScale = ATH9K_TP_SCALE_MAX; 420 ah->regulatory.tp_scale = ATH9K_TP_SCALE_MAX;
476 ahp->ah_atimWindow = 0; 421 ah->atim_window = 0;
477 ahp->ah_diversityControl = ah->ah_config.diversity_control; 422 ah->diversity_control = ah->config.diversity_control;
478 ahp->ah_antennaSwitchSwap = 423 ah->antenna_switch_swap =
479 ah->ah_config.antenna_switch_swap; 424 ah->config.antenna_switch_swap;
480 ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE; 425 ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
481 ahp->ah_beaconInterval = 100; 426 ah->beacon_interval = 100;
482 ahp->ah_enable32kHzClock = DONT_USE_32KHZ; 427 ah->enable_32kHz_clock = DONT_USE_32KHZ;
483 ahp->ah_slottime = (u32) -1; 428 ah->slottime = (u32) -1;
484 ahp->ah_acktimeout = (u32) -1; 429 ah->acktimeout = (u32) -1;
485 ahp->ah_ctstimeout = (u32) -1; 430 ah->ctstimeout = (u32) -1;
486 ahp->ah_globaltxtimeout = (u32) -1; 431 ah->globaltxtimeout = (u32) -1;
487 memcpy(&ahp->ah_bssidmask, defbssidmask, ETH_ALEN); 432
488 433 ah->gbeacon_rate = 0;
489 ahp->ah_gBeaconRate = 0;
490 434
491 return ahp; 435 return ah;
492} 436}
493 437
494static int ath9k_hw_rfattach(struct ath_hal *ah) 438static int ath9k_hw_rfattach(struct ath_hw *ah)
495{ 439{
496 bool rfStatus = false; 440 bool rfStatus = false;
497 int ecode = 0; 441 int ecode = 0;
@@ -506,7 +450,7 @@ static int ath9k_hw_rfattach(struct ath_hal *ah)
506 return 0; 450 return 0;
507} 451}
508 452
509static int ath9k_hw_rf_claim(struct ath_hal *ah) 453static int ath9k_hw_rf_claim(struct ath_hw *ah)
510{ 454{
511 u32 val; 455 u32 val;
512 456
@@ -526,88 +470,87 @@ static int ath9k_hw_rf_claim(struct ath_hal *ah)
526 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 470 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
527 "5G Radio Chip Rev 0x%02X is not " 471 "5G Radio Chip Rev 0x%02X is not "
528 "supported by this driver\n", 472 "supported by this driver\n",
529 ah->ah_analog5GhzRev); 473 ah->hw_version.analog5GhzRev);
530 return -EOPNOTSUPP; 474 return -EOPNOTSUPP;
531 } 475 }
532 476
533 ah->ah_analog5GhzRev = val; 477 ah->hw_version.analog5GhzRev = val;
534 478
535 return 0; 479 return 0;
536} 480}
537 481
538static int ath9k_hw_init_macaddr(struct ath_hal *ah) 482static int ath9k_hw_init_macaddr(struct ath_hw *ah)
539{ 483{
540 u32 sum; 484 u32 sum;
541 int i; 485 int i;
542 u16 eeval; 486 u16 eeval;
543 struct ath_hal_5416 *ahp = AH5416(ah);
544 487
545 sum = 0; 488 sum = 0;
546 for (i = 0; i < 3; i++) { 489 for (i = 0; i < 3; i++) {
547 eeval = ath9k_hw_get_eeprom(ah, AR_EEPROM_MAC(i)); 490 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
548 sum += eeval; 491 sum += eeval;
549 ahp->ah_macaddr[2 * i] = eeval >> 8; 492 ah->macaddr[2 * i] = eeval >> 8;
550 ahp->ah_macaddr[2 * i + 1] = eeval & 0xff; 493 ah->macaddr[2 * i + 1] = eeval & 0xff;
551 } 494 }
552 if (sum == 0 || sum == 0xffff * 3) { 495 if (sum == 0 || sum == 0xffff * 3) {
553 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 496 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
554 "mac address read failed: %pM\n", 497 "mac address read failed: %pM\n",
555 ahp->ah_macaddr); 498 ah->macaddr);
556 return -EADDRNOTAVAIL; 499 return -EADDRNOTAVAIL;
557 } 500 }
558 501
559 return 0; 502 return 0;
560} 503}
561 504
562static void ath9k_hw_init_rxgain_ini(struct ath_hal *ah) 505static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
563{ 506{
564 u32 rxgain_type; 507 u32 rxgain_type;
565 struct ath_hal_5416 *ahp = AH5416(ah);
566 508
567 if (ath9k_hw_get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) { 509 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
568 rxgain_type = ath9k_hw_get_eeprom(ah, EEP_RXGAIN_TYPE); 510 rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
569 511
570 if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF) 512 if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
571 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain, 513 INIT_INI_ARRAY(&ah->iniModesRxGain,
572 ar9280Modes_backoff_13db_rxgain_9280_2, 514 ar9280Modes_backoff_13db_rxgain_9280_2,
573 ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6); 515 ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
574 else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF) 516 else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
575 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain, 517 INIT_INI_ARRAY(&ah->iniModesRxGain,
576 ar9280Modes_backoff_23db_rxgain_9280_2, 518 ar9280Modes_backoff_23db_rxgain_9280_2,
577 ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6); 519 ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
578 else 520 else
579 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain, 521 INIT_INI_ARRAY(&ah->iniModesRxGain,
580 ar9280Modes_original_rxgain_9280_2, 522 ar9280Modes_original_rxgain_9280_2,
581 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); 523 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
582 } else 524 } else {
583 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain, 525 INIT_INI_ARRAY(&ah->iniModesRxGain,
584 ar9280Modes_original_rxgain_9280_2, 526 ar9280Modes_original_rxgain_9280_2,
585 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); 527 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
528 }
586} 529}
587 530
588static void ath9k_hw_init_txgain_ini(struct ath_hal *ah) 531static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
589{ 532{
590 u32 txgain_type; 533 u32 txgain_type;
591 struct ath_hal_5416 *ahp = AH5416(ah);
592 534
593 if (ath9k_hw_get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) { 535 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
594 txgain_type = ath9k_hw_get_eeprom(ah, EEP_TXGAIN_TYPE); 536 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
595 537
596 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) 538 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
597 INIT_INI_ARRAY(&ahp->ah_iniModesTxGain, 539 INIT_INI_ARRAY(&ah->iniModesTxGain,
598 ar9280Modes_high_power_tx_gain_9280_2, 540 ar9280Modes_high_power_tx_gain_9280_2,
599 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6); 541 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
600 else 542 else
601 INIT_INI_ARRAY(&ahp->ah_iniModesTxGain, 543 INIT_INI_ARRAY(&ah->iniModesTxGain,
602 ar9280Modes_original_tx_gain_9280_2, 544 ar9280Modes_original_tx_gain_9280_2,
603 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); 545 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
604 } else 546 } else {
605 INIT_INI_ARRAY(&ahp->ah_iniModesTxGain, 547 INIT_INI_ARRAY(&ah->iniModesTxGain,
606 ar9280Modes_original_tx_gain_9280_2, 548 ar9280Modes_original_tx_gain_9280_2,
607 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); 549 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
550 }
608} 551}
609 552
610static int ath9k_hw_post_attach(struct ath_hal *ah) 553static int ath9k_hw_post_attach(struct ath_hw *ah)
611{ 554{
612 int ecode; 555 int ecode;
613 556
@@ -636,237 +579,234 @@ static int ath9k_hw_post_attach(struct ath_hal *ah)
636 return 0; 579 return 0;
637} 580}
638 581
639static struct ath_hal *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc, 582static struct ath_hw *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
640 void __iomem *mem, int *status) 583 int *status)
641{ 584{
642 struct ath_hal_5416 *ahp; 585 struct ath_hw *ah;
643 struct ath_hal *ah;
644 int ecode; 586 int ecode;
645 u32 i, j; 587 u32 i, j;
646 588
647 ahp = ath9k_hw_newstate(devid, sc, mem, status); 589 ah = ath9k_hw_newstate(devid, sc, status);
648 if (ahp == NULL) 590 if (ah == NULL)
649 return NULL; 591 return NULL;
650 592
651 ah = &ahp->ah;
652
653 ath9k_hw_set_defaults(ah); 593 ath9k_hw_set_defaults(ah);
654 594
655 if (ah->ah_config.intr_mitigation != 0) 595 if (ah->config.intr_mitigation != 0)
656 ahp->ah_intrMitigation = true; 596 ah->intr_mitigation = true;
657 597
658 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 598 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
659 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "Couldn't reset chip\n"); 599 DPRINTF(sc, ATH_DBG_RESET, "Couldn't reset chip\n");
660 ecode = -EIO; 600 ecode = -EIO;
661 goto bad; 601 goto bad;
662 } 602 }
663 603
664 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 604 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
665 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "Couldn't wakeup chip\n"); 605 DPRINTF(sc, ATH_DBG_RESET, "Couldn't wakeup chip\n");
666 ecode = -EIO; 606 ecode = -EIO;
667 goto bad; 607 goto bad;
668 } 608 }
669 609
670 if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) { 610 if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
671 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) { 611 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) {
672 ah->ah_config.serialize_regmode = 612 ah->config.serialize_regmode =
673 SER_REG_MODE_ON; 613 SER_REG_MODE_ON;
674 } else { 614 } else {
675 ah->ah_config.serialize_regmode = 615 ah->config.serialize_regmode =
676 SER_REG_MODE_OFF; 616 SER_REG_MODE_OFF;
677 } 617 }
678 } 618 }
679 619
680 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 620 DPRINTF(sc, ATH_DBG_RESET, "serialize_regmode is %d\n",
681 "serialize_regmode is %d\n", 621 ah->config.serialize_regmode);
682 ah->ah_config.serialize_regmode);
683 622
684 if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) && 623 if ((ah->hw_version.macVersion != AR_SREV_VERSION_5416_PCI) &&
685 (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) && 624 (ah->hw_version.macVersion != AR_SREV_VERSION_5416_PCIE) &&
686 (ah->ah_macVersion != AR_SREV_VERSION_9160) && 625 (ah->hw_version.macVersion != AR_SREV_VERSION_9160) &&
687 (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah)) && (!AR_SREV_9285(ah))) { 626 (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah)) && (!AR_SREV_9285(ah))) {
688 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 627 DPRINTF(sc, ATH_DBG_RESET,
689 "Mac Chip Rev 0x%02x.%x is not supported by " 628 "Mac Chip Rev 0x%02x.%x is not supported by "
690 "this driver\n", ah->ah_macVersion, ah->ah_macRev); 629 "this driver\n", ah->hw_version.macVersion,
630 ah->hw_version.macRev);
691 ecode = -EOPNOTSUPP; 631 ecode = -EOPNOTSUPP;
692 goto bad; 632 goto bad;
693 } 633 }
694 634
695 if (AR_SREV_9100(ah)) { 635 if (AR_SREV_9100(ah)) {
696 ahp->ah_iqCalData.calData = &iq_cal_multi_sample; 636 ah->iq_caldata.calData = &iq_cal_multi_sample;
697 ahp->ah_suppCals = IQ_MISMATCH_CAL; 637 ah->supp_cals = IQ_MISMATCH_CAL;
698 ah->ah_isPciExpress = false; 638 ah->is_pciexpress = false;
699 } 639 }
700 ah->ah_phyRev = REG_READ(ah, AR_PHY_CHIP_ID); 640 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
701 641
702 if (AR_SREV_9160_10_OR_LATER(ah)) { 642 if (AR_SREV_9160_10_OR_LATER(ah)) {
703 if (AR_SREV_9280_10_OR_LATER(ah)) { 643 if (AR_SREV_9280_10_OR_LATER(ah)) {
704 ahp->ah_iqCalData.calData = &iq_cal_single_sample; 644 ah->iq_caldata.calData = &iq_cal_single_sample;
705 ahp->ah_adcGainCalData.calData = 645 ah->adcgain_caldata.calData =
706 &adc_gain_cal_single_sample; 646 &adc_gain_cal_single_sample;
707 ahp->ah_adcDcCalData.calData = 647 ah->adcdc_caldata.calData =
708 &adc_dc_cal_single_sample; 648 &adc_dc_cal_single_sample;
709 ahp->ah_adcDcCalInitData.calData = 649 ah->adcdc_calinitdata.calData =
710 &adc_init_dc_cal; 650 &adc_init_dc_cal;
711 } else { 651 } else {
712 ahp->ah_iqCalData.calData = &iq_cal_multi_sample; 652 ah->iq_caldata.calData = &iq_cal_multi_sample;
713 ahp->ah_adcGainCalData.calData = 653 ah->adcgain_caldata.calData =
714 &adc_gain_cal_multi_sample; 654 &adc_gain_cal_multi_sample;
715 ahp->ah_adcDcCalData.calData = 655 ah->adcdc_caldata.calData =
716 &adc_dc_cal_multi_sample; 656 &adc_dc_cal_multi_sample;
717 ahp->ah_adcDcCalInitData.calData = 657 ah->adcdc_calinitdata.calData =
718 &adc_init_dc_cal; 658 &adc_init_dc_cal;
719 } 659 }
720 ahp->ah_suppCals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; 660 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
721 } 661 }
722 662
723 if (AR_SREV_9160(ah)) { 663 if (AR_SREV_9160(ah)) {
724 ah->ah_config.enable_ani = 1; 664 ah->config.enable_ani = 1;
725 ahp->ah_ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | 665 ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
726 ATH9K_ANI_FIRSTEP_LEVEL); 666 ATH9K_ANI_FIRSTEP_LEVEL);
727 } else { 667 } else {
728 ahp->ah_ani_function = ATH9K_ANI_ALL; 668 ah->ani_function = ATH9K_ANI_ALL;
729 if (AR_SREV_9280_10_OR_LATER(ah)) { 669 if (AR_SREV_9280_10_OR_LATER(ah)) {
730 ahp->ah_ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 670 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
731 } 671 }
732 } 672 }
733 673
734 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 674 DPRINTF(sc, ATH_DBG_RESET,
735 "This Mac Chip Rev 0x%02x.%x is \n", 675 "This Mac Chip Rev 0x%02x.%x is \n",
736 ah->ah_macVersion, ah->ah_macRev); 676 ah->hw_version.macVersion, ah->hw_version.macRev);
737 677
738 if (AR_SREV_9285_12_OR_LATER(ah)) { 678 if (AR_SREV_9285_12_OR_LATER(ah)) {
739 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9285Modes_9285_1_2, 679 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
740 ARRAY_SIZE(ar9285Modes_9285_1_2), 6); 680 ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
741 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9285Common_9285_1_2, 681 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
742 ARRAY_SIZE(ar9285Common_9285_1_2), 2); 682 ARRAY_SIZE(ar9285Common_9285_1_2), 2);
743 683
744 if (ah->ah_config.pcie_clock_req) { 684 if (ah->config.pcie_clock_req) {
745 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 685 INIT_INI_ARRAY(&ah->iniPcieSerdes,
746 ar9285PciePhy_clkreq_off_L1_9285_1_2, 686 ar9285PciePhy_clkreq_off_L1_9285_1_2,
747 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2); 687 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
748 } else { 688 } else {
749 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 689 INIT_INI_ARRAY(&ah->iniPcieSerdes,
750 ar9285PciePhy_clkreq_always_on_L1_9285_1_2, 690 ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
751 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2), 691 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
752 2); 692 2);
753 } 693 }
754 } else if (AR_SREV_9285_10_OR_LATER(ah)) { 694 } else if (AR_SREV_9285_10_OR_LATER(ah)) {
755 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9285Modes_9285, 695 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
756 ARRAY_SIZE(ar9285Modes_9285), 6); 696 ARRAY_SIZE(ar9285Modes_9285), 6);
757 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9285Common_9285, 697 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
758 ARRAY_SIZE(ar9285Common_9285), 2); 698 ARRAY_SIZE(ar9285Common_9285), 2);
759 699
760 if (ah->ah_config.pcie_clock_req) { 700 if (ah->config.pcie_clock_req) {
761 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 701 INIT_INI_ARRAY(&ah->iniPcieSerdes,
762 ar9285PciePhy_clkreq_off_L1_9285, 702 ar9285PciePhy_clkreq_off_L1_9285,
763 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2); 703 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
764 } else { 704 } else {
765 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 705 INIT_INI_ARRAY(&ah->iniPcieSerdes,
766 ar9285PciePhy_clkreq_always_on_L1_9285, 706 ar9285PciePhy_clkreq_always_on_L1_9285,
767 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2); 707 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
768 } 708 }
769 } else if (AR_SREV_9280_20_OR_LATER(ah)) { 709 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
770 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2, 710 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
771 ARRAY_SIZE(ar9280Modes_9280_2), 6); 711 ARRAY_SIZE(ar9280Modes_9280_2), 6);
772 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2, 712 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
773 ARRAY_SIZE(ar9280Common_9280_2), 2); 713 ARRAY_SIZE(ar9280Common_9280_2), 2);
774 714
775 if (ah->ah_config.pcie_clock_req) { 715 if (ah->config.pcie_clock_req) {
776 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 716 INIT_INI_ARRAY(&ah->iniPcieSerdes,
777 ar9280PciePhy_clkreq_off_L1_9280, 717 ar9280PciePhy_clkreq_off_L1_9280,
778 ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2); 718 ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
779 } else { 719 } else {
780 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 720 INIT_INI_ARRAY(&ah->iniPcieSerdes,
781 ar9280PciePhy_clkreq_always_on_L1_9280, 721 ar9280PciePhy_clkreq_always_on_L1_9280,
782 ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2); 722 ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
783 } 723 }
784 INIT_INI_ARRAY(&ahp->ah_iniModesAdditional, 724 INIT_INI_ARRAY(&ah->iniModesAdditional,
785 ar9280Modes_fast_clock_9280_2, 725 ar9280Modes_fast_clock_9280_2,
786 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3); 726 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
787 } else if (AR_SREV_9280_10_OR_LATER(ah)) { 727 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
788 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280, 728 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
789 ARRAY_SIZE(ar9280Modes_9280), 6); 729 ARRAY_SIZE(ar9280Modes_9280), 6);
790 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280, 730 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
791 ARRAY_SIZE(ar9280Common_9280), 2); 731 ARRAY_SIZE(ar9280Common_9280), 2);
792 } else if (AR_SREV_9160_10_OR_LATER(ah)) { 732 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
793 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9160, 733 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
794 ARRAY_SIZE(ar5416Modes_9160), 6); 734 ARRAY_SIZE(ar5416Modes_9160), 6);
795 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9160, 735 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
796 ARRAY_SIZE(ar5416Common_9160), 2); 736 ARRAY_SIZE(ar5416Common_9160), 2);
797 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9160, 737 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
798 ARRAY_SIZE(ar5416Bank0_9160), 2); 738 ARRAY_SIZE(ar5416Bank0_9160), 2);
799 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9160, 739 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
800 ARRAY_SIZE(ar5416BB_RfGain_9160), 3); 740 ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
801 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9160, 741 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
802 ARRAY_SIZE(ar5416Bank1_9160), 2); 742 ARRAY_SIZE(ar5416Bank1_9160), 2);
803 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9160, 743 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
804 ARRAY_SIZE(ar5416Bank2_9160), 2); 744 ARRAY_SIZE(ar5416Bank2_9160), 2);
805 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9160, 745 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
806 ARRAY_SIZE(ar5416Bank3_9160), 3); 746 ARRAY_SIZE(ar5416Bank3_9160), 3);
807 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9160, 747 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
808 ARRAY_SIZE(ar5416Bank6_9160), 3); 748 ARRAY_SIZE(ar5416Bank6_9160), 3);
809 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9160, 749 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
810 ARRAY_SIZE(ar5416Bank6TPC_9160), 3); 750 ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
811 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9160, 751 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
812 ARRAY_SIZE(ar5416Bank7_9160), 2); 752 ARRAY_SIZE(ar5416Bank7_9160), 2);
813 if (AR_SREV_9160_11(ah)) { 753 if (AR_SREV_9160_11(ah)) {
814 INIT_INI_ARRAY(&ahp->ah_iniAddac, 754 INIT_INI_ARRAY(&ah->iniAddac,
815 ar5416Addac_91601_1, 755 ar5416Addac_91601_1,
816 ARRAY_SIZE(ar5416Addac_91601_1), 2); 756 ARRAY_SIZE(ar5416Addac_91601_1), 2);
817 } else { 757 } else {
818 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9160, 758 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
819 ARRAY_SIZE(ar5416Addac_9160), 2); 759 ARRAY_SIZE(ar5416Addac_9160), 2);
820 } 760 }
821 } else if (AR_SREV_9100_OR_LATER(ah)) { 761 } else if (AR_SREV_9100_OR_LATER(ah)) {
822 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9100, 762 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
823 ARRAY_SIZE(ar5416Modes_9100), 6); 763 ARRAY_SIZE(ar5416Modes_9100), 6);
824 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9100, 764 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
825 ARRAY_SIZE(ar5416Common_9100), 2); 765 ARRAY_SIZE(ar5416Common_9100), 2);
826 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9100, 766 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
827 ARRAY_SIZE(ar5416Bank0_9100), 2); 767 ARRAY_SIZE(ar5416Bank0_9100), 2);
828 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9100, 768 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
829 ARRAY_SIZE(ar5416BB_RfGain_9100), 3); 769 ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
830 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9100, 770 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
831 ARRAY_SIZE(ar5416Bank1_9100), 2); 771 ARRAY_SIZE(ar5416Bank1_9100), 2);
832 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9100, 772 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
833 ARRAY_SIZE(ar5416Bank2_9100), 2); 773 ARRAY_SIZE(ar5416Bank2_9100), 2);
834 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9100, 774 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
835 ARRAY_SIZE(ar5416Bank3_9100), 3); 775 ARRAY_SIZE(ar5416Bank3_9100), 3);
836 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9100, 776 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
837 ARRAY_SIZE(ar5416Bank6_9100), 3); 777 ARRAY_SIZE(ar5416Bank6_9100), 3);
838 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9100, 778 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
839 ARRAY_SIZE(ar5416Bank6TPC_9100), 3); 779 ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
840 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9100, 780 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
841 ARRAY_SIZE(ar5416Bank7_9100), 2); 781 ARRAY_SIZE(ar5416Bank7_9100), 2);
842 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9100, 782 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
843 ARRAY_SIZE(ar5416Addac_9100), 2); 783 ARRAY_SIZE(ar5416Addac_9100), 2);
844 } else { 784 } else {
845 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes, 785 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
846 ARRAY_SIZE(ar5416Modes), 6); 786 ARRAY_SIZE(ar5416Modes), 6);
847 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common, 787 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
848 ARRAY_SIZE(ar5416Common), 2); 788 ARRAY_SIZE(ar5416Common), 2);
849 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0, 789 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
850 ARRAY_SIZE(ar5416Bank0), 2); 790 ARRAY_SIZE(ar5416Bank0), 2);
851 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain, 791 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
852 ARRAY_SIZE(ar5416BB_RfGain), 3); 792 ARRAY_SIZE(ar5416BB_RfGain), 3);
853 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1, 793 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
854 ARRAY_SIZE(ar5416Bank1), 2); 794 ARRAY_SIZE(ar5416Bank1), 2);
855 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2, 795 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
856 ARRAY_SIZE(ar5416Bank2), 2); 796 ARRAY_SIZE(ar5416Bank2), 2);
857 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3, 797 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
858 ARRAY_SIZE(ar5416Bank3), 3); 798 ARRAY_SIZE(ar5416Bank3), 3);
859 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6, 799 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
860 ARRAY_SIZE(ar5416Bank6), 3); 800 ARRAY_SIZE(ar5416Bank6), 3);
861 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC, 801 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
862 ARRAY_SIZE(ar5416Bank6TPC), 3); 802 ARRAY_SIZE(ar5416Bank6TPC), 3);
863 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7, 803 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
864 ARRAY_SIZE(ar5416Bank7), 2); 804 ARRAY_SIZE(ar5416Bank7), 2);
865 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac, 805 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
866 ARRAY_SIZE(ar5416Addac), 2); 806 ARRAY_SIZE(ar5416Addac), 2);
867 } 807 }
868 808
869 if (ah->ah_isPciExpress) 809 if (ah->is_pciexpress)
870 ath9k_hw_configpcipowersave(ah, 0); 810 ath9k_hw_configpcipowersave(ah, 0);
871 else 811 else
872 ath9k_hw_disablepcie(ah); 812 ath9k_hw_disablepcie(ah);
@@ -883,23 +823,23 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
883 if (AR_SREV_9280_20(ah)) 823 if (AR_SREV_9280_20(ah))
884 ath9k_hw_init_txgain_ini(ah); 824 ath9k_hw_init_txgain_ini(ah);
885 825
886 if (ah->ah_devid == AR9280_DEVID_PCI) { 826 if (ah->hw_version.devid == AR9280_DEVID_PCI) {
887 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) { 827 for (i = 0; i < ah->iniModes.ia_rows; i++) {
888 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0); 828 u32 reg = INI_RA(&ah->iniModes, i, 0);
889 829
890 for (j = 1; j < ahp->ah_iniModes.ia_columns; j++) { 830 for (j = 1; j < ah->iniModes.ia_columns; j++) {
891 u32 val = INI_RA(&ahp->ah_iniModes, i, j); 831 u32 val = INI_RA(&ah->iniModes, i, j);
892 832
893 INI_RA(&ahp->ah_iniModes, i, j) = 833 INI_RA(&ah->iniModes, i, j) =
894 ath9k_hw_ini_fixup(ah, 834 ath9k_hw_ini_fixup(ah,
895 &ahp->ah_eeprom.def, 835 &ah->eeprom.def,
896 reg, val); 836 reg, val);
897 } 837 }
898 } 838 }
899 } 839 }
900 840
901 if (!ath9k_hw_fill_cap_info(ah)) { 841 if (!ath9k_hw_fill_cap_info(ah)) {
902 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 842 DPRINTF(sc, ATH_DBG_RESET,
903 "failed ath9k_hw_fill_cap_info\n"); 843 "failed ath9k_hw_fill_cap_info\n");
904 ecode = -EINVAL; 844 ecode = -EINVAL;
905 goto bad; 845 goto bad;
@@ -907,29 +847,29 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
907 847
908 ecode = ath9k_hw_init_macaddr(ah); 848 ecode = ath9k_hw_init_macaddr(ah);
909 if (ecode != 0) { 849 if (ecode != 0) {
910 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 850 DPRINTF(sc, ATH_DBG_RESET,
911 "failed initializing mac address\n"); 851 "failed initializing mac address\n");
912 goto bad; 852 goto bad;
913 } 853 }
914 854
915 if (AR_SREV_9285(ah)) 855 if (AR_SREV_9285(ah))
916 ah->ah_txTrigLevel = (AR_FTRIG_256B >> AR_FTRIG_S); 856 ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
917 else 857 else
918 ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S); 858 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
919 859
920 ath9k_init_nfcal_hist_buffer(ah); 860 ath9k_init_nfcal_hist_buffer(ah);
921 861
922 return ah; 862 return ah;
923bad: 863bad:
924 if (ahp) 864 if (ah)
925 ath9k_hw_detach((struct ath_hal *) ahp); 865 ath9k_hw_detach(ah);
926 if (status) 866 if (status)
927 *status = ecode; 867 *status = ecode;
928 868
929 return NULL; 869 return NULL;
930} 870}
931 871
932static void ath9k_hw_init_bb(struct ath_hal *ah, 872static void ath9k_hw_init_bb(struct ath_hw *ah,
933 struct ath9k_channel *chan) 873 struct ath9k_channel *chan)
934{ 874{
935 u32 synthDelay; 875 u32 synthDelay;
@@ -945,7 +885,7 @@ static void ath9k_hw_init_bb(struct ath_hal *ah,
945 udelay(synthDelay + BASE_ACTIVATE_DELAY); 885 udelay(synthDelay + BASE_ACTIVATE_DELAY);
946} 886}
947 887
948static void ath9k_hw_init_qos(struct ath_hal *ah) 888static void ath9k_hw_init_qos(struct ath_hw *ah)
949{ 889{
950 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); 890 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
951 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); 891 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -962,7 +902,7 @@ static void ath9k_hw_init_qos(struct ath_hal *ah)
962 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 902 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
963} 903}
964 904
965static void ath9k_hw_init_pll(struct ath_hal *ah, 905static void ath9k_hw_init_pll(struct ath_hw *ah,
966 struct ath9k_channel *chan) 906 struct ath9k_channel *chan)
967{ 907{
968 u32 pll; 908 u32 pll;
@@ -1023,27 +963,26 @@ static void ath9k_hw_init_pll(struct ath_hal *ah,
1023 pll |= SM(0xb, AR_RTC_PLL_DIV); 963 pll |= SM(0xb, AR_RTC_PLL_DIV);
1024 } 964 }
1025 } 965 }
1026 REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll); 966 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
1027 967
1028 udelay(RTC_PLL_SETTLE_DELAY); 968 udelay(RTC_PLL_SETTLE_DELAY);
1029 969
1030 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 970 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
1031} 971}
1032 972
1033static void ath9k_hw_init_chain_masks(struct ath_hal *ah) 973static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
1034{ 974{
1035 struct ath_hal_5416 *ahp = AH5416(ah);
1036 int rx_chainmask, tx_chainmask; 975 int rx_chainmask, tx_chainmask;
1037 976
1038 rx_chainmask = ahp->ah_rxchainmask; 977 rx_chainmask = ah->rxchainmask;
1039 tx_chainmask = ahp->ah_txchainmask; 978 tx_chainmask = ah->txchainmask;
1040 979
1041 switch (rx_chainmask) { 980 switch (rx_chainmask) {
1042 case 0x5: 981 case 0x5:
1043 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 982 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
1044 AR_PHY_SWAP_ALT_CHAIN); 983 AR_PHY_SWAP_ALT_CHAIN);
1045 case 0x3: 984 case 0x3:
1046 if (((ah)->ah_macVersion <= AR_SREV_VERSION_9160)) { 985 if (((ah)->hw_version.macVersion <= AR_SREV_VERSION_9160)) {
1047 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); 986 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
1048 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); 987 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
1049 break; 988 break;
@@ -1068,28 +1007,26 @@ static void ath9k_hw_init_chain_masks(struct ath_hal *ah)
1068 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001); 1007 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
1069} 1008}
1070 1009
1071static void ath9k_hw_init_interrupt_masks(struct ath_hal *ah, 1010static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1072 enum nl80211_iftype opmode) 1011 enum nl80211_iftype opmode)
1073{ 1012{
1074 struct ath_hal_5416 *ahp = AH5416(ah); 1013 ah->mask_reg = AR_IMR_TXERR |
1075
1076 ahp->ah_maskReg = AR_IMR_TXERR |
1077 AR_IMR_TXURN | 1014 AR_IMR_TXURN |
1078 AR_IMR_RXERR | 1015 AR_IMR_RXERR |
1079 AR_IMR_RXORN | 1016 AR_IMR_RXORN |
1080 AR_IMR_BCNMISC; 1017 AR_IMR_BCNMISC;
1081 1018
1082 if (ahp->ah_intrMitigation) 1019 if (ah->intr_mitigation)
1083 ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 1020 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
1084 else 1021 else
1085 ahp->ah_maskReg |= AR_IMR_RXOK; 1022 ah->mask_reg |= AR_IMR_RXOK;
1086 1023
1087 ahp->ah_maskReg |= AR_IMR_TXOK; 1024 ah->mask_reg |= AR_IMR_TXOK;
1088 1025
1089 if (opmode == NL80211_IFTYPE_AP) 1026 if (opmode == NL80211_IFTYPE_AP)
1090 ahp->ah_maskReg |= AR_IMR_MIB; 1027 ah->mask_reg |= AR_IMR_MIB;
1091 1028
1092 REG_WRITE(ah, AR_IMR, ahp->ah_maskReg); 1029 REG_WRITE(ah, AR_IMR, ah->mask_reg);
1093 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT); 1030 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
1094 1031
1095 if (!AR_SREV_9100(ah)) { 1032 if (!AR_SREV_9100(ah)) {
@@ -1099,72 +1036,64 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
1099 } 1036 }
1100} 1037}
1101 1038
1102static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us) 1039static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1103{ 1040{
1104 struct ath_hal_5416 *ahp = AH5416(ah);
1105
1106 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { 1041 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
1107 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad ack timeout %u\n", us); 1042 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad ack timeout %u\n", us);
1108 ahp->ah_acktimeout = (u32) -1; 1043 ah->acktimeout = (u32) -1;
1109 return false; 1044 return false;
1110 } else { 1045 } else {
1111 REG_RMW_FIELD(ah, AR_TIME_OUT, 1046 REG_RMW_FIELD(ah, AR_TIME_OUT,
1112 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us)); 1047 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
1113 ahp->ah_acktimeout = us; 1048 ah->acktimeout = us;
1114 return true; 1049 return true;
1115 } 1050 }
1116} 1051}
1117 1052
1118static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us) 1053static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1119{ 1054{
1120 struct ath_hal_5416 *ahp = AH5416(ah);
1121
1122 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { 1055 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
1123 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad cts timeout %u\n", us); 1056 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad cts timeout %u\n", us);
1124 ahp->ah_ctstimeout = (u32) -1; 1057 ah->ctstimeout = (u32) -1;
1125 return false; 1058 return false;
1126 } else { 1059 } else {
1127 REG_RMW_FIELD(ah, AR_TIME_OUT, 1060 REG_RMW_FIELD(ah, AR_TIME_OUT,
1128 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us)); 1061 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
1129 ahp->ah_ctstimeout = us; 1062 ah->ctstimeout = us;
1130 return true; 1063 return true;
1131 } 1064 }
1132} 1065}
1133 1066
1134static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah, u32 tu) 1067static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1135{ 1068{
1136 struct ath_hal_5416 *ahp = AH5416(ah);
1137
1138 if (tu > 0xFFFF) { 1069 if (tu > 0xFFFF) {
1139 DPRINTF(ah->ah_sc, ATH_DBG_XMIT, 1070 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
1140 "bad global tx timeout %u\n", tu); 1071 "bad global tx timeout %u\n", tu);
1141 ahp->ah_globaltxtimeout = (u32) -1; 1072 ah->globaltxtimeout = (u32) -1;
1142 return false; 1073 return false;
1143 } else { 1074 } else {
1144 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu); 1075 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
1145 ahp->ah_globaltxtimeout = tu; 1076 ah->globaltxtimeout = tu;
1146 return true; 1077 return true;
1147 } 1078 }
1148} 1079}
1149 1080
1150static void ath9k_hw_init_user_settings(struct ath_hal *ah) 1081static void ath9k_hw_init_user_settings(struct ath_hw *ah)
1151{ 1082{
1152 struct ath_hal_5416 *ahp = AH5416(ah); 1083 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
1153 1084 ah->misc_mode);
1154 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "ahp->ah_miscMode 0x%x\n",
1155 ahp->ah_miscMode);
1156 1085
1157 if (ahp->ah_miscMode != 0) 1086 if (ah->misc_mode != 0)
1158 REG_WRITE(ah, AR_PCU_MISC, 1087 REG_WRITE(ah, AR_PCU_MISC,
1159 REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode); 1088 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
1160 if (ahp->ah_slottime != (u32) -1) 1089 if (ah->slottime != (u32) -1)
1161 ath9k_hw_setslottime(ah, ahp->ah_slottime); 1090 ath9k_hw_setslottime(ah, ah->slottime);
1162 if (ahp->ah_acktimeout != (u32) -1) 1091 if (ah->acktimeout != (u32) -1)
1163 ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout); 1092 ath9k_hw_set_ack_timeout(ah, ah->acktimeout);
1164 if (ahp->ah_ctstimeout != (u32) -1) 1093 if (ah->ctstimeout != (u32) -1)
1165 ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout); 1094 ath9k_hw_set_cts_timeout(ah, ah->ctstimeout);
1166 if (ahp->ah_globaltxtimeout != (u32) -1) 1095 if (ah->globaltxtimeout != (u32) -1)
1167 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout); 1096 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
1168} 1097}
1169 1098
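The three setters above share one convention: an out-of-range request is recorded as the sentinel (u32) -1, and ath9k_hw_init_user_settings() later replays only values that differ from that sentinel after a chip reset. A minimal standalone sketch of that pattern (type and limit names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NOT_SET		((uint32_t)-1)	/* sentinel: never configured */
#define ACK_TIMEOUT_MAX	0x3fff		/* illustrative hardware limit */

struct hw_state {
	uint32_t acktimeout;		/* cached setting, NOT_SET if none */
};

static bool set_ack_timeout(struct hw_state *hw, uint32_t us)
{
	if (us > ACK_TIMEOUT_MAX) {	/* reject, remember "not configured" */
		hw->acktimeout = NOT_SET;
		return false;
	}
	/* a real driver would program the timeout register here */
	hw->acktimeout = us;		/* cache so it survives a reset */
	return true;
}

static void replay_user_settings(struct hw_state *hw)
{
	if (hw->acktimeout != NOT_SET)	/* replay only what the user set */
		set_ack_timeout(hw, hw->acktimeout);
}

int main(void)
{
	struct hw_state hw = { .acktimeout = NOT_SET };

	set_ack_timeout(&hw, 64);	/* user configures a timeout */
	replay_user_settings(&hw);	/* re-applied after a reset */
	printf("ack timeout: %u us\n", (unsigned)hw.acktimeout);
	return 0;
}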
1170const char *ath9k_hw_probe(u16 vendorid, u16 devid) 1099const char *ath9k_hw_probe(u16 vendorid, u16 devid)
@@ -1173,7 +1102,7 @@ const char *ath9k_hw_probe(u16 vendorid, u16 devid)
1173 ath9k_hw_devname(devid) : NULL; 1102 ath9k_hw_devname(devid) : NULL;
1174} 1103}
1175 1104
1176void ath9k_hw_detach(struct ath_hal *ah) 1105void ath9k_hw_detach(struct ath_hw *ah)
1177{ 1106{
1178 if (!AR_SREV_9100(ah)) 1107 if (!AR_SREV_9100(ah))
1179 ath9k_hw_ani_detach(ah); 1108 ath9k_hw_ani_detach(ah);
@@ -1183,19 +1112,19 @@ void ath9k_hw_detach(struct ath_hal *ah)
1183 kfree(ah); 1112 kfree(ah);
1184} 1113}
1185 1114
1186struct ath_hal *ath9k_hw_attach(u16 devid, struct ath_softc *sc, 1115struct ath_hw *ath9k_hw_attach(u16 devid, struct ath_softc *sc, int *error)
1187 void __iomem *mem, int *error)
1188{ 1116{
1189 struct ath_hal *ah = NULL; 1117 struct ath_hw *ah = NULL;
1190 1118
1191 switch (devid) { 1119 switch (devid) {
1192 case AR5416_DEVID_PCI: 1120 case AR5416_DEVID_PCI:
1193 case AR5416_DEVID_PCIE: 1121 case AR5416_DEVID_PCIE:
1122 case AR5416_AR9100_DEVID:
1194 case AR9160_DEVID_PCI: 1123 case AR9160_DEVID_PCI:
1195 case AR9280_DEVID_PCI: 1124 case AR9280_DEVID_PCI:
1196 case AR9280_DEVID_PCIE: 1125 case AR9280_DEVID_PCIE:
1197 case AR9285_DEVID_PCIE: 1126 case AR9285_DEVID_PCIE:
1198 ah = ath9k_hw_do_attach(devid, sc, mem, error); 1127 ah = ath9k_hw_do_attach(devid, sc, error);
1199 break; 1128 break;
1200 default: 1129 default:
1201 *error = -ENXIO; 1130 *error = -ENXIO;
@@ -1209,7 +1138,7 @@ struct ath_hal *ath9k_hw_attach(u16 devid, struct ath_softc *sc,
1209/* INI */ 1138/* INI */
1210/*******/ 1139/*******/
1211 1140
1212static void ath9k_hw_override_ini(struct ath_hal *ah, 1141static void ath9k_hw_override_ini(struct ath_hw *ah,
1213 struct ath9k_channel *chan) 1142 struct ath9k_channel *chan)
1214{ 1143{
1215 /* 1144 /*
@@ -1227,13 +1156,13 @@ static void ath9k_hw_override_ini(struct ath_hal *ah,
1227 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); 1156 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
1228} 1157}
1229 1158
1230static u32 ath9k_hw_def_ini_fixup(struct ath_hal *ah, 1159static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
1231 struct ar5416_eeprom_def *pEepData, 1160 struct ar5416_eeprom_def *pEepData,
1232 u32 reg, u32 value) 1161 u32 reg, u32 value)
1233{ 1162{
1234 struct base_eep_header *pBase = &(pEepData->baseEepHeader); 1163 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
1235 1164
1236 switch (ah->ah_devid) { 1165 switch (ah->hw_version.devid) {
1237 case AR9280_DEVID_PCI: 1166 case AR9280_DEVID_PCI:
1238 if (reg == 0x7894) { 1167 if (reg == 0x7894) {
1239 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 1168 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
@@ -1261,24 +1190,22 @@ static u32 ath9k_hw_def_ini_fixup(struct ath_hal *ah,
1261 return value; 1190 return value;
1262} 1191}
1263 1192
1264static u32 ath9k_hw_ini_fixup(struct ath_hal *ah, 1193static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
1265 struct ar5416_eeprom_def *pEepData, 1194 struct ar5416_eeprom_def *pEepData,
1266 u32 reg, u32 value) 1195 u32 reg, u32 value)
1267{ 1196{
1268 struct ath_hal_5416 *ahp = AH5416(ah); 1197 if (ah->eep_map == EEP_MAP_4KBITS)
1269
1270 if (ahp->ah_eep_map == EEP_MAP_4KBITS)
1271 return value; 1198 return value;
1272 else 1199 else
1273 return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value); 1200 return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
1274} 1201}
1275 1202
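The fixup helpers above also show the wider refactoring in this patch: chip identification moves from flat ah_* fields into an ah->hw_version sub-structure (devid, macRev, subvendorid), and the EEPROM map type decides whether per-register overrides apply at all. A compilable sketch of that layout; register 0x7894 is taken from the hunk above, while the device id and the particular tweak are invented:

#include <stdint.h>
#include <stdio.h>

struct hw_version {			/* grouped chip identification */
	uint16_t devid;
	uint16_t subvendorid;
	uint32_t mac_rev;
};

enum eep_map { EEP_MAP_4KBITS, EEP_MAP_DEFAULT };

struct hw {
	struct hw_version hw_version;
	enum eep_map eep_map;
};

/* Device-specific override of an initialization value (illustrative). */
static uint32_t def_ini_fixup(const struct hw *hw, uint32_t reg, uint32_t val)
{
	switch (hw->hw_version.devid) {
	case 0x0029:			/* hypothetical PCI device id */
		if (reg == 0x7894)
			val &= ~0x1u;	/* example tweak for this chip only */
		break;
	default:
		break;
	}
	return val;
}

static uint32_t ini_fixup(const struct hw *hw, uint32_t reg, uint32_t val)
{
	/* the 4-kbit EEPROM map needs no per-register overrides */
	if (hw->eep_map == EEP_MAP_4KBITS)
		return val;
	return def_ini_fixup(hw, reg, val);
}

int main(void)
{
	struct hw hw = {
		.hw_version = { .devid = 0x0029 },
		.eep_map = EEP_MAP_DEFAULT,
	};

	printf("0x%08x\n", (unsigned)ini_fixup(&hw, 0x7894, 0xffffffff));
	return 0;
}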
1276static int ath9k_hw_process_ini(struct ath_hal *ah, 1203static int ath9k_hw_process_ini(struct ath_hw *ah,
1277 struct ath9k_channel *chan, 1204 struct ath9k_channel *chan,
1278 enum ath9k_ht_macmode macmode) 1205 enum ath9k_ht_macmode macmode)
1279{ 1206{
1280 int i, regWrites = 0; 1207 int i, regWrites = 0;
1281 struct ath_hal_5416 *ahp = AH5416(ah); 1208 struct ieee80211_channel *channel = chan->chan;
1282 u32 modesIndex, freqIndex; 1209 u32 modesIndex, freqIndex;
1283 int status; 1210 int status;
1284 1211
@@ -1310,40 +1237,38 @@ static int ath9k_hw_process_ini(struct ath_hal *ah,
1310 } 1237 }
1311 1238
1312 REG_WRITE(ah, AR_PHY(0), 0x00000007); 1239 REG_WRITE(ah, AR_PHY(0), 0x00000007);
1313
1314 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO); 1240 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
1315 1241 ah->eep_ops->set_addac(ah, chan);
1316 ath9k_hw_set_addac(ah, chan);
1317 1242
1318 if (AR_SREV_5416_V22_OR_LATER(ah)) { 1243 if (AR_SREV_5416_V22_OR_LATER(ah)) {
1319 REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites); 1244 REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
1320 } else { 1245 } else {
1321 struct ar5416IniArray temp; 1246 struct ar5416IniArray temp;
1322 u32 addacSize = 1247 u32 addacSize =
1323 sizeof(u32) * ahp->ah_iniAddac.ia_rows * 1248 sizeof(u32) * ah->iniAddac.ia_rows *
1324 ahp->ah_iniAddac.ia_columns; 1249 ah->iniAddac.ia_columns;
1325 1250
1326 memcpy(ahp->ah_addac5416_21, 1251 memcpy(ah->addac5416_21,
1327 ahp->ah_iniAddac.ia_array, addacSize); 1252 ah->iniAddac.ia_array, addacSize);
1328 1253
1329 (ahp->ah_addac5416_21)[31 * ahp->ah_iniAddac.ia_columns + 1] = 0; 1254 (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
1330 1255
1331 temp.ia_array = ahp->ah_addac5416_21; 1256 temp.ia_array = ah->addac5416_21;
1332 temp.ia_columns = ahp->ah_iniAddac.ia_columns; 1257 temp.ia_columns = ah->iniAddac.ia_columns;
1333 temp.ia_rows = ahp->ah_iniAddac.ia_rows; 1258 temp.ia_rows = ah->iniAddac.ia_rows;
1334 REG_WRITE_ARRAY(&temp, 1, regWrites); 1259 REG_WRITE_ARRAY(&temp, 1, regWrites);
1335 } 1260 }
1336 1261
1337 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC); 1262 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
1338 1263
1339 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) { 1264 for (i = 0; i < ah->iniModes.ia_rows; i++) {
1340 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0); 1265 u32 reg = INI_RA(&ah->iniModes, i, 0);
1341 u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex); 1266 u32 val = INI_RA(&ah->iniModes, i, modesIndex);
1342 1267
1343 REG_WRITE(ah, reg, val); 1268 REG_WRITE(ah, reg, val);
1344 1269
1345 if (reg >= 0x7800 && reg < 0x78a0 1270 if (reg >= 0x7800 && reg < 0x78a0
1346 && ah->ah_config.analog_shiftreg) { 1271 && ah->config.analog_shiftreg) {
1347 udelay(100); 1272 udelay(100);
1348 } 1273 }
1349 1274
@@ -1351,19 +1276,19 @@ static int ath9k_hw_process_ini(struct ath_hal *ah,
1351 } 1276 }
1352 1277
1353 if (AR_SREV_9280(ah)) 1278 if (AR_SREV_9280(ah))
1354 REG_WRITE_ARRAY(&ahp->ah_iniModesRxGain, modesIndex, regWrites); 1279 REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
1355 1280
1356 if (AR_SREV_9280(ah)) 1281 if (AR_SREV_9280(ah))
1357 REG_WRITE_ARRAY(&ahp->ah_iniModesTxGain, modesIndex, regWrites); 1282 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
1358 1283
1359 for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) { 1284 for (i = 0; i < ah->iniCommon.ia_rows; i++) {
1360 u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0); 1285 u32 reg = INI_RA(&ah->iniCommon, i, 0);
1361 u32 val = INI_RA(&ahp->ah_iniCommon, i, 1); 1286 u32 val = INI_RA(&ah->iniCommon, i, 1);
1362 1287
1363 REG_WRITE(ah, reg, val); 1288 REG_WRITE(ah, reg, val);
1364 1289
1365 if (reg >= 0x7800 && reg < 0x78a0 1290 if (reg >= 0x7800 && reg < 0x78a0
1366 && ah->ah_config.analog_shiftreg) { 1291 && ah->config.analog_shiftreg) {
1367 udelay(100); 1292 udelay(100);
1368 } 1293 }
1369 1294
@@ -1373,7 +1298,7 @@ static int ath9k_hw_process_ini(struct ath_hal *ah,
1373 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites); 1298 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);
1374 1299
1375 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) { 1300 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
1376 REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex, 1301 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
1377 regWrites); 1302 regWrites);
1378 } 1303 }
1379 1304
@@ -1381,13 +1306,12 @@ static int ath9k_hw_process_ini(struct ath_hal *ah,
1381 ath9k_hw_set_regs(ah, chan, macmode); 1306 ath9k_hw_set_regs(ah, chan, macmode);
1382 ath9k_hw_init_chain_masks(ah); 1307 ath9k_hw_init_chain_masks(ah);
1383 1308
1384 status = ath9k_hw_set_txpower(ah, chan, 1309 status = ah->eep_ops->set_txpower(ah, chan,
1385 ath9k_regd_get_ctl(ah, chan), 1310 ath9k_regd_get_ctl(ah, chan),
1386 ath9k_regd_get_antenna_allowed(ah, 1311 channel->max_antenna_gain * 2,
1387 chan), 1312 channel->max_power * 2,
1388 chan->maxRegTxPower * 2, 1313 min((u32) MAX_RATE_POWER,
1389 min((u32) MAX_RATE_POWER, 1314 (u32) ah->regulatory.power_limit));
1390 (u32) ah->ah_powerLimit));
1391 if (status != 0) { 1315 if (status != 0) {
1392 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, 1316 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1393 "error init'ing transmit power\n"); 1317 "error init'ing transmit power\n");
@@ -1407,7 +1331,7 @@ static int ath9k_hw_process_ini(struct ath_hal *ah,
1407/* Reset and Channel Switching Routines */ 1331/* Reset and Channel Switching Routines */
1408/****************************************/ 1332/****************************************/
1409 1333
1410static void ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan) 1334static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
1411{ 1335{
1412 u32 rfMode = 0; 1336 u32 rfMode = 0;
1413 1337
@@ -1427,12 +1351,12 @@ static void ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
1427 REG_WRITE(ah, AR_PHY_MODE, rfMode); 1351 REG_WRITE(ah, AR_PHY_MODE, rfMode);
1428} 1352}
1429 1353
1430static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah) 1354static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
1431{ 1355{
1432 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); 1356 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1433} 1357}
1434 1358
1435static inline void ath9k_hw_set_dma(struct ath_hal *ah) 1359static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1436{ 1360{
1437 u32 regval; 1361 u32 regval;
1438 1362
@@ -1442,7 +1366,7 @@ static inline void ath9k_hw_set_dma(struct ath_hal *ah)
1442 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK; 1366 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
1443 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B); 1367 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
1444 1368
1445 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel); 1369 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
1446 1370
1447 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK; 1371 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
1448 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B); 1372 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
@@ -1458,7 +1382,7 @@ static inline void ath9k_hw_set_dma(struct ath_hal *ah)
1458 } 1382 }
1459} 1383}
1460 1384
1461static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode) 1385static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1462{ 1386{
1463 u32 val; 1387 u32 val;
1464 1388
@@ -1482,7 +1406,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1482 } 1406 }
1483} 1407}
1484 1408
1485static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah, 1409static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
1486 u32 coef_scaled, 1410 u32 coef_scaled,
1487 u32 *coef_mantissa, 1411 u32 *coef_mantissa,
1488 u32 *coef_exponent) 1412 u32 *coef_exponent)
@@ -1501,7 +1425,7 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah,
1501 *coef_exponent = coef_exp - 16; 1425 *coef_exponent = coef_exp - 16;
1502} 1426}
1503 1427
1504static void ath9k_hw_set_delta_slope(struct ath_hal *ah, 1428static void ath9k_hw_set_delta_slope(struct ath_hw *ah,
1505 struct ath9k_channel *chan) 1429 struct ath9k_channel *chan)
1506{ 1430{
1507 u32 coef_scaled, ds_coef_exp, ds_coef_man; 1431 u32 coef_scaled, ds_coef_exp, ds_coef_man;
@@ -1535,7 +1459,7 @@ static void ath9k_hw_set_delta_slope(struct ath_hal *ah,
1535 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp); 1459 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
1536} 1460}
1537 1461
1538static bool ath9k_hw_set_reset(struct ath_hal *ah, int type) 1462static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1539{ 1463{
1540 u32 rst_flags; 1464 u32 rst_flags;
1541 u32 tmpReg; 1465 u32 tmpReg;
@@ -1562,11 +1486,11 @@ static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
1562 rst_flags |= AR_RTC_RC_MAC_COLD; 1486 rst_flags |= AR_RTC_RC_MAC_COLD;
1563 } 1487 }
1564 1488
1565 REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags); 1489 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1566 udelay(50); 1490 udelay(50);
1567 1491
1568 REG_WRITE(ah, (u16) (AR_RTC_RC), 0); 1492 REG_WRITE(ah, AR_RTC_RC, 0);
1569 if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) { 1493 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0)) {
1570 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 1494 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1571 "RTC stuck in MAC reset\n"); 1495 "RTC stuck in MAC reset\n");
1572 return false; 1496 return false;
@@ -1583,13 +1507,13 @@ static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
1583 return true; 1507 return true;
1584} 1508}
1585 1509
1586static bool ath9k_hw_set_reset_power_on(struct ath_hal *ah) 1510static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1587{ 1511{
1588 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1512 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1589 AR_RTC_FORCE_WAKE_ON_INT); 1513 AR_RTC_FORCE_WAKE_ON_INT);
1590 1514
1591 REG_WRITE(ah, (u16) (AR_RTC_RESET), 0); 1515 REG_WRITE(ah, AR_RTC_RESET, 0);
1592 REG_WRITE(ah, (u16) (AR_RTC_RESET), 1); 1516 REG_WRITE(ah, AR_RTC_RESET, 1);
1593 1517
1594 if (!ath9k_hw_wait(ah, 1518 if (!ath9k_hw_wait(ah,
1595 AR_RTC_STATUS, 1519 AR_RTC_STATUS,
@@ -1604,7 +1528,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1604 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); 1528 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1605} 1529}
1606 1530
1607static bool ath9k_hw_set_reset_reg(struct ath_hal *ah, u32 type) 1531static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1608{ 1532{
1609 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 1533 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1610 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); 1534 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
@@ -1622,12 +1546,11 @@ static bool ath9k_hw_set_reset_reg(struct ath_hal *ah, u32 type)
1622 } 1546 }
1623} 1547}
1624 1548
1625static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan, 1549static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
1626 enum ath9k_ht_macmode macmode) 1550 enum ath9k_ht_macmode macmode)
1627{ 1551{
1628 u32 phymode; 1552 u32 phymode;
1629 u32 enableDacFifo = 0; 1553 u32 enableDacFifo = 0;
1630 struct ath_hal_5416 *ahp = AH5416(ah);
1631 1554
1632 if (AR_SREV_9285_10_OR_LATER(ah)) 1555 if (AR_SREV_9285_10_OR_LATER(ah))
1633 enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) & 1556 enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
@@ -1643,7 +1566,7 @@ static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
1643 (chan->chanmode == CHANNEL_G_HT40PLUS)) 1566 (chan->chanmode == CHANNEL_G_HT40PLUS))
1644 phymode |= AR_PHY_FC_DYN2040_PRI_CH; 1567 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1645 1568
1646 if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25) 1569 if (ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1647 phymode |= AR_PHY_FC_DYN2040_EXT_CH; 1570 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1648 } 1571 }
1649 REG_WRITE(ah, AR_PHY_TURBO, phymode); 1572 REG_WRITE(ah, AR_PHY_TURBO, phymode);
@@ -1654,54 +1577,27 @@ static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
1654 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); 1577 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1655} 1578}
1656 1579
1657static bool ath9k_hw_chip_reset(struct ath_hal *ah, 1580static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1658 struct ath9k_channel *chan) 1581 struct ath9k_channel *chan)
1659{ 1582{
1660 struct ath_hal_5416 *ahp = AH5416(ah);
1661
1662 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) 1583 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
1663 return false; 1584 return false;
1664 1585
1665 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1586 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1666 return false; 1587 return false;
1667 1588
1668 ahp->ah_chipFullSleep = false; 1589 ah->chip_fullsleep = false;
1669
1670 ath9k_hw_init_pll(ah, chan); 1590 ath9k_hw_init_pll(ah, chan);
1671
1672 ath9k_hw_set_rfmode(ah, chan); 1591 ath9k_hw_set_rfmode(ah, chan);
1673 1592
1674 return true; 1593 return true;
1675} 1594}
1676 1595
1677static struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah, 1596static bool ath9k_hw_channel_change(struct ath_hw *ah,
1678 struct ath9k_channel *chan)
1679{
1680 if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) {
1681 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1682 "invalid channel %u/0x%x; not marked as "
1683 "2GHz or 5GHz\n", chan->channel, chan->channelFlags);
1684 return NULL;
1685 }
1686
1687 if (!IS_CHAN_OFDM(chan) &&
1688 !IS_CHAN_B(chan) &&
1689 !IS_CHAN_HT20(chan) &&
1690 !IS_CHAN_HT40(chan)) {
1691 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1692 "invalid channel %u/0x%x; not marked as "
1693 "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
1694 chan->channel, chan->channelFlags);
1695 return NULL;
1696 }
1697
1698 return ath9k_regd_check_channel(ah, chan);
1699}
1700
1701static bool ath9k_hw_channel_change(struct ath_hal *ah,
1702 struct ath9k_channel *chan, 1597 struct ath9k_channel *chan,
1703 enum ath9k_ht_macmode macmode) 1598 enum ath9k_ht_macmode macmode)
1704{ 1599{
1600 struct ieee80211_channel *channel = chan->chan;
1705 u32 synthDelay, qnum; 1601 u32 synthDelay, qnum;
1706 1602
1707 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1603 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1736,12 +1632,12 @@ static bool ath9k_hw_channel_change(struct ath_hal *ah,
1736 } 1632 }
1737 } 1633 }
1738 1634
1739 if (ath9k_hw_set_txpower(ah, chan, 1635 if (ah->eep_ops->set_txpower(ah, chan,
1740 ath9k_regd_get_ctl(ah, chan), 1636 ath9k_regd_get_ctl(ah, chan),
1741 ath9k_regd_get_antenna_allowed(ah, chan), 1637 channel->max_antenna_gain * 2,
1742 chan->maxRegTxPower * 2, 1638 channel->max_power * 2,
1743 min((u32) MAX_RATE_POWER, 1639 min((u32) MAX_RATE_POWER,
1744 (u32) ah->ah_powerLimit)) != 0) { 1640 (u32) ah->regulatory.power_limit)) != 0) {
1745 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1641 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1746 "error init'ing transmit power\n"); 1642 "error init'ing transmit power\n");
1747 return false; 1643 return false;
@@ -1771,7 +1667,7 @@ static bool ath9k_hw_channel_change(struct ath_hal *ah,
1771 return true; 1667 return true;
1772} 1668}
1773 1669
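Both ath9k_hw_process_ini() and ath9k_hw_channel_change() above now reach EEPROM-dependent work through a function-pointer table (ah->eep_ops->set_txpower, ->set_addac, ->set_board_values, ->get_spur_channel) instead of fixed helpers, so different EEPROM layouts can supply their own handlers. A self-contained sketch of that dispatch, with made-up handler bodies and parameters:

#include <stdbool.h>
#include <stdio.h>

struct hw;				/* forward declaration */

struct eeprom_ops {			/* one vtable per EEPROM layout */
	bool (*set_board_values)(struct hw *hw);
	int  (*set_txpower)(struct hw *hw, int ctl, int max_gain, int max_pwr);
};

struct hw {
	const struct eeprom_ops *eep_ops;
};

/* Handlers for one EEPROM layout; the bodies are placeholders. */
static bool def_set_board_values(struct hw *hw)
{
	(void)hw;
	return true;
}

static int def_set_txpower(struct hw *hw, int ctl, int max_gain, int max_pwr)
{
	(void)hw;
	printf("txpower: ctl=%d gain=%d limit=%d\n", ctl, max_gain, max_pwr);
	return 0;
}

static const struct eeprom_ops eep_def_ops = {
	.set_board_values = def_set_board_values,
	.set_txpower      = def_set_txpower,
};

static int channel_change(struct hw *hw)
{
	/* callers no longer care which EEPROM variant is present */
	if (!hw->eep_ops->set_board_values(hw))
		return -1;
	return hw->eep_ops->set_txpower(hw, 0, 6, 40);
}

int main(void)
{
	struct hw hw = { .eep_ops = &eep_def_ops };

	return channel_change(&hw);
}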
1774static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan) 1670static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
1775{ 1671{
1776 int bb_spur = AR_NO_SPUR; 1672 int bb_spur = AR_NO_SPUR;
1777 int freq; 1673 int freq;
@@ -1805,9 +1701,9 @@ static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, struct ath9k_channel
1805 ath9k_hw_get_channel_centers(ah, chan, &centers); 1701 ath9k_hw_get_channel_centers(ah, chan, &centers);
1806 freq = centers.synth_center; 1702 freq = centers.synth_center;
1807 1703
1808 ah->ah_config.spurmode = SPUR_ENABLE_EEPROM; 1704 ah->config.spurmode = SPUR_ENABLE_EEPROM;
1809 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 1705 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
1810 cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz); 1706 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
1811 1707
1812 if (is2GHz) 1708 if (is2GHz)
1813 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ; 1709 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
@@ -1918,9 +1814,9 @@ static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, struct ath9k_channel
1918 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { 1814 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
1919 1815
1920 /* workaround for gcc bug #37014 */ 1816 /* workaround for gcc bug #37014 */
1921 volatile int tmp = abs(cur_vit_mask - bin); 1817 volatile int tmp_v = abs(cur_vit_mask - bin);
1922 1818
1923 if (tmp < 75) 1819 if (tmp_v < 75)
1924 mask_amt = 1; 1820 mask_amt = 1;
1925 else 1821 else
1926 mask_amt = 0; 1822 mask_amt = 0;
@@ -2021,7 +1917,7 @@ static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, struct ath9k_channel
2021 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 1917 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
2022} 1918}
2023 1919
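The loop above (and the non-AR9280 variant that follows) keeps the result of abs() in a volatile temporary purely as a workaround for gcc bug #37014, where folding abs() straight into the comparison was miscompiled; the rename from tmp to tmp_v simply avoids reusing an outer variable name. A compact illustration of the shape of that workaround, not the driver code itself:

#include <stdio.h>
#include <stdlib.h>

/* Classify a spur bin by its distance from the viterbi mask position. */
static int mask_amount(int cur_vit_mask, int bin)
{
	/*
	 * Force the abs() result through a volatile temporary so the
	 * compiler cannot fold the call into the comparison (the
	 * gcc bug #37014 style workaround used above).
	 */
	volatile int tmp_v = abs(cur_vit_mask - bin);

	return (tmp_v < 75) ? 1 : 0;
}

int main(void)
{
	printf("%d %d\n", mask_amount(100, 40), mask_amount(100, 10));
	return 0;
}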
2024static void ath9k_hw_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan) 1920static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
2025{ 1921{
2026 int bb_spur = AR_NO_SPUR; 1922 int bb_spur = AR_NO_SPUR;
2027 int bin, cur_bin; 1923 int bin, cur_bin;
@@ -2050,7 +1946,7 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *cha
2050 memset(&mask_p, 0, sizeof(int8_t) * 123); 1946 memset(&mask_p, 0, sizeof(int8_t) * 123);
2051 1947
2052 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 1948 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
2053 cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz); 1949 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
2054 if (AR_NO_SPUR == cur_bb_spur) 1950 if (AR_NO_SPUR == cur_bb_spur)
2055 break; 1951 break;
2056 cur_bb_spur = cur_bb_spur - (chan->channel * 10); 1952 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
@@ -2119,9 +2015,9 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *cha
2119 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { 2015 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
2120 2016
2121 /* workaround for gcc bug #37014 */ 2017 /* workaround for gcc bug #37014 */
2122 volatile int tmp = abs(cur_vit_mask - bin); 2018 volatile int tmp_v = abs(cur_vit_mask - bin);
2123 2019
2124 if (tmp < 75) 2020 if (tmp_v < 75)
2125 mask_amt = 1; 2021 mask_amt = 1;
2126 else 2022 else
2127 mask_amt = 0; 2023 mask_amt = 0;
@@ -2222,58 +2118,47 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *cha
2222 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 2118 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
2223} 2119}
2224 2120
2225bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan, 2121int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2226 enum ath9k_ht_macmode macmode, 2122 bool bChannelChange)
2227 u8 txchainmask, u8 rxchainmask,
2228 enum ath9k_ht_extprotspacing extprotspacing,
2229 bool bChannelChange, int *status)
2230{ 2123{
2231 u32 saveLedState; 2124 u32 saveLedState;
2232 struct ath_hal_5416 *ahp = AH5416(ah); 2125 struct ath_softc *sc = ah->ah_sc;
2233 struct ath9k_channel *curchan = ah->ah_curchan; 2126 struct ath9k_channel *curchan = ah->curchan;
2234 u32 saveDefAntenna; 2127 u32 saveDefAntenna;
2235 u32 macStaId1; 2128 u32 macStaId1;
2236 int ecode; 2129 int i, rx_chainmask, r;
2237 int i, rx_chainmask;
2238
2239 ahp->ah_extprotspacing = extprotspacing;
2240 ahp->ah_txchainmask = txchainmask;
2241 ahp->ah_rxchainmask = rxchainmask;
2242 2130
2243 if (AR_SREV_9280(ah)) { 2131 ah->extprotspacing = sc->ht_extprotspacing;
2244 ahp->ah_txchainmask &= 0x3; 2132 ah->txchainmask = sc->tx_chainmask;
2245 ahp->ah_rxchainmask &= 0x3; 2133 ah->rxchainmask = sc->rx_chainmask;
2246 }
2247 2134
2248 if (ath9k_hw_check_chan(ah, chan) == NULL) { 2135 if (AR_SREV_9285(ah)) {
2249 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 2136 ah->txchainmask &= 0x1;
2250 "invalid channel %u/0x%x; no mapping\n", 2137 ah->rxchainmask &= 0x1;
2251 chan->channel, chan->channelFlags); 2138 } else if (AR_SREV_9280(ah)) {
2252 ecode = -EINVAL; 2139 ah->txchainmask &= 0x3;
2253 goto bad; 2140 ah->rxchainmask &= 0x3;
2254 } 2141 }
2255 2142
2256 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 2143 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
2257 ecode = -EIO; 2144 return -EIO;
2258 goto bad;
2259 }
2260 2145
2261 if (curchan) 2146 if (curchan)
2262 ath9k_hw_getnf(ah, curchan); 2147 ath9k_hw_getnf(ah, curchan);
2263 2148
2264 if (bChannelChange && 2149 if (bChannelChange &&
2265 (ahp->ah_chipFullSleep != true) && 2150 (ah->chip_fullsleep != true) &&
2266 (ah->ah_curchan != NULL) && 2151 (ah->curchan != NULL) &&
2267 (chan->channel != ah->ah_curchan->channel) && 2152 (chan->channel != ah->curchan->channel) &&
2268 ((chan->channelFlags & CHANNEL_ALL) == 2153 ((chan->channelFlags & CHANNEL_ALL) ==
2269 (ah->ah_curchan->channelFlags & CHANNEL_ALL)) && 2154 (ah->curchan->channelFlags & CHANNEL_ALL)) &&
2270 (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) && 2155 (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) &&
2271 !IS_CHAN_A_5MHZ_SPACED(ah->ah_curchan)))) { 2156 !IS_CHAN_A_5MHZ_SPACED(ah->curchan)))) {
2272 2157
2273 if (ath9k_hw_channel_change(ah, chan, macmode)) { 2158 if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) {
2274 ath9k_hw_loadnf(ah, ah->ah_curchan); 2159 ath9k_hw_loadnf(ah, ah->curchan);
2275 ath9k_hw_start_nfcal(ah); 2160 ath9k_hw_start_nfcal(ah);
2276 return true; 2161 return 0;
2277 } 2162 }
2278 } 2163 }
2279 2164
@@ -2291,28 +2176,32 @@ bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
2291 2176
2292 if (!ath9k_hw_chip_reset(ah, chan)) { 2177 if (!ath9k_hw_chip_reset(ah, chan)) {
2293 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "chip reset failed\n"); 2178 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "chip reset failed\n");
2294 ecode = -EINVAL; 2179 return -EINVAL;
2295 goto bad;
2296 } 2180 }
2297 2181
2298 if (AR_SREV_9280(ah)) { 2182 if (AR_SREV_9280_10_OR_LATER(ah))
2299 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, 2183 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
2300 AR_GPIO_JTAG_DISABLE);
2301 2184
2302 if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes)) { 2185 r = ath9k_hw_process_ini(ah, chan, sc->tx_chan_width);
2303 if (IS_CHAN_5GHZ(chan)) 2186 if (r)
2304 ath9k_hw_set_gpio(ah, 9, 0); 2187 return r;
2305 else
2306 ath9k_hw_set_gpio(ah, 9, 1);
2307 }
2308 ath9k_hw_cfg_output(ah, 9, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
2309 }
2310 2188
2311 ecode = ath9k_hw_process_ini(ah, chan, macmode); 2189 /* Setup MFP options for CCMP */
2312 if (ecode != 0) { 2190 if (AR_SREV_9280_20_OR_LATER(ah)) {
2313 ecode = -EINVAL; 2191 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
2314 goto bad; 2192 * frames when constructing CCMP AAD. */
2315 } 2193 REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
2194 0xc7ff);
2195 ah->sw_mgmt_crypto = false;
2196 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
2197 /* Disable hardware crypto for management frames */
2198 REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
2199 AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
2200 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
2201 AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
2202 ah->sw_mgmt_crypto = true;
2203 } else
2204 ah->sw_mgmt_crypto = true;
2316 2205
2317 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 2206 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
2318 ath9k_hw_set_delta_slope(ah, chan); 2207 ath9k_hw_set_delta_slope(ah, chan);
@@ -2322,61 +2211,56 @@ bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
2322 else 2211 else
2323 ath9k_hw_spur_mitigate(ah, chan); 2212 ath9k_hw_spur_mitigate(ah, chan);
2324 2213
2325 if (!ath9k_hw_eeprom_set_board_values(ah, chan)) { 2214 if (!ah->eep_ops->set_board_values(ah, chan)) {
2326 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 2215 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2327 "error setting board options\n"); 2216 "error setting board options\n");
2328 ecode = -EIO; 2217 return -EIO;
2329 goto bad;
2330 } 2218 }
2331 2219
2332 ath9k_hw_decrease_chain_power(ah, chan); 2220 ath9k_hw_decrease_chain_power(ah, chan);
2333 2221
2334 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ahp->ah_macaddr)); 2222 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ah->macaddr));
2335 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ahp->ah_macaddr + 4) 2223 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ah->macaddr + 4)
2336 | macStaId1 2224 | macStaId1
2337 | AR_STA_ID1_RTS_USE_DEF 2225 | AR_STA_ID1_RTS_USE_DEF
2338 | (ah->ah_config. 2226 | (ah->config.
2339 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) 2227 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
2340 | ahp->ah_staId1Defaults); 2228 | ah->sta_id1_defaults);
2341 ath9k_hw_set_operating_mode(ah, ah->ah_opmode); 2229 ath9k_hw_set_operating_mode(ah, ah->opmode);
2342 2230
2343 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask)); 2231 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask));
2344 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4)); 2232 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
2345 2233
2346 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); 2234 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
2347 2235
2348 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid)); 2236 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid));
2349 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) | 2237 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
2350 ((ahp->ah_assocId & 0x3fff) << AR_BSS_ID1_AID_S)); 2238 ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
2351 2239
2352 REG_WRITE(ah, AR_ISR, ~0); 2240 REG_WRITE(ah, AR_ISR, ~0);
2353 2241
2354 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 2242 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
2355 2243
2356 if (AR_SREV_9280_10_OR_LATER(ah)) { 2244 if (AR_SREV_9280_10_OR_LATER(ah)) {
2357 if (!(ath9k_hw_ar9280_set_channel(ah, chan))) { 2245 if (!(ath9k_hw_ar9280_set_channel(ah, chan)))
2358 ecode = -EIO; 2246 return -EIO;
2359 goto bad;
2360 }
2361 } else { 2247 } else {
2362 if (!(ath9k_hw_set_channel(ah, chan))) { 2248 if (!(ath9k_hw_set_channel(ah, chan)))
2363 ecode = -EIO; 2249 return -EIO;
2364 goto bad;
2365 }
2366 } 2250 }
2367 2251
2368 for (i = 0; i < AR_NUM_DCU; i++) 2252 for (i = 0; i < AR_NUM_DCU; i++)
2369 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); 2253 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
2370 2254
2371 ahp->ah_intrTxqs = 0; 2255 ah->intr_txqs = 0;
2372 for (i = 0; i < ah->ah_caps.total_queues; i++) 2256 for (i = 0; i < ah->caps.total_queues; i++)
2373 ath9k_hw_resettxqueue(ah, i); 2257 ath9k_hw_resettxqueue(ah, i);
2374 2258
2375 ath9k_hw_init_interrupt_masks(ah, ah->ah_opmode); 2259 ath9k_hw_init_interrupt_masks(ah, ah->opmode);
2376 ath9k_hw_init_qos(ah); 2260 ath9k_hw_init_qos(ah);
2377 2261
2378#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 2262#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2379 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2263 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2380 ath9k_enable_rfkill(ah); 2264 ath9k_enable_rfkill(ah);
2381#endif 2265#endif
2382 ath9k_hw_init_user_settings(ah); 2266 ath9k_hw_init_user_settings(ah);
@@ -2388,7 +2272,7 @@ bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
2388 2272
2389 REG_WRITE(ah, AR_OBS, 8); 2273 REG_WRITE(ah, AR_OBS, 8);
2390 2274
2391 if (ahp->ah_intrMitigation) { 2275 if (ah->intr_mitigation) {
2392 2276
2393 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 2277 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
2394 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); 2278 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
@@ -2396,12 +2280,10 @@ bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
2396 2280
2397 ath9k_hw_init_bb(ah, chan); 2281 ath9k_hw_init_bb(ah, chan);
2398 2282
2399 if (!ath9k_hw_init_cal(ah, chan)){ 2283 if (!ath9k_hw_init_cal(ah, chan))
2400 ecode = -EIO;; 2284 return -EIO;;
2401 goto bad;
2402 }
2403 2285
2404 rx_chainmask = ahp->ah_rxchainmask; 2286 rx_chainmask = ah->rxchainmask;
2405 if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) { 2287 if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
2406 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); 2288 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
2407 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); 2289 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
@@ -2428,22 +2310,18 @@ bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
2428#endif 2310#endif
2429 } 2311 }
2430 2312
2431 return true; 2313 return 0;
2432bad:
2433 if (status)
2434 *status = ecode;
2435 return false;
2436} 2314}
2437 2315
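ath9k_hw_reset() changes calling convention in this patch: instead of returning bool and passing the failure reason back through an int *status out-parameter via a shared bad: label, it now returns 0 on success or a negative errno directly at each failure point. A small sketch of how that difference looks to a caller (function names are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Old style: success flag plus out-parameter for the reason. */
static bool hw_reset_old(int fail, int *status)
{
	if (fail) {
		if (status)
			*status = -EIO;
		return false;
	}
	return true;
}

/* New style: 0 on success, negative errno on failure. */
static int hw_reset_new(int fail)
{
	if (fail)
		return -EIO;	/* no goto bad, no out-parameter */
	return 0;
}

int main(void)
{
	int status;

	if (!hw_reset_old(1, &status))
		printf("old: reset failed, status %d\n", status);

	int r = hw_reset_new(1);
	if (r)
		printf("new: reset failed: %s\n", strerror(-r));
	return 0;
}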
2438/************************/ 2316/************************/
2439/* Key Cache Management */ 2317/* Key Cache Management */
2440/************************/ 2318/************************/
2441 2319
2442bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry) 2320bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
2443{ 2321{
2444 u32 keyType; 2322 u32 keyType;
2445 2323
2446 if (entry >= ah->ah_caps.keycache_size) { 2324 if (entry >= ah->caps.keycache_size) {
2447 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, 2325 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
2448 "entry %u out of range\n", entry); 2326 "entry %u out of range\n", entry);
2449 return false; 2327 return false;
@@ -2470,17 +2348,17 @@ bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry)
2470 2348
2471 } 2349 }
2472 2350
2473 if (ah->ah_curchan == NULL) 2351 if (ah->curchan == NULL)
2474 return true; 2352 return true;
2475 2353
2476 return true; 2354 return true;
2477} 2355}
2478 2356
2479bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry, const u8 *mac) 2357bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
2480{ 2358{
2481 u32 macHi, macLo; 2359 u32 macHi, macLo;
2482 2360
2483 if (entry >= ah->ah_caps.keycache_size) { 2361 if (entry >= ah->caps.keycache_size) {
2484 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, 2362 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
2485 "entry %u out of range\n", entry); 2363 "entry %u out of range\n", entry);
2486 return false; 2364 return false;
@@ -2504,17 +2382,16 @@ bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry, const u8 *mac)
2504 return true; 2382 return true;
2505} 2383}
2506 2384
2507bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry, 2385bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2508 const struct ath9k_keyval *k, 2386 const struct ath9k_keyval *k,
2509 const u8 *mac, int xorKey) 2387 const u8 *mac, int xorKey)
2510{ 2388{
2511 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 2389 const struct ath9k_hw_capabilities *pCap = &ah->caps;
2512 u32 key0, key1, key2, key3, key4; 2390 u32 key0, key1, key2, key3, key4;
2513 u32 keyType; 2391 u32 keyType;
2514 u32 xorMask = xorKey ? 2392 u32 xorMask = xorKey ?
2515 (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8 2393 (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8
2516 | ATH9K_KEY_XOR) : 0; 2394 | ATH9K_KEY_XOR) : 0;
2517 struct ath_hal_5416 *ahp = AH5416(ah);
2518 2395
2519 if (entry >= pCap->keycache_size) { 2396 if (entry >= pCap->keycache_size) {
2520 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, 2397 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
@@ -2530,7 +2407,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
2530 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) { 2407 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
2531 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, 2408 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
2532 "AES-CCM not supported by mac rev 0x%x\n", 2409 "AES-CCM not supported by mac rev 0x%x\n",
2533 ah->ah_macRev); 2410 ah->hw_version.macRev);
2534 return false; 2411 return false;
2535 } 2412 }
2536 keyType = AR_KEYTABLE_TYPE_CCM; 2413 keyType = AR_KEYTABLE_TYPE_CCM;
@@ -2585,7 +2462,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
2585 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); 2462 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
2586 (void) ath9k_hw_keysetmac(ah, entry, mac); 2463 (void) ath9k_hw_keysetmac(ah, entry, mac);
2587 2464
2588 if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) { 2465 if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) {
2589 u32 mic0, mic1, mic2, mic3, mic4; 2466 u32 mic0, mic1, mic2, mic3, mic4;
2590 2467
2591 mic0 = get_unaligned_le32(k->kv_mic + 0); 2468 mic0 = get_unaligned_le32(k->kv_mic + 0);
@@ -2629,15 +2506,15 @@ bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
2629 (void) ath9k_hw_keysetmac(ah, entry, mac); 2506 (void) ath9k_hw_keysetmac(ah, entry, mac);
2630 } 2507 }
2631 2508
2632 if (ah->ah_curchan == NULL) 2509 if (ah->curchan == NULL)
2633 return true; 2510 return true;
2634 2511
2635 return true; 2512 return true;
2636} 2513}
2637 2514
2638bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry) 2515bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
2639{ 2516{
2640 if (entry < ah->ah_caps.keycache_size) { 2517 if (entry < ah->caps.keycache_size) {
2641 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry)); 2518 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
2642 if (val & AR_KEYTABLE_VALID) 2519 if (val & AR_KEYTABLE_VALID)
2643 return true; 2520 return true;
@@ -2649,7 +2526,7 @@ bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry)
2649/* Power Management (Chipset) */ 2526/* Power Management (Chipset) */
2650/******************************/ 2527/******************************/
2651 2528
2652static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip) 2529static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2653{ 2530{
2654 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2531 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2655 if (setChip) { 2532 if (setChip) {
@@ -2658,16 +2535,16 @@ static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip)
2658 if (!AR_SREV_9100(ah)) 2535 if (!AR_SREV_9100(ah))
2659 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 2536 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2660 2537
2661 REG_CLR_BIT(ah, (u16) (AR_RTC_RESET), 2538 REG_CLR_BIT(ah, (AR_RTC_RESET),
2662 AR_RTC_RESET_EN); 2539 AR_RTC_RESET_EN);
2663 } 2540 }
2664} 2541}
2665 2542
2666static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip) 2543static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2667{ 2544{
2668 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2545 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2669 if (setChip) { 2546 if (setChip) {
2670 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 2547 struct ath9k_hw_capabilities *pCap = &ah->caps;
2671 2548
2672 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 2549 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2673 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 2550 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
@@ -2679,8 +2556,7 @@ static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip)
2679 } 2556 }
2680} 2557}
2681 2558
2682static bool ath9k_hw_set_power_awake(struct ath_hal *ah, 2559static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2683 int setChip)
2684{ 2560{
2685 u32 val; 2561 u32 val;
2686 int i; 2562 int i;
@@ -2721,20 +2597,18 @@ static bool ath9k_hw_set_power_awake(struct ath_hal *ah,
2721 return true; 2597 return true;
2722} 2598}
2723 2599
2724bool ath9k_hw_setpower(struct ath_hal *ah, 2600bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2725 enum ath9k_power_mode mode)
2726{ 2601{
2727 struct ath_hal_5416 *ahp = AH5416(ah); 2602 int status = true, setChip = true;
2728 static const char *modes[] = { 2603 static const char *modes[] = {
2729 "AWAKE", 2604 "AWAKE",
2730 "FULL-SLEEP", 2605 "FULL-SLEEP",
2731 "NETWORK SLEEP", 2606 "NETWORK SLEEP",
2732 "UNDEFINED" 2607 "UNDEFINED"
2733 }; 2608 };
2734 int status = true, setChip = true;
2735 2609
2736 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s -> %s (%s)\n", 2610 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s -> %s (%s)\n",
2737 modes[ahp->ah_powerMode], modes[mode], 2611 modes[ah->power_mode], modes[mode],
2738 setChip ? "set chip " : ""); 2612 setChip ? "set chip " : "");
2739 2613
2740 switch (mode) { 2614 switch (mode) {
@@ -2743,7 +2617,7 @@ bool ath9k_hw_setpower(struct ath_hal *ah,
2743 break; 2617 break;
2744 case ATH9K_PM_FULL_SLEEP: 2618 case ATH9K_PM_FULL_SLEEP:
2745 ath9k_set_power_sleep(ah, setChip); 2619 ath9k_set_power_sleep(ah, setChip);
2746 ahp->ah_chipFullSleep = true; 2620 ah->chip_fullsleep = true;
2747 break; 2621 break;
2748 case ATH9K_PM_NETWORK_SLEEP: 2622 case ATH9K_PM_NETWORK_SLEEP:
2749 ath9k_set_power_network_sleep(ah, setChip); 2623 ath9k_set_power_network_sleep(ah, setChip);
@@ -2753,41 +2627,57 @@ bool ath9k_hw_setpower(struct ath_hal *ah,
2753 "Unknown power mode %u\n", mode); 2627 "Unknown power mode %u\n", mode);
2754 return false; 2628 return false;
2755 } 2629 }
2756 ahp->ah_powerMode = mode; 2630 ah->power_mode = mode;
2757 2631
2758 return status; 2632 return status;
2759} 2633}
2760 2634
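ath9k_hw_setpower() keeps two pieces of bookkeeping that the reset path relies on: the last requested power_mode and the chip_fullsleep flag set on full sleep, both of which this patch merely moves from the old ath_hal_5416 wrapper onto struct ath_hw. A standalone model of that bookkeeping with simplified mode handling:

#include <stdbool.h>
#include <stdio.h>

enum power_mode { PM_AWAKE, PM_FULL_SLEEP, PM_NETWORK_SLEEP };

struct hw {
	enum power_mode power_mode;	/* last mode requested */
	bool chip_fullsleep;		/* true once fully put to sleep */
};

static bool setpower(struct hw *hw, enum power_mode mode)
{
	static const char *names[] = { "AWAKE", "FULL-SLEEP", "NETWORK SLEEP" };

	printf("%s -> %s\n", names[hw->power_mode], names[mode]);

	switch (mode) {
	case PM_AWAKE:
		/* a real driver polls the RTC here until the chip is up */
		break;
	case PM_FULL_SLEEP:
		hw->chip_fullsleep = true;	/* reset must fully wake up */
		break;
	case PM_NETWORK_SLEEP:
		break;
	}
	hw->power_mode = mode;
	return true;
}

int main(void)
{
	struct hw hw = { .power_mode = PM_AWAKE };

	setpower(&hw, PM_FULL_SLEEP);
	setpower(&hw, PM_AWAKE);
	return 0;
}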
2761void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore) 2635/*
2636 * Helper for ASPM support.
2637 *
2638 * Disable PLL when in L0s as well as receiver clock when in L1.
2639 * This power saving option must be enabled through the SerDes.
2640 *
2641 * Programming the SerDes must go through the same 288 bit serial shift
2642 * register as the other analog registers. Hence the 9 writes.
2643 */
2644void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore)
2762{ 2645{
2763 struct ath_hal_5416 *ahp = AH5416(ah);
2764 u8 i; 2646 u8 i;
2765 2647
2766 if (ah->ah_isPciExpress != true) 2648 if (ah->is_pciexpress != true)
2767 return; 2649 return;
2768 2650
2769 if (ah->ah_config.pcie_powersave_enable == 2) 2651 /* Do not touch SerDes registers */
2652 if (ah->config.pcie_powersave_enable == 2)
2770 return; 2653 return;
2771 2654
2655 /* Nothing to do on restore for 11N */
2772 if (restore) 2656 if (restore)
2773 return; 2657 return;
2774 2658
2775 if (AR_SREV_9280_20_OR_LATER(ah)) { 2659 if (AR_SREV_9280_20_OR_LATER(ah)) {
2776 for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) { 2660 /*
2777 REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0), 2661 * AR9280 2.0 or later chips use SerDes values from the
2778 INI_RA(&ahp->ah_iniPcieSerdes, i, 1)); 2662 * initvals.h initialized depending on chipset during
2663 * ath9k_hw_do_attach()
2664 */
2665 for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
2666 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
2667 INI_RA(&ah->iniPcieSerdes, i, 1));
2779 } 2668 }
2780 udelay(1000);
2781 } else if (AR_SREV_9280(ah) && 2669 } else if (AR_SREV_9280(ah) &&
2782 (ah->ah_macRev == AR_SREV_REVISION_9280_10)) { 2670 (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
2783 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00); 2671 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
2784 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 2672 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2785 2673
2674 /* RX shut off when elecidle is asserted */
2786 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019); 2675 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
2787 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820); 2676 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
2788 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560); 2677 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
2789 2678
2790 if (ah->ah_config.pcie_clock_req) 2679 /* Shut off CLKREQ active in L1 */
2680 if (ah->config.pcie_clock_req)
2791 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc); 2681 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
2792 else 2682 else
2793 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd); 2683 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
@@ -2796,42 +2686,59 @@ void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
2796 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); 2686 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2797 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007); 2687 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
2798 2688
2689 /* Load the new settings */
2799 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 2690 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2800 2691
2801 udelay(1000);
2802 } else { 2692 } else {
2803 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 2693 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
2804 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 2694 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2695
2696 /* RX shut off when elecidle is asserted */
2805 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039); 2697 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
2806 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824); 2698 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
2807 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579); 2699 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
2700
2701 /*
2702 * Ignore ah->config.pcie_clock_req setting for
2703 * pre-AR9280 11n
2704 */
2808 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff); 2705 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
2706
2809 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); 2707 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2810 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); 2708 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2811 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007); 2709 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
2710
2711 /* Load the new settings */
2812 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 2712 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2813 } 2713 }
2814 2714
2715 udelay(1000);
2716
2717 /* set bit 19 to allow forcing of pcie core into L1 state */
2815 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); 2718 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
2816 2719
2817 if (ah->ah_config.pcie_waen) { 2720 /* Several PCIe massages to ensure proper behaviour */
2818 REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen); 2721 if (ah->config.pcie_waen) {
2722 REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
2819 } else { 2723 } else {
2820 if (AR_SREV_9285(ah)) 2724 if (AR_SREV_9285(ah))
2821 REG_WRITE(ah, AR_WA, AR9285_WA_DEFAULT); 2725 REG_WRITE(ah, AR_WA, AR9285_WA_DEFAULT);
2726 /*
2727 * On AR9280 chips bit 22 of 0x4004 needs to be set,
2728 * otherwise the card may disappear.
2729 */
2822 else if (AR_SREV_9280(ah)) 2730 else if (AR_SREV_9280(ah))
2823 REG_WRITE(ah, AR_WA, AR9280_WA_DEFAULT); 2731 REG_WRITE(ah, AR_WA, AR9280_WA_DEFAULT);
2824 else 2732 else
2825 REG_WRITE(ah, AR_WA, AR_WA_DEFAULT); 2733 REG_WRITE(ah, AR_WA, AR_WA_DEFAULT);
2826 } 2734 }
2827
2828} 2735}
2829 2736
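For AR9280 2.0 and later the routine above no longer open-codes the nine SerDes words; it walks a two-column (register, value) initialization table chosen at attach time, then waits for the serial shift register to settle. A standalone model of that table walk with invented register addresses and values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ini_array {			/* rows of { register, value } */
	const uint32_t (*rows)[2];
	size_t nrows;
};

static void reg_write(uint32_t reg, uint32_t val)
{
	/* stand-in for a real MMIO write */
	printf("write 0x%04x = 0x%08x\n", (unsigned)reg, (unsigned)val);
}

static void config_serdes(const struct ini_array *ini)
{
	for (size_t i = 0; i < ini->nrows; i++)
		reg_write(ini->rows[i][0], ini->rows[i][1]);
	/* the driver then delays ~1 ms for the shift register to settle */
}

int main(void)
{
	/* invented values; the real tables live in the chip's initvals */
	static const uint32_t serdes[][2] = {
		{ 0x4040, 0x9248fd00 },
		{ 0x4040, 0x24924924 },
		{ 0x4044, 0x00000000 },
	};
	struct ini_array ini = { serdes, sizeof(serdes) / sizeof(serdes[0]) };

	config_serdes(&ini);
	return 0;
}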
2830/**********************/ 2737/**********************/
2831/* Interrupt Handling */ 2738/* Interrupt Handling */
2832/**********************/ 2739/**********************/
2833 2740
2834bool ath9k_hw_intrpend(struct ath_hal *ah) 2741bool ath9k_hw_intrpend(struct ath_hw *ah)
2835{ 2742{
2836 u32 host_isr; 2743 u32 host_isr;
2837 2744
@@ -2850,14 +2757,13 @@ bool ath9k_hw_intrpend(struct ath_hal *ah)
2850 return false; 2757 return false;
2851} 2758}
2852 2759
2853bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked) 2760bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
2854{ 2761{
2855 u32 isr = 0; 2762 u32 isr = 0;
2856 u32 mask2 = 0; 2763 u32 mask2 = 0;
2857 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 2764 struct ath9k_hw_capabilities *pCap = &ah->caps;
2858 u32 sync_cause = 0; 2765 u32 sync_cause = 0;
2859 bool fatal_int = false; 2766 bool fatal_int = false;
2860 struct ath_hal_5416 *ahp = AH5416(ah);
2861 2767
2862 if (!AR_SREV_9100(ah)) { 2768 if (!AR_SREV_9100(ah)) {
2863 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { 2769 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
@@ -2905,7 +2811,7 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
2905 2811
2906 *masked = isr & ATH9K_INT_COMMON; 2812 *masked = isr & ATH9K_INT_COMMON;
2907 2813
2908 if (ahp->ah_intrMitigation) { 2814 if (ah->intr_mitigation) {
2909 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) 2815 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
2910 *masked |= ATH9K_INT_RX; 2816 *masked |= ATH9K_INT_RX;
2911 } 2817 }
@@ -2920,12 +2826,12 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
2920 *masked |= ATH9K_INT_TX; 2826 *masked |= ATH9K_INT_TX;
2921 2827
2922 s0_s = REG_READ(ah, AR_ISR_S0_S); 2828 s0_s = REG_READ(ah, AR_ISR_S0_S);
2923 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); 2829 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
2924 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); 2830 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
2925 2831
2926 s1_s = REG_READ(ah, AR_ISR_S1_S); 2832 s1_s = REG_READ(ah, AR_ISR_S1_S);
2927 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); 2833 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
2928 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); 2834 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
2929 } 2835 }
2930 2836
2931 if (isr & AR_ISR_RXORN) { 2837 if (isr & AR_ISR_RXORN) {
@@ -2982,17 +2888,16 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
2982 return true; 2888 return true;
2983} 2889}
2984 2890
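ath9k_hw_getisr() above folds the secondary TX status words into ah->intr_txqs using the MS() mask-and-shift helper, so later processing knows which queues completed or failed. A tiny model of that extraction; the field masks and their layout here are illustrative, not the chip's:

#include <stdint.h>
#include <stdio.h>

/* MS(): extract a field given FIELD (mask) and FIELD_S (shift) macros. */
#define QCU_TXOK	0x000003ff
#define QCU_TXOK_S	0
#define QCU_TXERR	0x03ff0000
#define QCU_TXERR_S	16
#define MS(val, field)	(((val) & (field)) >> field##_S)

int main(void)
{
	uint32_t s0 = 0x00000003;	/* pretend: TXOK for queues 0 and 1 */
	uint32_t s1 = 0x00040000;	/* pretend: TXERR for queue 2 */
	uint32_t intr_txqs = 0;

	intr_txqs |= MS(s0, QCU_TXOK);	/* accumulate queues to service */
	intr_txqs |= MS(s1, QCU_TXERR);

	printf("tx queues needing service: 0x%x\n", (unsigned)intr_txqs);
	return 0;
}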
2985enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah) 2891enum ath9k_int ath9k_hw_intrget(struct ath_hw *ah)
2986{ 2892{
2987 return AH5416(ah)->ah_maskReg; 2893 return ah->mask_reg;
2988} 2894}
2989 2895
2990enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints) 2896enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2991{ 2897{
2992 struct ath_hal_5416 *ahp = AH5416(ah); 2898 u32 omask = ah->mask_reg;
2993 u32 omask = ahp->ah_maskReg;
2994 u32 mask, mask2; 2899 u32 mask, mask2;
2995 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 2900 struct ath9k_hw_capabilities *pCap = &ah->caps;
2996 2901
2997 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 2902 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
2998 2903
@@ -3013,18 +2918,18 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
3013 mask2 = 0; 2918 mask2 = 0;
3014 2919
3015 if (ints & ATH9K_INT_TX) { 2920 if (ints & ATH9K_INT_TX) {
3016 if (ahp->ah_txOkInterruptMask) 2921 if (ah->txok_interrupt_mask)
3017 mask |= AR_IMR_TXOK; 2922 mask |= AR_IMR_TXOK;
3018 if (ahp->ah_txDescInterruptMask) 2923 if (ah->txdesc_interrupt_mask)
3019 mask |= AR_IMR_TXDESC; 2924 mask |= AR_IMR_TXDESC;
3020 if (ahp->ah_txErrInterruptMask) 2925 if (ah->txerr_interrupt_mask)
3021 mask |= AR_IMR_TXERR; 2926 mask |= AR_IMR_TXERR;
3022 if (ahp->ah_txEolInterruptMask) 2927 if (ah->txeol_interrupt_mask)
3023 mask |= AR_IMR_TXEOL; 2928 mask |= AR_IMR_TXEOL;
3024 } 2929 }
3025 if (ints & ATH9K_INT_RX) { 2930 if (ints & ATH9K_INT_RX) {
3026 mask |= AR_IMR_RXERR; 2931 mask |= AR_IMR_RXERR;
3027 if (ahp->ah_intrMitigation) 2932 if (ah->intr_mitigation)
3028 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 2933 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
3029 else 2934 else
3030 mask |= AR_IMR_RXOK | AR_IMR_RXDESC; 2935 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3062,7 +2967,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
3062 AR_IMR_S2_TSFOOR | 2967 AR_IMR_S2_TSFOOR |
3063 AR_IMR_S2_GTT | AR_IMR_S2_CST); 2968 AR_IMR_S2_GTT | AR_IMR_S2_CST);
3064 REG_WRITE(ah, AR_IMR_S2, mask | mask2); 2969 REG_WRITE(ah, AR_IMR_S2, mask | mask2);
3065 ahp->ah_maskReg = ints; 2970 ah->mask_reg = ints;
3066 2971
3067 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 2972 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
3068 if (ints & ATH9K_INT_TIM_TIMER) 2973 if (ints & ATH9K_INT_TIM_TIMER)
@@ -3096,14 +3001,13 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
3096/* Beacon Handling */ 3001/* Beacon Handling */
3097/*******************/ 3002/*******************/
3098 3003
3099void ath9k_hw_beaconinit(struct ath_hal *ah, u32 next_beacon, u32 beacon_period) 3004void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3100{ 3005{
3101 struct ath_hal_5416 *ahp = AH5416(ah);
3102 int flags = 0; 3006 int flags = 0;
3103 3007
3104 ahp->ah_beaconInterval = beacon_period; 3008 ah->beacon_interval = beacon_period;
3105 3009
3106 switch (ah->ah_opmode) { 3010 switch (ah->opmode) {
3107 case NL80211_IFTYPE_STATION: 3011 case NL80211_IFTYPE_STATION:
3108 case NL80211_IFTYPE_MONITOR: 3012 case NL80211_IFTYPE_MONITOR:
3109 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 3013 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
@@ -3116,18 +3020,18 @@ void ath9k_hw_beaconinit(struct ath_hal *ah, u32 next_beacon, u32 beacon_period)
3116 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); 3020 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
3117 REG_WRITE(ah, AR_NEXT_NDP_TIMER, 3021 REG_WRITE(ah, AR_NEXT_NDP_TIMER,
3118 TU_TO_USEC(next_beacon + 3022 TU_TO_USEC(next_beacon +
3119 (ahp->ah_atimWindow ? ahp-> 3023 (ah->atim_window ? ah->
3120 ah_atimWindow : 1))); 3024 atim_window : 1)));
3121 flags |= AR_NDP_TIMER_EN; 3025 flags |= AR_NDP_TIMER_EN;
3122 case NL80211_IFTYPE_AP: 3026 case NL80211_IFTYPE_AP:
3123 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 3027 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
3124 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 3028 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
3125 TU_TO_USEC(next_beacon - 3029 TU_TO_USEC(next_beacon -
3126 ah->ah_config. 3030 ah->config.
3127 dma_beacon_response_time)); 3031 dma_beacon_response_time));
3128 REG_WRITE(ah, AR_NEXT_SWBA, 3032 REG_WRITE(ah, AR_NEXT_SWBA,
3129 TU_TO_USEC(next_beacon - 3033 TU_TO_USEC(next_beacon -
3130 ah->ah_config. 3034 ah->config.
3131 sw_beacon_response_time)); 3035 sw_beacon_response_time));
3132 flags |= 3036 flags |=
3133 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 3037 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
@@ -3135,7 +3039,7 @@ void ath9k_hw_beaconinit(struct ath_hal *ah, u32 next_beacon, u32 beacon_period)
3135 default: 3039 default:
3136 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, 3040 DPRINTF(ah->ah_sc, ATH_DBG_BEACON,
3137 "%s: unsupported opmode: %d\n", 3041 "%s: unsupported opmode: %d\n",
3138 __func__, ah->ah_opmode); 3042 __func__, ah->opmode);
3139 return; 3043 return;
3140 break; 3044 break;
3141 } 3045 }
@@ -3154,11 +3058,11 @@ void ath9k_hw_beaconinit(struct ath_hal *ah, u32 next_beacon, u32 beacon_period)
3154 REG_SET_BIT(ah, AR_TIMER_MODE, flags); 3058 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
3155} 3059}
3156 3060
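In ath9k_hw_beaconinit() the NL80211_IFTYPE_ADHOC case programs the ATIM/NDP timer and then deliberately falls through into the AP case so the TBTT, DMA-beacon-alert and SWBA timers are set up as well. A reduced model of that switch; the timer flag bits and response-time handling are simplified:

#include <stdio.h>

enum opmode { MODE_STATION, MODE_ADHOC, MODE_AP };

#define TU_TO_USEC(tu)	((unsigned)(tu) << 10)	/* 1 TU = 1024 us */

static void program_beacon_timers(enum opmode mode, unsigned next_tbtt,
				  unsigned atim_window)
{
	unsigned flags = 0;

	switch (mode) {
	case MODE_STATION:
		printf("TBTT timer at %u us\n", TU_TO_USEC(next_tbtt));
		flags |= 1u << 0;		/* TBTT timer enable */
		break;
	case MODE_ADHOC:
		printf("NDP timer at %u us\n",
		       TU_TO_USEC(next_tbtt + (atim_window ? atim_window : 1)));
		flags |= 1u << 1;		/* NDP timer enable */
		/* fall through: IBSS also needs the AP-style timers */
	case MODE_AP:
		printf("TBTT/DBA/SWBA timers around %u us\n",
		       TU_TO_USEC(next_tbtt));
		flags |= 1u << 0 | 1u << 2 | 1u << 3;
		break;
	}
	printf("timer mode flags: 0x%x\n", flags);
}

int main(void)
{
	program_beacon_timers(MODE_ADHOC, 100, 0);
	return 0;
}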
3157void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah, 3061void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3158 const struct ath9k_beacon_state *bs) 3062 const struct ath9k_beacon_state *bs)
3159{ 3063{
3160 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; 3064 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
3161 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 3065 struct ath9k_hw_capabilities *pCap = &ah->caps;
3162 3066
3163 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); 3067 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
3164 3068
@@ -3218,37 +3122,37 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
3218/* HW Capabilities */ 3122/* HW Capabilities */
3219/*******************/ 3123/*******************/
3220 3124
3221bool ath9k_hw_fill_cap_info(struct ath_hal *ah) 3125bool ath9k_hw_fill_cap_info(struct ath_hw *ah)
3222{ 3126{
3223 struct ath_hal_5416 *ahp = AH5416(ah); 3127 struct ath9k_hw_capabilities *pCap = &ah->caps;
3224 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3225 u16 capField = 0, eeval; 3128 u16 capField = 0, eeval;
3226 3129
3227 eeval = ath9k_hw_get_eeprom(ah, EEP_REG_0); 3130 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
3228 3131
3229 ah->ah_currentRD = eeval; 3132 ah->regulatory.current_rd = eeval;
3230 3133
3231 eeval = ath9k_hw_get_eeprom(ah, EEP_REG_1); 3134 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1);
3232 ah->ah_currentRDExt = eeval; 3135 ah->regulatory.current_rd_ext = eeval;
3233 3136
3234 capField = ath9k_hw_get_eeprom(ah, EEP_OP_CAP); 3137 capField = ah->eep_ops->get_eeprom(ah, EEP_OP_CAP);
3235 3138
3236 if (ah->ah_opmode != NL80211_IFTYPE_AP && 3139 if (ah->opmode != NL80211_IFTYPE_AP &&
3237 ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) { 3140 ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
3238 if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65) 3141 if (ah->regulatory.current_rd == 0x64 ||
3239 ah->ah_currentRD += 5; 3142 ah->regulatory.current_rd == 0x65)
3240 else if (ah->ah_currentRD == 0x41) 3143 ah->regulatory.current_rd += 5;
3241 ah->ah_currentRD = 0x43; 3144 else if (ah->regulatory.current_rd == 0x41)
3145 ah->regulatory.current_rd = 0x43;
3242 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 3146 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
3243 "regdomain mapped to 0x%x\n", ah->ah_currentRD); 3147 "regdomain mapped to 0x%x\n", ah->regulatory.current_rd);
3244 } 3148 }
3245 3149
3246 eeval = ath9k_hw_get_eeprom(ah, EEP_OP_MODE); 3150 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
3247 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX); 3151 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
3248 3152
3249 if (eeval & AR5416_OPFLAGS_11A) { 3153 if (eeval & AR5416_OPFLAGS_11A) {
3250 set_bit(ATH9K_MODE_11A, pCap->wireless_modes); 3154 set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
3251 if (ah->ah_config.ht_enable) { 3155 if (ah->config.ht_enable) {
3252 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20)) 3156 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
3253 set_bit(ATH9K_MODE_11NA_HT20, 3157 set_bit(ATH9K_MODE_11NA_HT20,
3254 pCap->wireless_modes); 3158 pCap->wireless_modes);
@@ -3264,7 +3168,7 @@ bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3264 if (eeval & AR5416_OPFLAGS_11G) { 3168 if (eeval & AR5416_OPFLAGS_11G) {
3265 set_bit(ATH9K_MODE_11B, pCap->wireless_modes); 3169 set_bit(ATH9K_MODE_11B, pCap->wireless_modes);
3266 set_bit(ATH9K_MODE_11G, pCap->wireless_modes); 3170 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
3267 if (ah->ah_config.ht_enable) { 3171 if (ah->config.ht_enable) {
3268 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20)) 3172 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
3269 set_bit(ATH9K_MODE_11NG_HT20, 3173 set_bit(ATH9K_MODE_11NG_HT20,
3270 pCap->wireless_modes); 3174 pCap->wireless_modes);
@@ -3277,18 +3181,18 @@ bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3277 } 3181 }
3278 } 3182 }
3279 3183
3280 pCap->tx_chainmask = ath9k_hw_get_eeprom(ah, EEP_TX_MASK); 3184 pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
3281 if ((ah->ah_isPciExpress) 3185 if ((ah->is_pciexpress)
3282 || (eeval & AR5416_OPFLAGS_11A)) { 3186 || (eeval & AR5416_OPFLAGS_11A)) {
3283 pCap->rx_chainmask = 3187 pCap->rx_chainmask =
3284 ath9k_hw_get_eeprom(ah, EEP_RX_MASK); 3188 ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
3285 } else { 3189 } else {
3286 pCap->rx_chainmask = 3190 pCap->rx_chainmask =
3287 (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7; 3191 (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7;
3288 } 3192 }
3289 3193
3290 if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0))) 3194 if (!(AR_SREV_9280(ah) && (ah->hw_version.macRev == 0)))
3291 ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA; 3195 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
3292 3196
3293 pCap->low_2ghz_chan = 2312; 3197 pCap->low_2ghz_chan = 2312;
3294 pCap->high_2ghz_chan = 2732; 3198 pCap->high_2ghz_chan = 2732;
@@ -3306,7 +3210,7 @@ bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3306 3210
3307 pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD; 3211 pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
3308 3212
3309 if (ah->ah_config.ht_enable) 3213 if (ah->config.ht_enable)
3310 pCap->hw_caps |= ATH9K_HW_CAP_HT; 3214 pCap->hw_caps |= ATH9K_HW_CAP_HT;
3311 else 3215 else
3312 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 3216 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
@@ -3332,7 +3236,9 @@ bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3332 pCap->num_mr_retries = 4; 3236 pCap->num_mr_retries = 4;
3333 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; 3237 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
3334 3238
3335 if (AR_SREV_9280_10_OR_LATER(ah)) 3239 if (AR_SREV_9285_10_OR_LATER(ah))
3240 pCap->num_gpio_pins = AR9285_NUM_GPIO;
3241 else if (AR_SREV_9280_10_OR_LATER(ah))
3336 pCap->num_gpio_pins = AR928X_NUM_GPIO; 3242 pCap->num_gpio_pins = AR928X_NUM_GPIO;
3337 else 3243 else
3338 pCap->num_gpio_pins = AR_NUM_GPIO; 3244 pCap->num_gpio_pins = AR_NUM_GPIO;
@@ -3355,22 +3261,22 @@ bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3355 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM; 3261 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
3356 3262
3357#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 3263#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
3358 ah->ah_rfsilent = ath9k_hw_get_eeprom(ah, EEP_RF_SILENT); 3264 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
3359 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) { 3265 if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
3360 ah->ah_rfkill_gpio = 3266 ah->rfkill_gpio =
3361 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL); 3267 MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL);
3362 ah->ah_rfkill_polarity = 3268 ah->rfkill_polarity =
3363 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY); 3269 MS(ah->rfsilent, EEP_RFSILENT_POLARITY);
3364 3270
3365 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; 3271 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3366 } 3272 }
3367#endif 3273#endif
3368 3274
3369 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) || 3275 if ((ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) ||
3370 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) || 3276 (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ||
3371 (ah->ah_macVersion == AR_SREV_VERSION_9160) || 3277 (ah->hw_version.macVersion == AR_SREV_VERSION_9160) ||
3372 (ah->ah_macVersion == AR_SREV_VERSION_9100) || 3278 (ah->hw_version.macVersion == AR_SREV_VERSION_9100) ||
3373 (ah->ah_macVersion == AR_SREV_VERSION_9280)) 3279 (ah->hw_version.macVersion == AR_SREV_VERSION_9280))
3374 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; 3280 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3375 else 3281 else
3376 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP; 3282 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
@@ -3380,7 +3286,7 @@ bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3380 else 3286 else
3381 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; 3287 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
3382 3288
3383 if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) { 3289 if (ah->regulatory.current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) {
3384 pCap->reg_cap = 3290 pCap->reg_cap =
3385 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A | 3291 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3386 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN | 3292 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
@@ -3395,18 +3301,23 @@ bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3395 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND; 3301 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
3396 3302
3397 pCap->num_antcfg_5ghz = 3303 pCap->num_antcfg_5ghz =
3398 ath9k_hw_get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ); 3304 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
3399 pCap->num_antcfg_2ghz = 3305 pCap->num_antcfg_2ghz =
3400 ath9k_hw_get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ); 3306 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
3307
3308 if (AR_SREV_9280_10_OR_LATER(ah) && btcoex_enable) {
3309 pCap->hw_caps |= ATH9K_HW_CAP_BT_COEX;
3310 ah->btactive_gpio = 6;
3311 ah->wlanactive_gpio = 5;
3312 }
3401 3313
3402 return true; 3314 return true;
3403} 3315}
3404 3316
3405bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type, 3317bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3406 u32 capability, u32 *result) 3318 u32 capability, u32 *result)
3407{ 3319{
3408 struct ath_hal_5416 *ahp = AH5416(ah); 3320 const struct ath9k_hw_capabilities *pCap = &ah->caps;
3409 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3410 3321
3411 switch (type) { 3322 switch (type) {
3412 case ATH9K_CAP_CIPHER: 3323 case ATH9K_CAP_CIPHER:
@@ -3426,17 +3337,17 @@ bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3426 case 0: 3337 case 0:
3427 return true; 3338 return true;
3428 case 1: 3339 case 1:
3429 return (ahp->ah_staId1Defaults & 3340 return (ah->sta_id1_defaults &
3430 AR_STA_ID1_CRPT_MIC_ENABLE) ? true : 3341 AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
3431 false; 3342 false;
3432 } 3343 }
3433 case ATH9K_CAP_TKIP_SPLIT: 3344 case ATH9K_CAP_TKIP_SPLIT:
3434 return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ? 3345 return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
3435 false : true; 3346 false : true;
3436 case ATH9K_CAP_WME_TKIPMIC: 3347 case ATH9K_CAP_WME_TKIPMIC:
3437 return 0; 3348 return 0;
3438 case ATH9K_CAP_PHYCOUNTERS: 3349 case ATH9K_CAP_PHYCOUNTERS:
3439 return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO; 3350 return ah->has_hw_phycounters ? 0 : -ENXIO;
3440 case ATH9K_CAP_DIVERSITY: 3351 case ATH9K_CAP_DIVERSITY:
3441 return (REG_READ(ah, AR_PHY_CCK_DETECT) & 3352 return (REG_READ(ah, AR_PHY_CCK_DETECT) &
3442 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ? 3353 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
@@ -3451,14 +3362,14 @@ bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3451 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) { 3362 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
3452 return false; 3363 return false;
3453 } else { 3364 } else {
3454 return (ahp->ah_staId1Defaults & 3365 return (ah->sta_id1_defaults &
3455 AR_STA_ID1_MCAST_KSRCH) ? true : 3366 AR_STA_ID1_MCAST_KSRCH) ? true :
3456 false; 3367 false;
3457 } 3368 }
3458 } 3369 }
3459 return false; 3370 return false;
3460 case ATH9K_CAP_TSF_ADJUST: 3371 case ATH9K_CAP_TSF_ADJUST:
3461 return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ? 3372 return (ah->misc_mode & AR_PCU_TX_ADD_TSF) ?
3462 true : false; 3373 true : false;
3463 case ATH9K_CAP_RFSILENT: 3374 case ATH9K_CAP_RFSILENT:
3464 if (capability == 3) 3375 if (capability == 3)
@@ -3474,13 +3385,13 @@ bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3474 case 0: 3385 case 0:
3475 return 0; 3386 return 0;
3476 case 1: 3387 case 1:
3477 *result = ah->ah_powerLimit; 3388 *result = ah->regulatory.power_limit;
3478 return 0; 3389 return 0;
3479 case 2: 3390 case 2:
3480 *result = ah->ah_maxPowerLevel; 3391 *result = ah->regulatory.max_power_level;
3481 return 0; 3392 return 0;
3482 case 3: 3393 case 3:
3483 *result = ah->ah_tpScale; 3394 *result = ah->regulatory.tp_scale;
3484 return 0; 3395 return 0;
3485 } 3396 }
3486 return false; 3397 return false;
@@ -3489,19 +3400,18 @@ bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3489 } 3400 }
3490} 3401}
3491 3402
3492bool ath9k_hw_setcapability(struct ath_hal *ah, enum ath9k_capability_type type, 3403bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3493 u32 capability, u32 setting, int *status) 3404 u32 capability, u32 setting, int *status)
3494{ 3405{
3495 struct ath_hal_5416 *ahp = AH5416(ah);
3496 u32 v; 3406 u32 v;
3497 3407
3498 switch (type) { 3408 switch (type) {
3499 case ATH9K_CAP_TKIP_MIC: 3409 case ATH9K_CAP_TKIP_MIC:
3500 if (setting) 3410 if (setting)
3501 ahp->ah_staId1Defaults |= 3411 ah->sta_id1_defaults |=
3502 AR_STA_ID1_CRPT_MIC_ENABLE; 3412 AR_STA_ID1_CRPT_MIC_ENABLE;
3503 else 3413 else
3504 ahp->ah_staId1Defaults &= 3414 ah->sta_id1_defaults &=
3505 ~AR_STA_ID1_CRPT_MIC_ENABLE; 3415 ~AR_STA_ID1_CRPT_MIC_ENABLE;
3506 return true; 3416 return true;
3507 case ATH9K_CAP_DIVERSITY: 3417 case ATH9K_CAP_DIVERSITY:
@@ -3514,15 +3424,15 @@ bool ath9k_hw_setcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3514 return true; 3424 return true;
3515 case ATH9K_CAP_MCAST_KEYSRCH: 3425 case ATH9K_CAP_MCAST_KEYSRCH:
3516 if (setting) 3426 if (setting)
3517 ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH; 3427 ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
3518 else 3428 else
3519 ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH; 3429 ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH;
3520 return true; 3430 return true;
3521 case ATH9K_CAP_TSF_ADJUST: 3431 case ATH9K_CAP_TSF_ADJUST:
3522 if (setting) 3432 if (setting)
3523 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF; 3433 ah->misc_mode |= AR_PCU_TX_ADD_TSF;
3524 else 3434 else
3525 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF; 3435 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
3526 return true; 3436 return true;
3527 default: 3437 default:
3528 return false; 3438 return false;
@@ -3533,7 +3443,7 @@ bool ath9k_hw_setcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3533/* GPIO / RFKILL / Antennae */ 3443/* GPIO / RFKILL / Antennae */
3534/****************************/ 3444/****************************/
3535 3445
3536static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah, 3446static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
3537 u32 gpio, u32 type) 3447 u32 gpio, u32 type)
3538{ 3448{
3539 int addr; 3449 int addr;
@@ -3561,11 +3471,11 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
3561 } 3471 }
3562} 3472}
3563 3473
3564void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio) 3474void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
3565{ 3475{
3566 u32 gpio_shift; 3476 u32 gpio_shift;
3567 3477
3568 ASSERT(gpio < ah->ah_caps.num_gpio_pins); 3478 ASSERT(gpio < ah->caps.num_gpio_pins);
3569 3479
3570 gpio_shift = gpio << 1; 3480 gpio_shift = gpio << 1;
3571 3481
@@ -3575,22 +3485,23 @@ void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio)
3575 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 3485 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3576} 3486}
3577 3487
3578u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio) 3488u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3579{ 3489{
3580 if (gpio >= ah->ah_caps.num_gpio_pins) 3490#define MS_REG_READ(x, y) \
3491 (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
3492
3493 if (gpio >= ah->caps.num_gpio_pins)
3581 return 0xffffffff; 3494 return 0xffffffff;
3582 3495
3583 if (AR_SREV_9280_10_OR_LATER(ah)) { 3496 if (AR_SREV_9285_10_OR_LATER(ah))
3584 return (MS 3497 return MS_REG_READ(AR9285, gpio) != 0;
3585 (REG_READ(ah, AR_GPIO_IN_OUT), 3498 else if (AR_SREV_9280_10_OR_LATER(ah))
3586 AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0; 3499 return MS_REG_READ(AR928X, gpio) != 0;
3587 } else { 3500 else
3588 return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) & 3501 return MS_REG_READ(AR, gpio) != 0;
3589 AR_GPIO_BIT(gpio)) != 0;
3590 }
3591} 3502}
3592 3503
3593void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio, 3504void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
3594 u32 ah_signal_type) 3505 u32 ah_signal_type)
3595{ 3506{
3596 u32 gpio_shift; 3507 u32 gpio_shift;
@@ -3605,14 +3516,14 @@ void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
3605 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 3516 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3606} 3517}
3607 3518
3608void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val) 3519void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
3609{ 3520{
3610 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 3521 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
3611 AR_GPIO_BIT(gpio)); 3522 AR_GPIO_BIT(gpio));
3612} 3523}
3613 3524
3614#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 3525#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
3615void ath9k_enable_rfkill(struct ath_hal *ah) 3526void ath9k_enable_rfkill(struct ath_hw *ah)
3616{ 3527{
3617 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, 3528 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
3618 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); 3529 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
@@ -3620,50 +3531,28 @@ void ath9k_enable_rfkill(struct ath_hal *ah)
3620 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, 3531 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
3621 AR_GPIO_INPUT_MUX2_RFSILENT); 3532 AR_GPIO_INPUT_MUX2_RFSILENT);
3622 3533
3623 ath9k_hw_cfg_gpio_input(ah, ah->ah_rfkill_gpio); 3534 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
3624 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); 3535 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
3625} 3536}
3626#endif 3537#endif
3627 3538
3628int ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg) 3539u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
3629{
3630 struct ath9k_channel *chan = ah->ah_curchan;
3631 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3632 u16 ant_config;
3633 u32 halNumAntConfig;
3634
3635 halNumAntConfig = IS_CHAN_2GHZ(chan) ?
3636 pCap->num_antcfg_2ghz : pCap->num_antcfg_5ghz;
3637
3638 if (cfg < halNumAntConfig) {
3639 if (!ath9k_hw_get_eeprom_antenna_cfg(ah, chan,
3640 cfg, &ant_config)) {
3641 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
3642 return 0;
3643 }
3644 }
3645
3646 return -EINVAL;
3647}
3648
3649u32 ath9k_hw_getdefantenna(struct ath_hal *ah)
3650{ 3540{
3651 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; 3541 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
3652} 3542}
3653 3543
3654void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna) 3544void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
3655{ 3545{
3656 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); 3546 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
3657} 3547}
3658 3548
3659bool ath9k_hw_setantennaswitch(struct ath_hal *ah, 3549bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
3660 enum ath9k_ant_setting settings, 3550 enum ath9k_ant_setting settings,
3661 struct ath9k_channel *chan, 3551 struct ath9k_channel *chan,
3662 u8 *tx_chainmask, 3552 u8 *tx_chainmask,
3663 u8 *rx_chainmask, 3553 u8 *rx_chainmask,
3664 u8 *antenna_cfgd) 3554 u8 *antenna_cfgd)
3665{ 3555{
3666 struct ath_hal_5416 *ahp = AH5416(ah);
3667 static u8 tx_chainmask_cfg, rx_chainmask_cfg; 3556 static u8 tx_chainmask_cfg, rx_chainmask_cfg;
3668 3557
3669 if (AR_SREV_9280(ah)) { 3558 if (AR_SREV_9280(ah)) {
@@ -3680,7 +3569,7 @@ bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
3680 *antenna_cfgd = true; 3569 *antenna_cfgd = true;
3681 break; 3570 break;
3682 case ATH9K_ANT_FIXED_B: 3571 case ATH9K_ANT_FIXED_B:
3683 if (ah->ah_caps.tx_chainmask > 3572 if (ah->caps.tx_chainmask >
3684 ATH9K_ANTENNA1_CHAINMASK) { 3573 ATH9K_ANTENNA1_CHAINMASK) {
3685 *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK; 3574 *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
3686 } 3575 }
@@ -3696,7 +3585,7 @@ bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
3696 break; 3585 break;
3697 } 3586 }
3698 } else { 3587 } else {
3699 ahp->ah_diversityControl = settings; 3588 ah->diversity_control = settings;
3700 } 3589 }
3701 3590
3702 return true; 3591 return true;
@@ -3706,7 +3595,7 @@ bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
3706/* General Operation */ 3595/* General Operation */
3707/*********************/ 3596/*********************/
3708 3597
3709u32 ath9k_hw_getrxfilter(struct ath_hal *ah) 3598u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
3710{ 3599{
3711 u32 bits = REG_READ(ah, AR_RX_FILTER); 3600 u32 bits = REG_READ(ah, AR_RX_FILTER);
3712 u32 phybits = REG_READ(ah, AR_PHY_ERR); 3601 u32 phybits = REG_READ(ah, AR_PHY_ERR);
@@ -3719,7 +3608,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hal *ah)
3719 return bits; 3608 return bits;
3720} 3609}
3721 3610
3722void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits) 3611void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
3723{ 3612{
3724 u32 phybits; 3613 u32 phybits;
3725 3614
@@ -3739,12 +3628,12 @@ void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits)
3739 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); 3628 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
3740} 3629}
3741 3630
3742bool ath9k_hw_phy_disable(struct ath_hal *ah) 3631bool ath9k_hw_phy_disable(struct ath_hw *ah)
3743{ 3632{
3744 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM); 3633 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM);
3745} 3634}
3746 3635
3747bool ath9k_hw_disable(struct ath_hal *ah) 3636bool ath9k_hw_disable(struct ath_hw *ah)
3748{ 3637{
3749 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 3638 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
3750 return false; 3639 return false;
@@ -3752,82 +3641,54 @@ bool ath9k_hw_disable(struct ath_hal *ah)
3752 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD); 3641 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
3753} 3642}
3754 3643
3755bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit) 3644bool ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
3756{ 3645{
3757 struct ath9k_channel *chan = ah->ah_curchan; 3646 struct ath9k_channel *chan = ah->curchan;
3647 struct ieee80211_channel *channel = chan->chan;
3758 3648
3759 ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER); 3649 ah->regulatory.power_limit = min(limit, (u32) MAX_RATE_POWER);
3760 3650
3761 if (ath9k_hw_set_txpower(ah, chan, 3651 if (ah->eep_ops->set_txpower(ah, chan,
3762 ath9k_regd_get_ctl(ah, chan), 3652 ath9k_regd_get_ctl(ah, chan),
3763 ath9k_regd_get_antenna_allowed(ah, chan), 3653 channel->max_antenna_gain * 2,
3764 chan->maxRegTxPower * 2, 3654 channel->max_power * 2,
3765 min((u32) MAX_RATE_POWER, 3655 min((u32) MAX_RATE_POWER,
3766 (u32) ah->ah_powerLimit)) != 0) 3656 (u32) ah->regulatory.power_limit)) != 0)
3767 return false; 3657 return false;
3768 3658
3769 return true; 3659 return true;
3770} 3660}
3771 3661
3772void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac) 3662void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
3773{ 3663{
3774 struct ath_hal_5416 *ahp = AH5416(ah); 3664 memcpy(ah->macaddr, mac, ETH_ALEN);
3775
3776 memcpy(mac, ahp->ah_macaddr, ETH_ALEN);
3777} 3665}
3778 3666
3779bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac) 3667void ath9k_hw_setopmode(struct ath_hw *ah)
3780{ 3668{
3781 struct ath_hal_5416 *ahp = AH5416(ah); 3669 ath9k_hw_set_operating_mode(ah, ah->opmode);
3782
3783 memcpy(ahp->ah_macaddr, mac, ETH_ALEN);
3784
3785 return true;
3786}
3787
3788void ath9k_hw_setopmode(struct ath_hal *ah)
3789{
3790 ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
3791} 3670}
3792 3671
3793void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0, u32 filter1) 3672void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
3794{ 3673{
3795 REG_WRITE(ah, AR_MCAST_FIL0, filter0); 3674 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
3796 REG_WRITE(ah, AR_MCAST_FIL1, filter1); 3675 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
3797} 3676}
3798 3677
3799void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask) 3678void ath9k_hw_setbssidmask(struct ath_softc *sc)
3800{ 3679{
3801 struct ath_hal_5416 *ahp = AH5416(ah); 3680 REG_WRITE(sc->sc_ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask));
3802 3681 REG_WRITE(sc->sc_ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
3803 memcpy(mask, ahp->ah_bssidmask, ETH_ALEN);
3804} 3682}
3805 3683
3806bool ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask) 3684void ath9k_hw_write_associd(struct ath_softc *sc)
3807{ 3685{
3808 struct ath_hal_5416 *ahp = AH5416(ah); 3686 REG_WRITE(sc->sc_ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid));
3809 3687 REG_WRITE(sc->sc_ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
3810 memcpy(ahp->ah_bssidmask, mask, ETH_ALEN); 3688 ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
3811
3812 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
3813 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
3814
3815 return true;
3816}
3817
3818void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, u16 assocId)
3819{
3820 struct ath_hal_5416 *ahp = AH5416(ah);
3821
3822 memcpy(ahp->ah_bssid, bssid, ETH_ALEN);
3823 ahp->ah_assocId = assocId;
3824
3825 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
3826 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
3827 ((assocId & 0x3fff) << AR_BSS_ID1_AID_S));
3828} 3689}
3829 3690
3830u64 ath9k_hw_gettsf64(struct ath_hal *ah) 3691u64 ath9k_hw_gettsf64(struct ath_hw *ah)
3831{ 3692{
3832 u64 tsf; 3693 u64 tsf;
3833 3694
@@ -3837,7 +3698,14 @@ u64 ath9k_hw_gettsf64(struct ath_hal *ah)
3837 return tsf; 3698 return tsf;
3838} 3699}
3839 3700
3840void ath9k_hw_reset_tsf(struct ath_hal *ah) 3701void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
3702{
3703 REG_WRITE(ah, AR_TSF_L32, 0x00000000);
3704 REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
3705 REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
3706}
3707
3708void ath9k_hw_reset_tsf(struct ath_hw *ah)
3841{ 3709{
3842 int count; 3710 int count;
3843 3711
@@ -3854,42 +3722,65 @@ void ath9k_hw_reset_tsf(struct ath_hal *ah)
3854 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); 3722 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
3855} 3723}
3856 3724
3857bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting) 3725bool ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
3858{ 3726{
3859 struct ath_hal_5416 *ahp = AH5416(ah);
3860
3861 if (setting) 3727 if (setting)
3862 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF; 3728 ah->misc_mode |= AR_PCU_TX_ADD_TSF;
3863 else 3729 else
3864 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF; 3730 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
3865 3731
3866 return true; 3732 return true;
3867} 3733}
3868 3734
3869bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us) 3735bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
3870{ 3736{
3871 struct ath_hal_5416 *ahp = AH5416(ah);
3872
3873 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) { 3737 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
3874 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad slot time %u\n", us); 3738 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad slot time %u\n", us);
3875 ahp->ah_slottime = (u32) -1; 3739 ah->slottime = (u32) -1;
3876 return false; 3740 return false;
3877 } else { 3741 } else {
3878 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us)); 3742 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
3879 ahp->ah_slottime = us; 3743 ah->slottime = us;
3880 return true; 3744 return true;
3881 } 3745 }
3882} 3746}
3883 3747
3884void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode) 3748void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode)
3885{ 3749{
3886 u32 macmode; 3750 u32 macmode;
3887 3751
3888 if (mode == ATH9K_HT_MACMODE_2040 && 3752 if (mode == ATH9K_HT_MACMODE_2040 &&
3889 !ah->ah_config.cwm_ignore_extcca) 3753 !ah->config.cwm_ignore_extcca)
3890 macmode = AR_2040_JOINED_RX_CLEAR; 3754 macmode = AR_2040_JOINED_RX_CLEAR;
3891 else 3755 else
3892 macmode = 0; 3756 macmode = 0;
3893 3757
3894 REG_WRITE(ah, AR_2040_MODE, macmode); 3758 REG_WRITE(ah, AR_2040_MODE, macmode);
3895} 3759}
3760
3761/***************************/
3762/* Bluetooth Coexistence */
3763/***************************/
3764
3765void ath9k_hw_btcoex_enable(struct ath_hw *ah)
3766{
3767 /* connect bt_active to baseband */
3768 REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
3769 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
3770 AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
3771
3772 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
3773 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
3774
3775 /* Set input mux for bt_active to gpio pin */
3776 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
3777 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
3778 ah->btactive_gpio);
3779
3780 /* Configure the desired gpio port for input */
3781 ath9k_hw_cfg_gpio_input(ah, ah->btactive_gpio);
3782
3783 /* Configure the desired GPIO port for TX_FRAME output */
3784 ath9k_hw_cfg_output(ah, ah->wlanactive_gpio,
3785 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
3786}
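
[editor's note] The new ath9k_hw_btcoex_enable() above is built entirely from the driver's read-modify-write register helpers (REG_SET_BIT, REG_CLR_BIT, REG_RMW_FIELD) that the reworked hw.h below defines. The following is a minimal, self-contained sketch of how those primitives compose; it is not the driver's implementation — the register block is modelled as a plain array and the DEMO_* mask/shift values are made up for illustration, not the real Atheros register map.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t regs[16];                      /* fake MMIO space, stands in for ah_sc->mem */

	#define REG_READ(r)        (regs[(r)])
	#define REG_WRITE(r, v)    (regs[(r)] = (v))
	#define REG_SET_BIT(r, f)  REG_WRITE(r, REG_READ(r) | (f))
	#define REG_CLR_BIT(r, f)  REG_WRITE(r, REG_READ(r) & ~(f))
	/* insert value v into field f (a mask) whose shift is the pasted token f##_S */
	#define REG_RMW_FIELD(r, f, v) \
		REG_WRITE(r, (REG_READ(r) & ~(f)) | (((v) << f##_S) & (f)))

	#define DEMO_MUX_REG        1                  /* hypothetical register index */
	#define DEMO_BT_ACTIVE      0x000f0000         /* hypothetical field mask     */
	#define DEMO_BT_ACTIVE_S    16                 /* hypothetical field shift    */

	int main(void)
	{
		REG_SET_BIT(DEMO_MUX_REG, 0x1);                  /* set a single control bit   */
		REG_RMW_FIELD(DEMO_MUX_REG, DEMO_BT_ACTIVE, 6);  /* route "gpio 6" into field  */
		printf("reg = 0x%08x\n", REG_READ(DEMO_MUX_REG)); /* prints 0x00060001          */
		return 0;
	}

The field macro relies on token pasting: passing DEMO_BT_ACTIVE makes f##_S resolve to DEMO_BT_ACTIVE_S, which is the same convention the real header uses to pair AR_GPIO_INPUT_MUX1_BT_ACTIVE with its _S shift.
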
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index 91d8f594af8..82111636c69 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -19,1062 +19,627 @@
19 19
20#include <linux/if_ether.h> 20#include <linux/if_ether.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/io.h>
23
24#include "mac.h"
25#include "ani.h"
26#include "eeprom.h"
27#include "calib.h"
28#include "regd.h"
29#include "reg.h"
30#include "phy.h"
31
32#define ATHEROS_VENDOR_ID 0x168c
33#define AR5416_DEVID_PCI 0x0023
34#define AR5416_DEVID_PCIE 0x0024
35#define AR9160_DEVID_PCI 0x0027
36#define AR9280_DEVID_PCI 0x0029
37#define AR9280_DEVID_PCIE 0x002a
38#define AR9285_DEVID_PCIE 0x002b
39#define AR5416_AR9100_DEVID 0x000b
40#define AR_SUBVENDOR_ID_NOG 0x0e11
41#define AR_SUBVENDOR_ID_NEW_A 0x7065
42#define AR5416_MAGIC 0x19641014
43
44/* Register read/write primitives */
45#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sc->mem + _reg)
46#define REG_READ(_ah, _reg) ioread32(_ah->ah_sc->mem + _reg)
47
48#define SM(_v, _f) (((_v) << _f##_S) & _f)
49#define MS(_v, _f) (((_v) & _f) >> _f##_S)
50#define REG_RMW(_a, _r, _set, _clr) \
51 REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
52#define REG_RMW_FIELD(_a, _r, _f, _v) \
53 REG_WRITE(_a, _r, \
54 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
55#define REG_SET_BIT(_a, _r, _f) \
56 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
57#define REG_CLR_BIT(_a, _r, _f) \
58 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
22 59
23struct ar5416_desc { 60#define DO_DELAY(x) do { \
24 u32 ds_link; 61 if ((++(x) % 64) == 0) \
25 u32 ds_data; 62 udelay(1); \
26 u32 ds_ctl0; 63 } while (0)
27 u32 ds_ctl1;
28 union {
29 struct {
30 u32 ctl2;
31 u32 ctl3;
32 u32 ctl4;
33 u32 ctl5;
34 u32 ctl6;
35 u32 ctl7;
36 u32 ctl8;
37 u32 ctl9;
38 u32 ctl10;
39 u32 ctl11;
40 u32 status0;
41 u32 status1;
42 u32 status2;
43 u32 status3;
44 u32 status4;
45 u32 status5;
46 u32 status6;
47 u32 status7;
48 u32 status8;
49 u32 status9;
50 } tx;
51 struct {
52 u32 status0;
53 u32 status1;
54 u32 status2;
55 u32 status3;
56 u32 status4;
57 u32 status5;
58 u32 status6;
59 u32 status7;
60 u32 status8;
61 } rx;
62 } u;
63} __packed;
64
65#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
66#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
67
68#define ds_ctl2 u.tx.ctl2
69#define ds_ctl3 u.tx.ctl3
70#define ds_ctl4 u.tx.ctl4
71#define ds_ctl5 u.tx.ctl5
72#define ds_ctl6 u.tx.ctl6
73#define ds_ctl7 u.tx.ctl7
74#define ds_ctl8 u.tx.ctl8
75#define ds_ctl9 u.tx.ctl9
76#define ds_ctl10 u.tx.ctl10
77#define ds_ctl11 u.tx.ctl11
78
79#define ds_txstatus0 u.tx.status0
80#define ds_txstatus1 u.tx.status1
81#define ds_txstatus2 u.tx.status2
82#define ds_txstatus3 u.tx.status3
83#define ds_txstatus4 u.tx.status4
84#define ds_txstatus5 u.tx.status5
85#define ds_txstatus6 u.tx.status6
86#define ds_txstatus7 u.tx.status7
87#define ds_txstatus8 u.tx.status8
88#define ds_txstatus9 u.tx.status9
89
90#define ds_rxstatus0 u.rx.status0
91#define ds_rxstatus1 u.rx.status1
92#define ds_rxstatus2 u.rx.status2
93#define ds_rxstatus3 u.rx.status3
94#define ds_rxstatus4 u.rx.status4
95#define ds_rxstatus5 u.rx.status5
96#define ds_rxstatus6 u.rx.status6
97#define ds_rxstatus7 u.rx.status7
98#define ds_rxstatus8 u.rx.status8
99
100#define AR_FrameLen 0x00000fff
101#define AR_VirtMoreFrag 0x00001000
102#define AR_TxCtlRsvd00 0x0000e000
103#define AR_XmitPower 0x003f0000
104#define AR_XmitPower_S 16
105#define AR_RTSEnable 0x00400000
106#define AR_VEOL 0x00800000
107#define AR_ClrDestMask 0x01000000
108#define AR_TxCtlRsvd01 0x1e000000
109#define AR_TxIntrReq 0x20000000
110#define AR_DestIdxValid 0x40000000
111#define AR_CTSEnable 0x80000000
112
113#define AR_BufLen 0x00000fff
114#define AR_TxMore 0x00001000
115#define AR_DestIdx 0x000fe000
116#define AR_DestIdx_S 13
117#define AR_FrameType 0x00f00000
118#define AR_FrameType_S 20
119#define AR_NoAck 0x01000000
120#define AR_InsertTS 0x02000000
121#define AR_CorruptFCS 0x04000000
122#define AR_ExtOnly 0x08000000
123#define AR_ExtAndCtl 0x10000000
124#define AR_MoreAggr 0x20000000
125#define AR_IsAggr 0x40000000
126
127#define AR_BurstDur 0x00007fff
128#define AR_BurstDur_S 0
129#define AR_DurUpdateEna 0x00008000
130#define AR_XmitDataTries0 0x000f0000
131#define AR_XmitDataTries0_S 16
132#define AR_XmitDataTries1 0x00f00000
133#define AR_XmitDataTries1_S 20
134#define AR_XmitDataTries2 0x0f000000
135#define AR_XmitDataTries2_S 24
136#define AR_XmitDataTries3 0xf0000000
137#define AR_XmitDataTries3_S 28
138
139#define AR_XmitRate0 0x000000ff
140#define AR_XmitRate0_S 0
141#define AR_XmitRate1 0x0000ff00
142#define AR_XmitRate1_S 8
143#define AR_XmitRate2 0x00ff0000
144#define AR_XmitRate2_S 16
145#define AR_XmitRate3 0xff000000
146#define AR_XmitRate3_S 24
147
148#define AR_PacketDur0 0x00007fff
149#define AR_PacketDur0_S 0
150#define AR_RTSCTSQual0 0x00008000
151#define AR_PacketDur1 0x7fff0000
152#define AR_PacketDur1_S 16
153#define AR_RTSCTSQual1 0x80000000
154
155#define AR_PacketDur2 0x00007fff
156#define AR_PacketDur2_S 0
157#define AR_RTSCTSQual2 0x00008000
158#define AR_PacketDur3 0x7fff0000
159#define AR_PacketDur3_S 16
160#define AR_RTSCTSQual3 0x80000000
161
162#define AR_AggrLen 0x0000ffff
163#define AR_AggrLen_S 0
164#define AR_TxCtlRsvd60 0x00030000
165#define AR_PadDelim 0x03fc0000
166#define AR_PadDelim_S 18
167#define AR_EncrType 0x0c000000
168#define AR_EncrType_S 26
169#define AR_TxCtlRsvd61 0xf0000000
170
171#define AR_2040_0 0x00000001
172#define AR_GI0 0x00000002
173#define AR_ChainSel0 0x0000001c
174#define AR_ChainSel0_S 2
175#define AR_2040_1 0x00000020
176#define AR_GI1 0x00000040
177#define AR_ChainSel1 0x00000380
178#define AR_ChainSel1_S 7
179#define AR_2040_2 0x00000400
180#define AR_GI2 0x00000800
181#define AR_ChainSel2 0x00007000
182#define AR_ChainSel2_S 12
183#define AR_2040_3 0x00008000
184#define AR_GI3 0x00010000
185#define AR_ChainSel3 0x000e0000
186#define AR_ChainSel3_S 17
187#define AR_RTSCTSRate 0x0ff00000
188#define AR_RTSCTSRate_S 20
189#define AR_TxCtlRsvd70 0xf0000000
190
191#define AR_TxRSSIAnt00 0x000000ff
192#define AR_TxRSSIAnt00_S 0
193#define AR_TxRSSIAnt01 0x0000ff00
194#define AR_TxRSSIAnt01_S 8
195#define AR_TxRSSIAnt02 0x00ff0000
196#define AR_TxRSSIAnt02_S 16
197#define AR_TxStatusRsvd00 0x3f000000
198#define AR_TxBaStatus 0x40000000
199#define AR_TxStatusRsvd01 0x80000000
200
201#define AR_FrmXmitOK 0x00000001
202#define AR_ExcessiveRetries 0x00000002
203#define AR_FIFOUnderrun 0x00000004
204#define AR_Filtered 0x00000008
205#define AR_RTSFailCnt 0x000000f0
206#define AR_RTSFailCnt_S 4
207#define AR_DataFailCnt 0x00000f00
208#define AR_DataFailCnt_S 8
209#define AR_VirtRetryCnt 0x0000f000
210#define AR_VirtRetryCnt_S 12
211#define AR_TxDelimUnderrun 0x00010000
212#define AR_TxDataUnderrun 0x00020000
213#define AR_DescCfgErr 0x00040000
214#define AR_TxTimerExpired 0x00080000
215#define AR_TxStatusRsvd10 0xfff00000
216
217#define AR_SendTimestamp ds_txstatus2
218#define AR_BaBitmapLow ds_txstatus3
219#define AR_BaBitmapHigh ds_txstatus4
220
221#define AR_TxRSSIAnt10 0x000000ff
222#define AR_TxRSSIAnt10_S 0
223#define AR_TxRSSIAnt11 0x0000ff00
224#define AR_TxRSSIAnt11_S 8
225#define AR_TxRSSIAnt12 0x00ff0000
226#define AR_TxRSSIAnt12_S 16
227#define AR_TxRSSICombined 0xff000000
228#define AR_TxRSSICombined_S 24
229
230#define AR_TxEVM0 ds_txstatus5
231#define AR_TxEVM1 ds_txstatus6
232#define AR_TxEVM2 ds_txstatus7
233
234#define AR_TxDone 0x00000001
235#define AR_SeqNum 0x00001ffe
236#define AR_SeqNum_S 1
237#define AR_TxStatusRsvd80 0x0001e000
238#define AR_TxOpExceeded 0x00020000
239#define AR_TxStatusRsvd81 0x001c0000
240#define AR_FinalTxIdx 0x00600000
241#define AR_FinalTxIdx_S 21
242#define AR_TxStatusRsvd82 0x01800000
243#define AR_PowerMgmt 0x02000000
244#define AR_TxStatusRsvd83 0xfc000000
245
246#define AR_RxCTLRsvd00 0xffffffff
247
248#define AR_BufLen 0x00000fff
249#define AR_RxCtlRsvd00 0x00001000
250#define AR_RxIntrReq 0x00002000
251#define AR_RxCtlRsvd01 0xffffc000
252
253#define AR_RxRSSIAnt00 0x000000ff
254#define AR_RxRSSIAnt00_S 0
255#define AR_RxRSSIAnt01 0x0000ff00
256#define AR_RxRSSIAnt01_S 8
257#define AR_RxRSSIAnt02 0x00ff0000
258#define AR_RxRSSIAnt02_S 16
259#define AR_RxRate 0xff000000
260#define AR_RxRate_S 24
261#define AR_RxStatusRsvd00 0xff000000
262
263#define AR_DataLen 0x00000fff
264#define AR_RxMore 0x00001000
265#define AR_NumDelim 0x003fc000
266#define AR_NumDelim_S 14
267#define AR_RxStatusRsvd10 0xff800000
268
269#define AR_RcvTimestamp ds_rxstatus2
270
271#define AR_GI 0x00000001
272#define AR_2040 0x00000002
273#define AR_Parallel40 0x00000004
274#define AR_Parallel40_S 2
275#define AR_RxStatusRsvd30 0x000000f8
276#define AR_RxAntenna 0xffffff00
277#define AR_RxAntenna_S 8
278
279#define AR_RxRSSIAnt10 0x000000ff
280#define AR_RxRSSIAnt10_S 0
281#define AR_RxRSSIAnt11 0x0000ff00
282#define AR_RxRSSIAnt11_S 8
283#define AR_RxRSSIAnt12 0x00ff0000
284#define AR_RxRSSIAnt12_S 16
285#define AR_RxRSSICombined 0xff000000
286#define AR_RxRSSICombined_S 24
287
288#define AR_RxEVM0 ds_rxstatus4
289#define AR_RxEVM1 ds_rxstatus5
290#define AR_RxEVM2 ds_rxstatus6
291
292#define AR_RxDone 0x00000001
293#define AR_RxFrameOK 0x00000002
294#define AR_CRCErr 0x00000004
295#define AR_DecryptCRCErr 0x00000008
296#define AR_PHYErr 0x00000010
297#define AR_MichaelErr 0x00000020
298#define AR_PreDelimCRCErr 0x00000040
299#define AR_RxStatusRsvd70 0x00000080
300#define AR_RxKeyIdxValid 0x00000100
301#define AR_KeyIdx 0x0000fe00
302#define AR_KeyIdx_S 9
303#define AR_PHYErrCode 0x0000ff00
304#define AR_PHYErrCode_S 8
305#define AR_RxMoreAggr 0x00010000
306#define AR_RxAggr 0x00020000
307#define AR_PostDelimCRCErr 0x00040000
308#define AR_RxStatusRsvd71 0x3ff80000
309#define AR_DecryptBusyErr 0x40000000
310#define AR_KeyMiss 0x80000000
311
312#define AR5416_MAGIC 0x19641014
313
314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
315 MS(ads->ds_rxstatus0, AR_RxRate) : \
316 (ads->ds_rxstatus3 >> 2) & 0xFF)
317
318#define set11nTries(_series, _index) \
319 (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
320
321#define set11nRate(_series, _index) \
322 (SM((_series)[_index].Rate, AR_XmitRate##_index))
323
324#define set11nPktDurRTSCTS(_series, _index) \
325 (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \
326 ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \
327 AR_RTSCTSQual##_index : 0))
328 64
329#define set11nRateFlags(_series, _index) \ 65#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
330 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \ 66 int r; \
331 AR_2040_##_index : 0) \ 67 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
332 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ 68 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
333 AR_GI##_index : 0) \ 69 INI_RA((iniarray), r, (column))); \
334 |SM((_series)[_index].ChSel, AR_ChainSel##_index)) 70 DO_DELAY(regWr); \
71 } \
72 } while (0)
335 73
336#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100) 74#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
75#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
76#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
77#define AR_GPIO_OUTPUT_MUX_AS_TX_FRAME 3
78#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
79#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
337 80
338#define INIT_CONFIG_STATUS 0x00000000 81#define AR_GPIOD_MASK 0x00001FFF
339#define INIT_RSSI_THR 0x00000700 82#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
340#define INIT_BCON_CNTRL_REG 0x00000000
341 83
342#define MIN_TX_FIFO_THRESHOLD 0x1 84#define BASE_ACTIVATE_DELAY 100
343#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1) 85#define RTC_PLL_SETTLE_DELAY 1000
344#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD 86#define COEF_SCALE_S 24
87#define HT40_CHANNEL_CENTER_SHIFT 10
345 88
346struct ar5416AniState { 89#define ATH9K_ANTENNA0_CHAINMASK 0x1
347 struct ath9k_channel c; 90#define ATH9K_ANTENNA1_CHAINMASK 0x2
348 u8 noiseImmunityLevel; 91
349 u8 spurImmunityLevel; 92#define ATH9K_NUM_DMA_DEBUG_REGS 8
350 u8 firstepLevel; 93#define ATH9K_NUM_QUEUES 10
351 u8 ofdmWeakSigDetectOff; 94
352 u8 cckWeakSigThreshold; 95#define MAX_RATE_POWER 63
353 u32 listenTime; 96#define AH_TIMEOUT 100000
354 u32 ofdmTrigHigh; 97#define AH_TIME_QUANTUM 10
355 u32 ofdmTrigLow; 98#define AR_KEYTABLE_SIZE 128
356 int32_t cckTrigHigh; 99#define POWER_UP_TIME 200000
357 int32_t cckTrigLow; 100#define SPUR_RSSI_THRESH 40
358 int32_t rssiThrLow; 101
359 int32_t rssiThrHigh; 102#define CAB_TIMEOUT_VAL 10
360 u32 noiseFloor; 103#define BEACON_TIMEOUT_VAL 10
361 u32 txFrameCount; 104#define MIN_BEACON_TIMEOUT_VAL 1
362 u32 rxFrameCount; 105#define SLEEP_SLOP 3
363 u32 cycleCount; 106
364 u32 ofdmPhyErrCount; 107#define INIT_CONFIG_STATUS 0x00000000
365 u32 cckPhyErrCount; 108#define INIT_RSSI_THR 0x00000700
366 u32 ofdmPhyErrBase; 109#define INIT_BCON_CNTRL_REG 0x00000000
367 u32 cckPhyErrBase; 110
368 int16_t pktRssi[2]; 111#define TU_TO_USEC(_tu) ((_tu) << 10)
369 int16_t ofdmErrRssi[2]; 112
370 int16_t cckErrRssi[2]; 113enum wireless_mode {
114 ATH9K_MODE_11A = 0,
115 ATH9K_MODE_11B = 2,
116 ATH9K_MODE_11G = 3,
117 ATH9K_MODE_11NA_HT20 = 6,
118 ATH9K_MODE_11NG_HT20 = 7,
119 ATH9K_MODE_11NA_HT40PLUS = 8,
120 ATH9K_MODE_11NA_HT40MINUS = 9,
121 ATH9K_MODE_11NG_HT40PLUS = 10,
122 ATH9K_MODE_11NG_HT40MINUS = 11,
123 ATH9K_MODE_MAX
371}; 124};
372 125
373#define HAL_PROCESS_ANI 0x00000001 126enum ath9k_hw_caps {
374#define DO_ANI(ah) \ 127 ATH9K_HW_CAP_CHAN_SPREAD = BIT(0),
375 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI)) 128 ATH9K_HW_CAP_MIC_AESCCM = BIT(1),
376 129 ATH9K_HW_CAP_MIC_CKIP = BIT(2),
377struct ar5416Stats { 130 ATH9K_HW_CAP_MIC_TKIP = BIT(3),
378 u32 ast_ani_niup; 131 ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4),
379 u32 ast_ani_nidown; 132 ATH9K_HW_CAP_CIPHER_CKIP = BIT(5),
380 u32 ast_ani_spurup; 133 ATH9K_HW_CAP_CIPHER_TKIP = BIT(6),
381 u32 ast_ani_spurdown; 134 ATH9K_HW_CAP_VEOL = BIT(7),
382 u32 ast_ani_ofdmon; 135 ATH9K_HW_CAP_BSSIDMASK = BIT(8),
383 u32 ast_ani_ofdmoff; 136 ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9),
384 u32 ast_ani_cckhigh; 137 ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10),
385 u32 ast_ani_ccklow; 138 ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11),
386 u32 ast_ani_stepup; 139 ATH9K_HW_CAP_HT = BIT(12),
387 u32 ast_ani_stepdown; 140 ATH9K_HW_CAP_GTT = BIT(13),
388 u32 ast_ani_ofdmerrs; 141 ATH9K_HW_CAP_FASTCC = BIT(14),
389 u32 ast_ani_cckerrs; 142 ATH9K_HW_CAP_RFSILENT = BIT(15),
390 u32 ast_ani_reset; 143 ATH9K_HW_CAP_WOW = BIT(16),
391 u32 ast_ani_lzero; 144 ATH9K_HW_CAP_CST = BIT(17),
392 u32 ast_ani_lneg; 145 ATH9K_HW_CAP_ENHANCEDPM = BIT(18),
393 struct ath9k_mib_stats ast_mibstats; 146 ATH9K_HW_CAP_AUTOSLEEP = BIT(19),
394 struct ath9k_node_stats ast_nodestats; 147 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20),
148 ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21),
149 ATH9K_HW_CAP_BT_COEX = BIT(22)
395}; 150};
396 151
397#define AR5416_OPFLAGS_11A 0x01 152enum ath9k_capability_type {
398#define AR5416_OPFLAGS_11G 0x02 153 ATH9K_CAP_CIPHER = 0,
399#define AR5416_OPFLAGS_N_5G_HT40 0x04 154 ATH9K_CAP_TKIP_MIC,
400#define AR5416_OPFLAGS_N_2G_HT40 0x08 155 ATH9K_CAP_TKIP_SPLIT,
401#define AR5416_OPFLAGS_N_5G_HT20 0x10 156 ATH9K_CAP_PHYCOUNTERS,
402#define AR5416_OPFLAGS_N_2G_HT20 0x20 157 ATH9K_CAP_DIVERSITY,
403 158 ATH9K_CAP_TXPOW,
404#define EEP_RFSILENT_ENABLED 0x0001 159 ATH9K_CAP_PHYDIAG,
405#define EEP_RFSILENT_ENABLED_S 0 160 ATH9K_CAP_MCAST_KEYSRCH,
406#define EEP_RFSILENT_POLARITY 0x0002 161 ATH9K_CAP_TSF_ADJUST,
407#define EEP_RFSILENT_POLARITY_S 1 162 ATH9K_CAP_WME_TKIPMIC,
408#define EEP_RFSILENT_GPIO_SEL 0x001c 163 ATH9K_CAP_RFSILENT,
409#define EEP_RFSILENT_GPIO_SEL_S 2 164 ATH9K_CAP_ANT_CFG_2GHZ,
410 165 ATH9K_CAP_ANT_CFG_5GHZ
411#define AR5416_EEP_NO_BACK_VER 0x1
412#define AR5416_EEP_VER 0xE
413#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
414#define AR5416_EEP_MINOR_VER_2 0x2
415#define AR5416_EEP_MINOR_VER_3 0x3
416#define AR5416_EEP_MINOR_VER_7 0x7
417#define AR5416_EEP_MINOR_VER_9 0x9
418#define AR5416_EEP_MINOR_VER_16 0x10
419#define AR5416_EEP_MINOR_VER_17 0x11
420#define AR5416_EEP_MINOR_VER_19 0x13
421
422#define AR5416_NUM_5G_CAL_PIERS 8
423#define AR5416_NUM_2G_CAL_PIERS 4
424#define AR5416_NUM_5G_20_TARGET_POWERS 8
425#define AR5416_NUM_5G_40_TARGET_POWERS 8
426#define AR5416_NUM_2G_CCK_TARGET_POWERS 3
427#define AR5416_NUM_2G_20_TARGET_POWERS 4
428#define AR5416_NUM_2G_40_TARGET_POWERS 4
429#define AR5416_NUM_CTLS 24
430#define AR5416_NUM_BAND_EDGES 8
431#define AR5416_NUM_PD_GAINS 4
432#define AR5416_PD_GAINS_IN_MASK 4
433#define AR5416_PD_GAIN_ICEPTS 5
434#define AR5416_EEPROM_MODAL_SPURS 5
435#define AR5416_MAX_RATE_POWER 63
436#define AR5416_NUM_PDADC_VALUES 128
437#define AR5416_BCHAN_UNUSED 0xFF
438#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
439#define AR5416_MAX_CHAINS 3
440#define AR5416_PWR_TABLE_OFFSET -5
441
442/* Rx gain type values */
443#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0
444#define AR5416_EEP_RXGAIN_13DB_BACKOFF 1
445#define AR5416_EEP_RXGAIN_ORIG 2
446
447/* Tx gain type values */
448#define AR5416_EEP_TXGAIN_ORIGINAL 0
449#define AR5416_EEP_TXGAIN_HIGH_POWER 1
450
451#define AR5416_EEP4K_START_LOC 64
452#define AR5416_EEP4K_NUM_2G_CAL_PIERS 3
453#define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3
454#define AR5416_EEP4K_NUM_2G_20_TARGET_POWERS 3
455#define AR5416_EEP4K_NUM_2G_40_TARGET_POWERS 3
456#define AR5416_EEP4K_NUM_CTLS 12
457#define AR5416_EEP4K_NUM_BAND_EDGES 4
458#define AR5416_EEP4K_NUM_PD_GAINS 2
459#define AR5416_EEP4K_PD_GAINS_IN_MASK 4
460#define AR5416_EEP4K_PD_GAIN_ICEPTS 5
461#define AR5416_EEP4K_MAX_CHAINS 1
462
463enum eeprom_param {
464 EEP_NFTHRESH_5,
465 EEP_NFTHRESH_2,
466 EEP_MAC_MSW,
467 EEP_MAC_MID,
468 EEP_MAC_LSW,
469 EEP_REG_0,
470 EEP_REG_1,
471 EEP_OP_CAP,
472 EEP_OP_MODE,
473 EEP_RF_SILENT,
474 EEP_OB_5,
475 EEP_DB_5,
476 EEP_OB_2,
477 EEP_DB_2,
478 EEP_MINOR_REV,
479 EEP_TX_MASK,
480 EEP_RX_MASK,
481 EEP_RXGAIN_TYPE,
482 EEP_TXGAIN_TYPE,
483}; 166};
484 167
485enum ar5416_rates { 168struct ath9k_hw_capabilities {
486 rate6mb, rate9mb, rate12mb, rate18mb, 169 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
487 rate24mb, rate36mb, rate48mb, rate54mb, 170 DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */
488 rate1l, rate2l, rate2s, rate5_5l, 171 u16 total_queues;
489 rate5_5s, rate11l, rate11s, rateXr, 172 u16 keycache_size;
490 rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3, 173 u16 low_5ghz_chan, high_5ghz_chan;
491 rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7, 174 u16 low_2ghz_chan, high_2ghz_chan;
492 rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3, 175 u16 num_mr_retries;
493 rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7, 176 u16 rts_aggr_limit;
494 rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm, 177 u8 tx_chainmask;
495 Ar5416RateSize 178 u8 rx_chainmask;
179 u16 tx_triglevel_max;
180 u16 reg_cap;
181 u8 num_gpio_pins;
182 u8 num_antcfg_2ghz;
183 u8 num_antcfg_5ghz;
496}; 184};
497 185
498enum ath9k_hal_freq_band { 186struct ath9k_ops_config {
499 ATH9K_HAL_FREQ_BAND_5GHZ = 0, 187 int dma_beacon_response_time;
500 ATH9K_HAL_FREQ_BAND_2GHZ = 1 188 int sw_beacon_response_time;
189 int additional_swba_backoff;
190 int ack_6mb;
191 int cwm_ignore_extcca;
192 u8 pcie_powersave_enable;
193 u8 pcie_l1skp_enable;
194 u8 pcie_clock_req;
195 u32 pcie_waen;
196 int pcie_power_reset;
197 u8 pcie_restore;
198 u8 analog_shiftreg;
199 u8 ht_enable;
200 u32 ofdm_trig_low;
201 u32 ofdm_trig_high;
202 u32 cck_trig_high;
203 u32 cck_trig_low;
204 u32 enable_ani;
205 u8 noise_immunity_level;
206 u32 ofdm_weaksignal_det;
207 u32 cck_weaksignal_thr;
208 u8 spur_immunity_level;
209 u8 firstep_level;
210 int8_t rssi_thr_high;
211 int8_t rssi_thr_low;
212 u16 diversity_control;
213 u16 antenna_switch_swap;
214 int serialize_regmode;
215 int intr_mitigation;
216#define SPUR_DISABLE 0
217#define SPUR_ENABLE_IOCTL 1
218#define SPUR_ENABLE_EEPROM 2
219#define AR_EEPROM_MODAL_SPURS 5
220#define AR_SPUR_5413_1 1640
221#define AR_SPUR_5413_2 1200
222#define AR_NO_SPUR 0x8000
223#define AR_BASE_FREQ_2GHZ 2300
224#define AR_BASE_FREQ_5GHZ 4900
225#define AR_SPUR_FEEQ_BOUND_HT40 19
226#define AR_SPUR_FEEQ_BOUND_HT20 10
227 int spurmode;
228 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
501}; 229};
502 230
503struct base_eep_header { 231enum ath9k_int {
504 u16 length; 232 ATH9K_INT_RX = 0x00000001,
505 u16 checksum; 233 ATH9K_INT_RXDESC = 0x00000002,
506 u16 version; 234 ATH9K_INT_RXNOFRM = 0x00000008,
507 u8 opCapFlags; 235 ATH9K_INT_RXEOL = 0x00000010,
508 u8 eepMisc; 236 ATH9K_INT_RXORN = 0x00000020,
509 u16 regDmn[2]; 237 ATH9K_INT_TX = 0x00000040,
510 u8 macAddr[6]; 238 ATH9K_INT_TXDESC = 0x00000080,
511 u8 rxMask; 239 ATH9K_INT_TIM_TIMER = 0x00000100,
512 u8 txMask; 240 ATH9K_INT_TXURN = 0x00000800,
513 u16 rfSilent; 241 ATH9K_INT_MIB = 0x00001000,
514 u16 blueToothOptions; 242 ATH9K_INT_RXPHY = 0x00004000,
515 u16 deviceCap; 243 ATH9K_INT_RXKCM = 0x00008000,
516 u32 binBuildNumber; 244 ATH9K_INT_SWBA = 0x00010000,
517 u8 deviceType; 245 ATH9K_INT_BMISS = 0x00040000,
518 u8 pwdclkind; 246 ATH9K_INT_BNR = 0x00100000,
519 u8 futureBase_1[2]; 247 ATH9K_INT_TIM = 0x00200000,
520 u8 rxGainType; 248 ATH9K_INT_DTIM = 0x00400000,
521 u8 futureBase_2[3]; 249 ATH9K_INT_DTIMSYNC = 0x00800000,
522 u8 txGainType; 250 ATH9K_INT_GPIO = 0x01000000,
523 u8 futureBase_3[25]; 251 ATH9K_INT_CABEND = 0x02000000,
524} __packed; 252 ATH9K_INT_CST = 0x10000000,
525 253 ATH9K_INT_GTT = 0x20000000,
526struct base_eep_header_4k { 254 ATH9K_INT_FATAL = 0x40000000,
527 u16 length; 255 ATH9K_INT_GLOBAL = 0x80000000,
528 u16 checksum; 256 ATH9K_INT_BMISC = ATH9K_INT_TIM |
529 u16 version; 257 ATH9K_INT_DTIM |
530 u8 opCapFlags; 258 ATH9K_INT_DTIMSYNC |
531 u8 eepMisc; 259 ATH9K_INT_CABEND,
532 u16 regDmn[2]; 260 ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM |
533 u8 macAddr[6]; 261 ATH9K_INT_RXDESC |
534 u8 rxMask; 262 ATH9K_INT_RXEOL |
535 u8 txMask; 263 ATH9K_INT_RXORN |
536 u16 rfSilent; 264 ATH9K_INT_TXURN |
537 u16 blueToothOptions; 265 ATH9K_INT_TXDESC |
538 u16 deviceCap; 266 ATH9K_INT_MIB |
539 u32 binBuildNumber; 267 ATH9K_INT_RXPHY |
540 u8 deviceType; 268 ATH9K_INT_RXKCM |
541 u8 futureBase[1]; 269 ATH9K_INT_SWBA |
542} __packed; 270 ATH9K_INT_BMISS |
543 271 ATH9K_INT_GPIO,
544 272 ATH9K_INT_NOCARD = 0xffffffff
545struct spur_chan {
546 u16 spurChan;
547 u8 spurRangeLow;
548 u8 spurRangeHigh;
549} __packed;
550
551struct modal_eep_header {
552 u32 antCtrlChain[AR5416_MAX_CHAINS];
553 u32 antCtrlCommon;
554 u8 antennaGainCh[AR5416_MAX_CHAINS];
555 u8 switchSettling;
556 u8 txRxAttenCh[AR5416_MAX_CHAINS];
557 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
558 u8 adcDesiredSize;
559 u8 pgaDesiredSize;
560 u8 xlnaGainCh[AR5416_MAX_CHAINS];
561 u8 txEndToXpaOff;
562 u8 txEndToRxOn;
563 u8 txFrameToXpaOn;
564 u8 thresh62;
565 u8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
566 u8 xpdGain;
567 u8 xpd;
568 u8 iqCalICh[AR5416_MAX_CHAINS];
569 u8 iqCalQCh[AR5416_MAX_CHAINS];
570 u8 pdGainOverlap;
571 u8 ob;
572 u8 db;
573 u8 xpaBiasLvl;
574 u8 pwrDecreaseFor2Chain;
575 u8 pwrDecreaseFor3Chain;
576 u8 txFrameToDataStart;
577 u8 txFrameToPaOn;
578 u8 ht40PowerIncForPdadc;
579 u8 bswAtten[AR5416_MAX_CHAINS];
580 u8 bswMargin[AR5416_MAX_CHAINS];
581 u8 swSettleHt40;
582 u8 xatten2Db[AR5416_MAX_CHAINS];
583 u8 xatten2Margin[AR5416_MAX_CHAINS];
584 u8 ob_ch1;
585 u8 db_ch1;
586 u8 useAnt1:1,
587 force_xpaon:1,
588 local_bias:1,
589 femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
590 u8 futureModalar9280;
591 u16 xpaBiasLvlFreq[3];
592 u8 futureModal[6];
593
594 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
595} __packed;
596
597struct modal_eep_4k_header {
598 u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
599 u32 antCtrlCommon;
600 u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS];
601 u8 switchSettling;
602 u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS];
603 u8 rxTxMarginCh[AR5416_EEP4K_MAX_CHAINS];
604 u8 adcDesiredSize;
605 u8 pgaDesiredSize;
606 u8 xlnaGainCh[AR5416_EEP4K_MAX_CHAINS];
607 u8 txEndToXpaOff;
608 u8 txEndToRxOn;
609 u8 txFrameToXpaOn;
610 u8 thresh62;
611 u8 noiseFloorThreshCh[AR5416_EEP4K_MAX_CHAINS];
612 u8 xpdGain;
613 u8 xpd;
614 u8 iqCalICh[AR5416_EEP4K_MAX_CHAINS];
615 u8 iqCalQCh[AR5416_EEP4K_MAX_CHAINS];
616 u8 pdGainOverlap;
617 u8 ob_01;
618 u8 db1_01;
619 u8 xpaBiasLvl;
620 u8 txFrameToDataStart;
621 u8 txFrameToPaOn;
622 u8 ht40PowerIncForPdadc;
623 u8 bswAtten[AR5416_EEP4K_MAX_CHAINS];
624 u8 bswMargin[AR5416_EEP4K_MAX_CHAINS];
625 u8 swSettleHt40;
626 u8 xatten2Db[AR5416_EEP4K_MAX_CHAINS];
627 u8 xatten2Margin[AR5416_EEP4K_MAX_CHAINS];
628 u8 db2_01;
629 u8 version;
630 u16 ob_234;
631 u16 db1_234;
632 u16 db2_234;
633 u8 futureModal[4];
634
635 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
636} __packed;
637
638
639struct cal_data_per_freq {
640 u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
641 u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
642} __packed;
643
644struct cal_data_per_freq_4k {
645 u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
646 u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
647} __packed;
648
649struct cal_target_power_leg {
650 u8 bChannel;
651 u8 tPow2x[4];
652} __packed;
653
654struct cal_target_power_ht {
655 u8 bChannel;
656 u8 tPow2x[8];
657} __packed;
658
659
660#ifdef __BIG_ENDIAN_BITFIELD
661struct cal_ctl_edges {
662 u8 bChannel;
663 u8 flag:2, tPower:6;
664} __packed;
665#else
666struct cal_ctl_edges {
667 u8 bChannel;
668 u8 tPower:6, flag:2;
669} __packed;
670#endif
671
672struct cal_ctl_data {
673 struct cal_ctl_edges
674 ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
675} __packed;
676
677struct cal_ctl_data_4k {
678 struct cal_ctl_edges
679 ctlEdges[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_BAND_EDGES];
680} __packed;
681
682struct ar5416_eeprom_def {
683 struct base_eep_header baseEepHeader;
684 u8 custData[64];
685 struct modal_eep_header modalHeader[2];
686 u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS];
687 u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS];
688 struct cal_data_per_freq
689 calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS];
690 struct cal_data_per_freq
691 calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
692 struct cal_target_power_leg
693 calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS];
694 struct cal_target_power_ht
695 calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS];
696 struct cal_target_power_ht
697 calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS];
698 struct cal_target_power_leg
699 calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS];
700 struct cal_target_power_leg
701 calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS];
702 struct cal_target_power_ht
703 calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS];
704 struct cal_target_power_ht
705 calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS];
706 u8 ctlIndex[AR5416_NUM_CTLS];
707 struct cal_ctl_data ctlData[AR5416_NUM_CTLS];
708 u8 padding;
709} __packed;
710
711struct ar5416_eeprom_4k {
712 struct base_eep_header_4k baseEepHeader;
713 u8 custData[20];
714 struct modal_eep_4k_header modalHeader;
715 u8 calFreqPier2G[AR5416_EEP4K_NUM_2G_CAL_PIERS];
716 struct cal_data_per_freq_4k
717 calPierData2G[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_2G_CAL_PIERS];
718 struct cal_target_power_leg
719 calTargetPowerCck[AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS];
720 struct cal_target_power_leg
721 calTargetPower2G[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
722 struct cal_target_power_ht
723 calTargetPower2GHT20[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
724 struct cal_target_power_ht
725 calTargetPower2GHT40[AR5416_EEP4K_NUM_2G_40_TARGET_POWERS];
726 u8 ctlIndex[AR5416_EEP4K_NUM_CTLS];
727 struct cal_ctl_data_4k ctlData[AR5416_EEP4K_NUM_CTLS];
728 u8 padding;
729} __packed;
730
731struct ar5416IniArray {
732 u32 *ia_array;
733 u32 ia_rows;
734 u32 ia_columns;
735}; 273};
736 274
737#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \ 275#define CHANNEL_CW_INT 0x00002
738 (iniarray)->ia_array = (u32 *)(array); \ 276#define CHANNEL_CCK 0x00020
739 (iniarray)->ia_rows = (rows); \ 277#define CHANNEL_OFDM 0x00040
740 (iniarray)->ia_columns = (columns); \ 278#define CHANNEL_2GHZ 0x00080
741 } while (0) 279#define CHANNEL_5GHZ 0x00100
742 280#define CHANNEL_PASSIVE 0x00200
743#define INI_RA(iniarray, row, column) \ 281#define CHANNEL_DYN 0x00400
744 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)]) 282#define CHANNEL_HALF 0x04000
283#define CHANNEL_QUARTER 0x08000
284#define CHANNEL_HT20 0x10000
285#define CHANNEL_HT40PLUS 0x20000
286#define CHANNEL_HT40MINUS 0x40000
287
288#define CHANNEL_INTERFERENCE 0x01
289#define CHANNEL_DFS 0x02
290#define CHANNEL_4MS_LIMIT 0x04
291#define CHANNEL_DFS_CLEAR 0x08
292#define CHANNEL_DISALLOW_ADHOC 0x10
293#define CHANNEL_PER_11D_ADHOC 0x20
294
295#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
296#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
297#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
298#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
299#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
300#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
301#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
302#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
303#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
304#define CHANNEL_ALL \
305 (CHANNEL_OFDM| \
306 CHANNEL_CCK| \
307 CHANNEL_2GHZ | \
308 CHANNEL_5GHZ | \
309 CHANNEL_HT20 | \
310 CHANNEL_HT40PLUS | \
311 CHANNEL_HT40MINUS)
312
313struct ath9k_channel {
314 struct ieee80211_channel *chan;
315 u16 channel;
316 u32 channelFlags;
317 u32 chanmode;
318 int32_t CalValid;
319 bool oneTimeCalsDone;
320 int8_t iCoff;
321 int8_t qCoff;
322 int16_t rawNoiseFloor;
323};
745 324
746#define INIT_CAL(_perCal) do { \ 325#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \
747 (_perCal)->calState = CAL_WAITING; \ 326 (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
748 (_perCal)->calNext = NULL; \ 327 (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
749 } while (0) 328 (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
329#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
330 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
331 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
332 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
333#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
334#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
335#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
336#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
337#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
338#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
339#define IS_CHAN_A_5MHZ_SPACED(_c) \
340 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
341 (((_c)->channel % 20) != 0) && \
342 (((_c)->channel % 10) != 0))
343
344/* These macros check chanmode and not channelFlags */
345#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
346#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
347 ((_c)->chanmode == CHANNEL_G_HT20))
348#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
349 ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
350 ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
351 ((_c)->chanmode == CHANNEL_G_HT40MINUS))
352#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
353
354enum ath9k_power_mode {
355 ATH9K_PM_AWAKE = 0,
356 ATH9K_PM_FULL_SLEEP,
357 ATH9K_PM_NETWORK_SLEEP,
358 ATH9K_PM_UNDEFINED
359};
750 360
751#define INSERT_CAL(_ahp, _perCal) \ 361enum ath9k_ant_setting {
752 do { \ 362 ATH9K_ANT_VARIABLE = 0,
753 if ((_ahp)->ah_cal_list_last == NULL) { \ 363 ATH9K_ANT_FIXED_A,
754 (_ahp)->ah_cal_list = \ 364 ATH9K_ANT_FIXED_B
755 (_ahp)->ah_cal_list_last = (_perCal); \ 365};
756 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
757 } else { \
758 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
759 (_ahp)->ah_cal_list_last = (_perCal); \
760 (_perCal)->calNext = (_ahp)->ah_cal_list; \
761 } \
762 } while (0)
763 366
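INSERT_CAL above appends a per-calibration entry to a circular singly-linked list: the first node is made to point at itself, and each later node is linked in at the tail with its calNext wrapped back to the head, so the periodic calibration code can keep advancing around the ring. A minimal user-space sketch of the same pattern; struct cal_node, struct owner and insert_cal() are hypothetical stand-ins for the driver types, not the driver itself.

#include <stddef.h>
#include <stdio.h>

struct cal_node {                    /* stands in for struct hal_cal_list */
	const char *name;
	struct cal_node *calNext;
};

struct owner {                       /* stands in for the hal/hw structure */
	struct cal_node *cal_list;
	struct cal_node *cal_list_last;
};

static void insert_cal(struct owner *o, struct cal_node *n)
{
	if (o->cal_list_last == NULL) {
		o->cal_list = o->cal_list_last = n;
		n->calNext = n;                 /* lone node points at itself */
	} else {
		o->cal_list_last->calNext = n;  /* link in at the tail */
		o->cal_list_last = n;
		n->calNext = o->cal_list;       /* close the ring back to the head */
	}
}

int main(void)
{
	struct cal_node iq = { "iq", NULL }, adc = { "adc_gain", NULL };
	struct owner o = { NULL, NULL };
	struct cal_node *cur;
	int i;

	insert_cal(&o, &iq);
	insert_cal(&o, &adc);
	/* walk the ring a few steps: iq -> adc_gain -> iq -> adc_gain */
	for (cur = o.cal_list, i = 0; i < 4; i++, cur = cur->calNext)
		printf("%s\n", cur->name);
	return 0;
}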
764enum hal_cal_types { 367enum ath9k_tp_scale {
765 ADC_DC_INIT_CAL = 0x1, 368 ATH9K_TP_SCALE_MAX = 0,
766 ADC_GAIN_CAL = 0x2, 369 ATH9K_TP_SCALE_50,
767 ADC_DC_CAL = 0x4, 370 ATH9K_TP_SCALE_25,
768 IQ_MISMATCH_CAL = 0x8 371 ATH9K_TP_SCALE_12,
372 ATH9K_TP_SCALE_MIN
769}; 373};
770 374
771enum hal_cal_state { 375enum ser_reg_mode {
772 CAL_INACTIVE, 376 SER_REG_MODE_OFF = 0,
773 CAL_WAITING, 377 SER_REG_MODE_ON = 1,
774 CAL_RUNNING, 378 SER_REG_MODE_AUTO = 2,
775 CAL_DONE
776}; 379};
777 380
778#define MIN_CAL_SAMPLES 1 381struct ath9k_beacon_state {
779#define MAX_CAL_SAMPLES 64 382 u32 bs_nexttbtt;
780#define INIT_LOG_COUNT 5 383 u32 bs_nextdtim;
781#define PER_MIN_LOG_COUNT 2 384 u32 bs_intval;
782#define PER_MAX_LOG_COUNT 10 385#define ATH9K_BEACON_PERIOD 0x0000ffff
386#define ATH9K_BEACON_ENA 0x00800000
387#define ATH9K_BEACON_RESET_TSF 0x01000000
388 u32 bs_dtimperiod;
389 u16 bs_cfpperiod;
390 u16 bs_cfpmaxduration;
391 u32 bs_cfpnext;
392 u16 bs_timoffset;
393 u16 bs_bmissthreshold;
394 u32 bs_sleepduration;
395};
783 396
784struct hal_percal_data { 397struct chan_centers {
785 enum hal_cal_types calType; 398 u16 synth_center;
786 u32 calNumSamples; 399 u16 ctl_center;
787 u32 calCountMax; 400 u16 ext_center;
788 void (*calCollect) (struct ath_hal *);
789 void (*calPostProc) (struct ath_hal *, u8);
790}; 401};
791 402
792struct hal_cal_list { 403enum {
793 const struct hal_percal_data *calData; 404 ATH9K_RESET_POWER_ON,
794 enum hal_cal_state calState; 405 ATH9K_RESET_WARM,
795 struct hal_cal_list *calNext; 406 ATH9K_RESET_COLD,
796}; 407};
797 408
798/* 409struct ath9k_hw_version {
799 * Enum to indentify the eeprom mappings 410 u32 magic;
800 */ 411 u16 devid;
801enum hal_eep_map { 412 u16 subvendorid;
802 EEP_MAP_DEFAULT = 0x0, 413 u32 macVersion;
803 EEP_MAP_4KBITS, 414 u16 macRev;
804 EEP_MAP_MAX 415 u16 phyRev;
416 u16 analog5GhzRev;
417 u16 analog2GhzRev;
805}; 418};
806 419
420struct ath_hw {
421 struct ath_softc *ah_sc;
422 struct ath9k_hw_version hw_version;
423 struct ath9k_ops_config config;
424 struct ath9k_hw_capabilities caps;
425 struct ath9k_regulatory regulatory;
426 struct ath9k_channel channels[38];
427 struct ath9k_channel *curchan;
807 428
808struct ath_hal_5416 {
809 struct ath_hal ah;
810 union { 429 union {
811 struct ar5416_eeprom_def def; 430 struct ar5416_eeprom_def def;
812 struct ar5416_eeprom_4k map4k; 431 struct ar5416_eeprom_4k map4k;
813 } ah_eeprom; 432 } eeprom;
814 struct ar5416Stats ah_stats; 433 const struct eeprom_ops *eep_ops;
815 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES]; 434 enum ath9k_eep_map eep_map;
816 void __iomem *ah_cal_mem; 435
817 436 bool sw_mgmt_crypto;
818 u8 ah_macaddr[ETH_ALEN]; 437 bool is_pciexpress;
819 u8 ah_bssid[ETH_ALEN]; 438 u8 macaddr[ETH_ALEN];
820 u8 ah_bssidmask[ETH_ALEN]; 439 u16 tx_trig_level;
821 u16 ah_assocId; 440 u16 rfsilent;
822 441 u32 rfkill_gpio;
823 int16_t ah_curchanRadIndex; 442 u32 rfkill_polarity;
824 u32 ah_maskReg; 443 u32 btactive_gpio;
825 u32 ah_txOkInterruptMask; 444 u32 wlanactive_gpio;
826 u32 ah_txErrInterruptMask; 445 u32 ah_flags;
827 u32 ah_txDescInterruptMask; 446
828 u32 ah_txEolInterruptMask; 447 enum nl80211_iftype opmode;
829 u32 ah_txUrnInterruptMask; 448 enum ath9k_power_mode power_mode;
830 bool ah_chipFullSleep; 449 enum ath9k_power_mode restore_mode;
831 u32 ah_atimWindow; 450
832 u16 ah_antennaSwitchSwap; 451 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
833 enum ath9k_power_mode ah_powerMode; 452 struct ar5416Stats stats;
834 enum ath9k_ant_setting ah_diversityControl; 453 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
454
455 int16_t curchan_rad_index;
456 u32 mask_reg;
457 u32 txok_interrupt_mask;
458 u32 txerr_interrupt_mask;
459 u32 txdesc_interrupt_mask;
460 u32 txeol_interrupt_mask;
461 u32 txurn_interrupt_mask;
462 bool chip_fullsleep;
463 u32 atim_window;
464 u16 antenna_switch_swap;
465 enum ath9k_ant_setting diversity_control;
835 466
836 /* Calibration */ 467 /* Calibration */
837 enum hal_cal_types ah_suppCals; 468 enum hal_cal_types supp_cals;
838 struct hal_cal_list ah_iqCalData; 469 struct hal_cal_list iq_caldata;
839 struct hal_cal_list ah_adcGainCalData; 470 struct hal_cal_list adcgain_caldata;
840 struct hal_cal_list ah_adcDcCalInitData; 471 struct hal_cal_list adcdc_calinitdata;
841 struct hal_cal_list ah_adcDcCalData; 472 struct hal_cal_list adcdc_caldata;
842 struct hal_cal_list *ah_cal_list; 473 struct hal_cal_list *cal_list;
843 struct hal_cal_list *ah_cal_list_last; 474 struct hal_cal_list *cal_list_last;
844 struct hal_cal_list *ah_cal_list_curr; 475 struct hal_cal_list *cal_list_curr;
845#define ah_totalPowerMeasI ah_Meas0.unsign 476#define totalPowerMeasI meas0.unsign
846#define ah_totalPowerMeasQ ah_Meas1.unsign 477#define totalPowerMeasQ meas1.unsign
847#define ah_totalIqCorrMeas ah_Meas2.sign 478#define totalIqCorrMeas meas2.sign
848#define ah_totalAdcIOddPhase ah_Meas0.unsign 479#define totalAdcIOddPhase meas0.unsign
849#define ah_totalAdcIEvenPhase ah_Meas1.unsign 480#define totalAdcIEvenPhase meas1.unsign
850#define ah_totalAdcQOddPhase ah_Meas2.unsign 481#define totalAdcQOddPhase meas2.unsign
851#define ah_totalAdcQEvenPhase ah_Meas3.unsign 482#define totalAdcQEvenPhase meas3.unsign
852#define ah_totalAdcDcOffsetIOddPhase ah_Meas0.sign 483#define totalAdcDcOffsetIOddPhase meas0.sign
853#define ah_totalAdcDcOffsetIEvenPhase ah_Meas1.sign 484#define totalAdcDcOffsetIEvenPhase meas1.sign
854#define ah_totalAdcDcOffsetQOddPhase ah_Meas2.sign 485#define totalAdcDcOffsetQOddPhase meas2.sign
855#define ah_totalAdcDcOffsetQEvenPhase ah_Meas3.sign 486#define totalAdcDcOffsetQEvenPhase meas3.sign
856 union { 487 union {
857 u32 unsign[AR5416_MAX_CHAINS]; 488 u32 unsign[AR5416_MAX_CHAINS];
858 int32_t sign[AR5416_MAX_CHAINS]; 489 int32_t sign[AR5416_MAX_CHAINS];
859 } ah_Meas0; 490 } meas0;
860 union { 491 union {
861 u32 unsign[AR5416_MAX_CHAINS]; 492 u32 unsign[AR5416_MAX_CHAINS];
862 int32_t sign[AR5416_MAX_CHAINS]; 493 int32_t sign[AR5416_MAX_CHAINS];
863 } ah_Meas1; 494 } meas1;
864 union { 495 union {
865 u32 unsign[AR5416_MAX_CHAINS]; 496 u32 unsign[AR5416_MAX_CHAINS];
866 int32_t sign[AR5416_MAX_CHAINS]; 497 int32_t sign[AR5416_MAX_CHAINS];
867 } ah_Meas2; 498 } meas2;
868 union { 499 union {
869 u32 unsign[AR5416_MAX_CHAINS]; 500 u32 unsign[AR5416_MAX_CHAINS];
870 int32_t sign[AR5416_MAX_CHAINS]; 501 int32_t sign[AR5416_MAX_CHAINS];
871 } ah_Meas3; 502 } meas3;
872 u16 ah_CalSamples; 503 u16 cal_samples;
873 504
874 u32 ah_staId1Defaults; 505 u32 sta_id1_defaults;
875 u32 ah_miscMode; 506 u32 misc_mode;
876 enum { 507 enum {
877 AUTO_32KHZ, 508 AUTO_32KHZ,
878 USE_32KHZ, 509 USE_32KHZ,
879 DONT_USE_32KHZ, 510 DONT_USE_32KHZ,
880 } ah_enable32kHzClock; 511 } enable_32kHz_clock;
881 512
882 /* RF */ 513 /* RF */
883 u32 *ah_analogBank0Data; 514 u32 *analogBank0Data;
884 u32 *ah_analogBank1Data; 515 u32 *analogBank1Data;
885 u32 *ah_analogBank2Data; 516 u32 *analogBank2Data;
886 u32 *ah_analogBank3Data; 517 u32 *analogBank3Data;
887 u32 *ah_analogBank6Data; 518 u32 *analogBank6Data;
888 u32 *ah_analogBank6TPCData; 519 u32 *analogBank6TPCData;
889 u32 *ah_analogBank7Data; 520 u32 *analogBank7Data;
890 u32 *ah_addac5416_21; 521 u32 *addac5416_21;
891 u32 *ah_bank6Temp; 522 u32 *bank6Temp;
892 523
893 int16_t ah_txPowerIndexOffset; 524 int16_t txpower_indexoffset;
894 u32 ah_beaconInterval; 525 u32 beacon_interval;
895 u32 ah_slottime; 526 u32 slottime;
896 u32 ah_acktimeout; 527 u32 acktimeout;
897 u32 ah_ctstimeout; 528 u32 ctstimeout;
898 u32 ah_globaltxtimeout; 529 u32 globaltxtimeout;
899 u8 ah_gBeaconRate; 530 u8 gbeacon_rate;
900 u32 ah_gpioSelect;
901 u32 ah_polarity;
902 u32 ah_gpioBit;
903 531
904 /* ANI */ 532 /* ANI */
905 u32 ah_procPhyErr; 533 u32 proc_phyerr;
906 bool ah_hasHwPhyCounters; 534 bool has_hw_phycounters;
907 u32 ah_aniPeriod; 535 u32 aniperiod;
908 struct ar5416AniState *ah_curani; 536 struct ar5416AniState *curani;
909 struct ar5416AniState ah_ani[255]; 537 struct ar5416AniState ani[255];
910 int ah_totalSizeDesired[5]; 538 int totalSizeDesired[5];
911 int ah_coarseHigh[5]; 539 int coarse_high[5];
912 int ah_coarseLow[5]; 540 int coarse_low[5];
913 int ah_firpwr[5]; 541 int firpwr[5];
914 enum ath9k_ani_cmd ah_ani_function; 542 enum ath9k_ani_cmd ani_function;
915 543
916 u32 ah_intrTxqs; 544 u32 intr_txqs;
917 bool ah_intrMitigation; 545 bool intr_mitigation;
918 enum ath9k_ht_extprotspacing ah_extprotspacing; 546 enum ath9k_ht_extprotspacing extprotspacing;
919 u8 ah_txchainmask; 547 u8 txchainmask;
920 u8 ah_rxchainmask; 548 u8 rxchainmask;
921 549
922 struct ar5416IniArray ah_iniModes; 550 struct ar5416IniArray iniModes;
923 struct ar5416IniArray ah_iniCommon; 551 struct ar5416IniArray iniCommon;
924 struct ar5416IniArray ah_iniBank0; 552 struct ar5416IniArray iniBank0;
925 struct ar5416IniArray ah_iniBB_RfGain; 553 struct ar5416IniArray iniBB_RfGain;
926 struct ar5416IniArray ah_iniBank1; 554 struct ar5416IniArray iniBank1;
927 struct ar5416IniArray ah_iniBank2; 555 struct ar5416IniArray iniBank2;
928 struct ar5416IniArray ah_iniBank3; 556 struct ar5416IniArray iniBank3;
929 struct ar5416IniArray ah_iniBank6; 557 struct ar5416IniArray iniBank6;
930 struct ar5416IniArray ah_iniBank6TPC; 558 struct ar5416IniArray iniBank6TPC;
931 struct ar5416IniArray ah_iniBank7; 559 struct ar5416IniArray iniBank7;
932 struct ar5416IniArray ah_iniAddac; 560 struct ar5416IniArray iniAddac;
933 struct ar5416IniArray ah_iniPcieSerdes; 561 struct ar5416IniArray iniPcieSerdes;
934 struct ar5416IniArray ah_iniModesAdditional; 562 struct ar5416IniArray iniModesAdditional;
935 struct ar5416IniArray ah_iniModesRxGain; 563 struct ar5416IniArray iniModesRxGain;
936 struct ar5416IniArray ah_iniModesTxGain; 564 struct ar5416IniArray iniModesTxGain;
937 /* To indicate EEPROM mapping used */
938 enum hal_eep_map ah_eep_map;
939}; 565};
940#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah))
941
942#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
943
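FREQ2FBIN above packs a channel frequency in MHz into the frequency-bin byte used by the EEPROM calibration piers: on 2 GHz (second argument non-zero) the bin is freq - 2300, on 5 GHz it is (freq - 4800) / 5. Two worked values, using ordinary channel frequencies as illustrative inputs:

#include <stdio.h>

#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))

int main(void)
{
	printf("%d\n", FREQ2FBIN(2412, 1)); /* 2.4 GHz channel 1 -> 2412 - 2300     = 112 */
	printf("%d\n", FREQ2FBIN(5180, 0)); /* 5 GHz channel 36  -> (5180 - 4800)/5 =  76 */
	return 0;
}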
944#define ar5416RfDetach(ah) do { \
945 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \
946 AH5416(ah)->ah_rfHal.rfDetach(ah); \
947 } while (0)
948
949#define ath9k_hw_use_flash(_ah) \
950 (!(_ah->ah_flags & AH_USE_EEPROM))
951
952
953#define DO_DELAY(x) do { \
954 if ((++(x) % 64) == 0) \
955 udelay(1); \
956 } while (0)
957
958#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
959 int r; \
960 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
961 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
962 INI_RA((iniarray), r, (column))); \
963 DO_DELAY(regWr); \
964 } \
965 } while (0)
966
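REG_WRITE_ARRAY above walks every row of an ini table, writing the value from the requested column to the register address held in column 0, while DO_DELAY inserts a 1 us pause after every 64 writes, presumably to throttle long bursts of register writes. A hedged user-space sketch of the same loop; reg_write(), udelay() and write_ini_column() are stubs invented here so the sketch can run anywhere, not the driver's primitives.

#include <stdint.h>
#include <stdio.h>

static void reg_write(uint32_t addr, uint32_t val)   /* stand-in for REG_WRITE() */
{
	printf("write 0x%08x <- 0x%08x\n", (unsigned int)addr, (unsigned int)val);
}
static void udelay(unsigned int us) { (void)us; }    /* stand-in for udelay() */

static void write_ini_column(const uint32_t *tbl, unsigned int rows,
			     unsigned int cols, unsigned int column)
{
	unsigned int r, writes = 0;

	for (r = 0; r < rows; r++) {
		/* column 0 = register address, column 'column' = per-mode value */
		reg_write(tbl[r * cols + 0], tbl[r * cols + column]);
		if ((++writes % 64) == 0)            /* what DO_DELAY() does */
			udelay(1);
	}
}

int main(void)
{
	static const uint32_t tbl[2][3] = {
		{ 0x00001030, 0x00000230, 0x00000460 },
		{ 0x00001070, 0x00000168, 0x000002d0 },
	};

	write_ini_column(&tbl[0][0], 2, 3, 2);  /* push the third column of each row */
	return 0;
}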
967#define BASE_ACTIVATE_DELAY 100
968#define RTC_PLL_SETTLE_DELAY 1000
969#define COEF_SCALE_S 24
970#define HT40_CHANNEL_CENTER_SHIFT 10
971
972#define AR5416_EEPROM_MAGIC_OFFSET 0x0
973
974#define AR5416_EEPROM_S 2
975#define AR5416_EEPROM_OFFSET 0x2000
976#define AR5416_EEPROM_START_ADDR \
977 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
978#define AR5416_EEPROM_MAX 0xae0
979#define ar5416_get_eep_ver(_ahp) \
980 (((_ahp)->ah_eeprom.def.baseEepHeader.version >> 12) & 0xF)
981#define ar5416_get_eep_rev(_ahp) \
982 (((_ahp)->ah_eeprom.def.baseEepHeader.version) & 0xFFF)
983#define ar5416_get_ntxchains(_txchainmask) \
984 (((_txchainmask >> 2) & 1) + \
985 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
986 566
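The helpers just above split the 16-bit EEPROM version word into a 4-bit major version and a 12-bit revision (ar5416_get_eep_ver / ar5416_get_eep_rev), and ar5416_get_ntxchains counts the set bits in the low three bits of a chainmask. A few worked values; the 0xE012 version word is only an illustrative input:

#include <stdio.h>

#define ar5416_get_ntxchains(m) \
	((((m) >> 2) & 1) + (((m) >> 1) & 1) + ((m) & 1))

int main(void)
{
	/* chainmask 0x1 -> 1 chain, 0x5 (chains 0 and 2) -> 2 chains, 0x7 -> 3 chains */
	printf("%d %d %d\n", ar5416_get_ntxchains(0x1),
	       ar5416_get_ntxchains(0x5), ar5416_get_ntxchains(0x7));

	/* version word 0xE012 -> major 0xE, revision 0x012 */
	unsigned int version = 0xE012;
	printf("%u.%u\n", (version >> 12) & 0xF, version & 0xFFF);
	return 0;
}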
 987/* EEPROM 4K bit map definitions */				567/* Attach, Detach, Reset */
988#define ar5416_get_eep4k_ver(_ahp) \ 568const char *ath9k_hw_probe(u16 vendorid, u16 devid);
989 (((_ahp)->ah_eeprom.map4k.baseEepHeader.version >> 12) & 0xF) 569void ath9k_hw_detach(struct ath_hw *ah);
990#define ar5416_get_eep4k_rev(_ahp) \ 570struct ath_hw *ath9k_hw_attach(u16 devid, struct ath_softc *sc, int *error);
991 (((_ahp)->ah_eeprom.map4k.baseEepHeader.version) & 0xFFF) 571void ath9k_hw_rfdetach(struct ath_hw *ah);
992 572int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
993 573 bool bChannelChange);
994#ifdef __BIG_ENDIAN 574bool ath9k_hw_fill_cap_info(struct ath_hw *ah);
995#define AR5416_EEPROM_MAGIC 0x5aa5 575bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
996#else 576 u32 capability, u32 *result);
997#define AR5416_EEPROM_MAGIC 0xa55a 577bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
578 u32 capability, u32 setting, int *status);
579
580/* Key Cache Management */
581bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
582bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac);
583bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
584 const struct ath9k_keyval *k,
585 const u8 *mac, int xorKey);
586bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry);
587
588/* GPIO / RFKILL / Antennae */
589void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
590u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
591void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
592 u32 ah_signal_type);
593void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
594#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
595void ath9k_enable_rfkill(struct ath_hw *ah);
998#endif 596#endif
999 597u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
1000#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) 598void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
1001 599bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
1002#define ATH9K_ANTENNA0_CHAINMASK 0x1 600 enum ath9k_ant_setting settings,
1003#define ATH9K_ANTENNA1_CHAINMASK 0x2 601 struct ath9k_channel *chan,
1004 602 u8 *tx_chainmask, u8 *rx_chainmask,
1005#define ATH9K_NUM_DMA_DEBUG_REGS 8 603 u8 *antenna_cfgd);
1006#define ATH9K_NUM_QUEUES 10 604
1007 605/* General Operation */
1008#define HAL_NOISE_IMMUNE_MAX 4 606bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val);
1009#define HAL_SPUR_IMMUNE_MAX 7 607u32 ath9k_hw_reverse_bits(u32 val, u32 n);
1010#define HAL_FIRST_STEP_MAX 2 608bool ath9k_get_channel_edges(struct ath_hw *ah, u16 flags, u16 *low, u16 *high);
1011 609u16 ath9k_hw_computetxtime(struct ath_hw *ah, struct ath_rate_table *rates,
1012#define ATH9K_ANI_OFDM_TRIG_HIGH 500 610 u32 frameLen, u16 rateix, bool shortPreamble);
1013#define ATH9K_ANI_OFDM_TRIG_LOW 200 611void ath9k_hw_get_channel_centers(struct ath_hw *ah,
1014#define ATH9K_ANI_CCK_TRIG_HIGH 200 612 struct ath9k_channel *chan,
1015#define ATH9K_ANI_CCK_TRIG_LOW 100 613 struct chan_centers *centers);
1016#define ATH9K_ANI_NOISE_IMMUNE_LVL 4 614u32 ath9k_hw_getrxfilter(struct ath_hw *ah);
1017#define ATH9K_ANI_USE_OFDM_WEAK_SIG true 615void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
1018#define ATH9K_ANI_CCK_WEAK_SIG_THR false 616bool ath9k_hw_phy_disable(struct ath_hw *ah);
1019#define ATH9K_ANI_SPUR_IMMUNE_LVL 7 617bool ath9k_hw_disable(struct ath_hw *ah);
1020#define ATH9K_ANI_FIRSTEP_LVL 0 618bool ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
1021#define ATH9K_ANI_RSSI_THR_HIGH 40 619void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
1022#define ATH9K_ANI_RSSI_THR_LOW 7 620void ath9k_hw_setopmode(struct ath_hw *ah);
1023#define ATH9K_ANI_PERIOD 100 621void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
1024 622void ath9k_hw_setbssidmask(struct ath_softc *sc);
1025#define AR_GPIOD_MASK 0x00001FFF 623void ath9k_hw_write_associd(struct ath_softc *sc);
1026#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) 624u64 ath9k_hw_gettsf64(struct ath_hw *ah);
1027 625void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
1028#define HAL_EP_RND(x, mul) \ 626void ath9k_hw_reset_tsf(struct ath_hw *ah);
1029 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 627bool ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
1030#define BEACON_RSSI(ahp) \ 628bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
1031 HAL_EP_RND(ahp->ah_stats.ast_nodestats.ns_avgbrssi, \ 629void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode);
1032 ATH9K_RSSI_EP_MULTIPLIER) 630void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
1033 631void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
1034#define ah_mibStats ah_stats.ast_mibstats 632 const struct ath9k_beacon_state *bs);
1035 633bool ath9k_hw_setpower(struct ath_hw *ah,
1036#define AH_TIMEOUT 100000 634 enum ath9k_power_mode mode);
1037#define AH_TIME_QUANTUM 10 635void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore);
1038 636
1039#define AR_KEYTABLE_SIZE 128 637/* Interrupt Handling */
1040#define POWER_UP_TIME 200000 638bool ath9k_hw_intrpend(struct ath_hw *ah);
1041 639bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked);
1042#define EXT_ADDITIVE (0x8000) 640enum ath9k_int ath9k_hw_intrget(struct ath_hw *ah);
1043#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) 641enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
1044#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) 642
1045#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) 643void ath9k_hw_btcoex_enable(struct ath_hw *ah);
1046
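HAL_EP_RND above divides an exponentially-averaged value by its fixed-point multiplier with round-to-nearest behaviour (it rounds up whenever the remainder is at least half the divisor), and BEACON_RSSI applies it to the averaged beacon RSSI kept in the MIB stats. A short worked sketch, using 16 as a stand-in multiplier since ATH9K_RSSI_EP_MULTIPLIER is defined elsewhere:

#include <stdio.h>

#define HAL_EP_RND(x, mul) \
	((((x) % (mul)) >= ((mul) / 2)) ? ((x) + ((mul) - 1)) / (mul) : (x) / (mul))

int main(void)
{
	/* 100/16 = 6.25 -> remainder 4 < 8, rounds down to 6  */
	/* 108/16 = 6.75 -> remainder 12 >= 8, rounds up to 7  */
	printf("%d %d\n", HAL_EP_RND(100, 16), HAL_EP_RND(108, 16));
	return 0;
}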
1047#define SUB_NUM_CTL_MODES_AT_5G_40 2
1048#define SUB_NUM_CTL_MODES_AT_2G_40 3
1049#define SPUR_RSSI_THRESH 40
1050
1051#define TU_TO_USEC(_tu) ((_tu) << 10)
1052
1053#define CAB_TIMEOUT_VAL 10
1054#define BEACON_TIMEOUT_VAL 10
1055#define MIN_BEACON_TIMEOUT_VAL 1
1056#define SLEEP_SLOP 3
1057
1058#define CCK_SIFS_TIME 10
1059#define CCK_PREAMBLE_BITS 144
1060#define CCK_PLCP_BITS 48
1061
1062#define OFDM_SIFS_TIME 16
1063#define OFDM_PREAMBLE_TIME 20
1064#define OFDM_PLCP_BITS 22
1065#define OFDM_SYMBOL_TIME 4
1066
1067#define OFDM_SIFS_TIME_HALF 32
1068#define OFDM_PREAMBLE_TIME_HALF 40
1069#define OFDM_PLCP_BITS_HALF 22
1070#define OFDM_SYMBOL_TIME_HALF 8
1071
1072#define OFDM_SIFS_TIME_QUARTER 64
1073#define OFDM_PREAMBLE_TIME_QUARTER 80
1074#define OFDM_PLCP_BITS_QUARTER 22
1075#define OFDM_SYMBOL_TIME_QUARTER 16
1076
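The CCK/OFDM timing constants above (SIFS, preamble time, PLCP bits, symbol time, plus the half- and quarter-rate variants) feed the transmit-time computation declared as ath9k_hw_computetxtime() in the new header. As a rough, illustrative sketch only, and not the driver's exact routine, an OFDM frame duration is typically assembled from them like this; ofdm_tx_time() is a name invented for the sketch:

#include <stdio.h>

#define OFDM_PLCP_BITS     22
#define OFDM_PREAMBLE_TIME 20
#define OFDM_SYMBOL_TIME    4
#define OFDM_SIFS_TIME     16

/*
 * Sketch: microseconds to send 'len' payload bytes at 'kbps', rounding up
 * to whole OFDM symbols.  The real ath9k_hw_computetxtime() also covers
 * CCK, short preambles and half/quarter-rate channels.
 */
static unsigned int ofdm_tx_time(unsigned int len, unsigned int kbps)
{
	unsigned int bits_per_symbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
	unsigned int num_bits = OFDM_PLCP_BITS + (len << 3);
	unsigned int num_symbols = (num_bits + bits_per_symbol - 1) / bits_per_symbol;

	return OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME +
	       num_symbols * OFDM_SYMBOL_TIME;
}

int main(void)
{
	/* e.g. a 1500-byte frame at 54000 kbps -> 260 us with these constants */
	printf("%u us\n", ofdm_tx_time(1500, 54000));
	return 0;
}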
1077u32 ath9k_hw_get_eeprom(struct ath_hal *ah,
1078 enum eeprom_param param);
1079 644
1080#endif 645#endif
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
index f3cfa16525e..d49236368a1 100644
--- a/drivers/net/wireless/ath9k/initvals.h
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -14,7 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17/* AR5416 to Fowl ar5146.ini */
18static const u32 ar5416Modes_9100[][6] = { 17static const u32 ar5416Modes_9100[][6] = {
19 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 18 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
20 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 19 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -659,10 +658,9 @@ static const u32 ar5416Addac_9100[][2] = {
659 {0x0000989c, 0x00000000 }, 658 {0x0000989c, 0x00000000 },
660 {0x0000989c, 0x00000000 }, 659 {0x0000989c, 0x00000000 },
661 {0x0000989c, 0x00000000 }, 660 {0x0000989c, 0x00000000 },
662 {0x000098c4, 0x00000000 }, 661 {0x000098cc, 0x00000000 },
663}; 662};
664 663
665/* ar5416 - howl ar5416_howl.ini */
666static const u32 ar5416Modes[][6] = { 664static const u32 ar5416Modes[][6] = {
667 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 665 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
668 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 666 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -1313,7 +1311,6 @@ static const u32 ar5416Addac[][2] = {
1313 {0x000098cc, 0x00000000 }, 1311 {0x000098cc, 0x00000000 },
1314}; 1312};
1315 1313
1316/* AR5416 9160 Sowl ar5416_sowl.ini */
1317static const u32 ar5416Modes_9160[][6] = { 1314static const u32 ar5416Modes_9160[][6] = {
1318 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 1315 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1319 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 1316 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2549,6 +2546,8 @@ static const u32 ar9280Modes_9280_2[][6] = {
2549 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, 2546 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
2550 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 2547 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
2551 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, 2548 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
2549 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
2550 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
2552 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 2551 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
2553 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 2552 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2554 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 2553 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
@@ -2587,7 +2586,6 @@ static const u32 ar9280Modes_9280_2[][6] = {
2587 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 2586 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
2588 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 2587 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2589 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 }, 2588 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 },
2590 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2591 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 2589 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2592 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2590 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2593 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 }, 2591 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
@@ -2719,7 +2717,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2719 { 0x00008110, 0x00000168 }, 2717 { 0x00008110, 0x00000168 },
2720 { 0x00008118, 0x000100aa }, 2718 { 0x00008118, 0x000100aa },
2721 { 0x0000811c, 0x00003210 }, 2719 { 0x0000811c, 0x00003210 },
2722 { 0x00008120, 0x08f04800 },
2723 { 0x00008124, 0x00000000 }, 2720 { 0x00008124, 0x00000000 },
2724 { 0x00008128, 0x00000000 }, 2721 { 0x00008128, 0x00000000 },
2725 { 0x0000812c, 0x00000000 }, 2722 { 0x0000812c, 0x00000000 },
@@ -2735,7 +2732,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2735 { 0x00008178, 0x00000100 }, 2732 { 0x00008178, 0x00000100 },
2736 { 0x0000817c, 0x00000000 }, 2733 { 0x0000817c, 0x00000000 },
2737 { 0x000081c0, 0x00000000 }, 2734 { 0x000081c0, 0x00000000 },
2738 { 0x000081d0, 0x00003210 },
2739 { 0x000081ec, 0x00000000 }, 2735 { 0x000081ec, 0x00000000 },
2740 { 0x000081f0, 0x00000000 }, 2736 { 0x000081f0, 0x00000000 },
2741 { 0x000081f4, 0x00000000 }, 2737 { 0x000081f4, 0x00000000 },
@@ -2817,7 +2813,7 @@ static const u32 ar9280Common_9280_2[][2] = {
2817 { 0x00009958, 0x2108ecff }, 2813 { 0x00009958, 0x2108ecff },
2818 { 0x00009940, 0x14750604 }, 2814 { 0x00009940, 0x14750604 },
2819 { 0x0000c95c, 0x004b6a8e }, 2815 { 0x0000c95c, 0x004b6a8e },
2820 { 0x00009968, 0x000003ce }, 2816 { 0x0000c968, 0x000003ce },
2821 { 0x00009970, 0x190fb515 }, 2817 { 0x00009970, 0x190fb515 },
2822 { 0x00009974, 0x00000000 }, 2818 { 0x00009974, 0x00000000 },
2823 { 0x00009978, 0x00000001 }, 2819 { 0x00009978, 0x00000001 },
@@ -2909,16 +2905,12 @@ static const u32 ar9280Common_9280_2[][2] = {
2909 { 0x0000780c, 0x21084210 }, 2905 { 0x0000780c, 0x21084210 },
2910 { 0x00007810, 0x6d801300 }, 2906 { 0x00007810, 0x6d801300 },
2911 { 0x00007818, 0x07e41000 }, 2907 { 0x00007818, 0x07e41000 },
2912 { 0x0000781c, 0x00392000 },
2913 { 0x00007820, 0x92592480 },
2914 { 0x00007824, 0x00040000 }, 2908 { 0x00007824, 0x00040000 },
2915 { 0x00007828, 0xdb005012 }, 2909 { 0x00007828, 0xdb005012 },
2916 { 0x0000782c, 0x04924914 }, 2910 { 0x0000782c, 0x04924914 },
2917 { 0x00007830, 0x21084210 }, 2911 { 0x00007830, 0x21084210 },
2918 { 0x00007834, 0x6d801300 }, 2912 { 0x00007834, 0x6d801300 },
2919 { 0x0000783c, 0x07e40000 }, 2913 { 0x0000783c, 0x07e40000 },
2920 { 0x00007840, 0x00392000 },
2921 { 0x00007844, 0x92592480 },
2922 { 0x00007848, 0x00100000 }, 2914 { 0x00007848, 0x00100000 },
2923 { 0x0000784c, 0x773f0567 }, 2915 { 0x0000784c, 0x773f0567 },
2924 { 0x00007850, 0x54214514 }, 2916 { 0x00007850, 0x54214514 },
@@ -2954,7 +2946,6 @@ static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
2954 { 0x00009844, 0x03721821, 0x03721821 }, 2946 { 0x00009844, 0x03721821, 0x03721821 },
2955 { 0x00009914, 0x00000898, 0x00001130 }, 2947 { 0x00009914, 0x00000898, 0x00001130 },
2956 { 0x00009918, 0x0000000b, 0x00000016 }, 2948 { 0x00009918, 0x0000000b, 0x00000016 },
2957 { 0x00009944, 0xdfbc1210, 0xdfbc1210 },
2958}; 2949};
2959 2950
2960static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = { 2951static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = {
@@ -3366,21 +3357,26 @@ static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
3366 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a }, 3357 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a },
3367 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211 }, 3358 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211 },
3368 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, 3359 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
3369 { 0x0000a324, 0x00020092, 0x00020092, 0x00022411, 0x00022411, 0x00022411 }, 3360 { 0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411, 0x00022411 },
3370 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00025413, 0x00025413, 0x00025413 }, 3361 { 0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413, 0x00025413 },
3371 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00029811, 0x00029811, 0x00029811 }, 3362 { 0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811, 0x00029811 },
3372 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002c813, 0x0002c813, 0x0002c813 }, 3363 { 0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813, 0x0002c813 },
3373 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030a14, 0x00030a14, 0x00030a14 }, 3364 { 0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14, 0x00030a14 },
3374 { 0x0000a338, 0x000321ec, 0x000321ec, 0x00035a50, 0x00035a50, 0x00035a50 }, 3365 { 0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50, 0x00035a50 },
3375 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00039c4c, 0x00039c4c, 0x00039c4c }, 3366 { 0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c, 0x00039c4c },
3376 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003de8a, 0x0003de8a, 0x0003de8a }, 3367 { 0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a, 0x0003de8a },
3377 { 0x0000a344, 0x000321ec, 0x000321ec, 0x00042e92, 0x00042e92, 0x00042e92 }, 3368 { 0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92, 0x00042e92 },
3378 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00046ed2, 0x00046ed2, 0x00046ed2 }, 3369 { 0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2, 0x00046ed2 },
3379 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x0004bed5, 0x0004bed5, 0x0004bed5 }, 3370 { 0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5, 0x0004bed5 },
3380 { 0x0000a350, 0x000321ec, 0x000321ec, 0x0004ff54, 0x0004ff54, 0x0004ff54 }, 3371 { 0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54, 0x0004ff54 },
3381 { 0x0000a354, 0x000321ec, 0x000321ec, 0x00053fd5, 0x00053fd5, 0x00053fd5 }, 3372 { 0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5, 0x00055fd5 },
3382 { 0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, 3373 { 0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff },
3383 { 0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, 3374 { 0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff },
3375 { 0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 },
3376 { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 },
3377 { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
3378 { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
3379 { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3384 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce }, 3380 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
3385}; 3381};
3386 3382
@@ -3409,6 +3405,11 @@ static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
3409 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, 3405 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
3410 { 0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, 3406 { 0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff },
3411 { 0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, 3407 { 0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff },
3408 { 0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 },
3409 { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 },
3410 { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
3411 { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
3412 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3412 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce }, 3413 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
3413}; 3414};
3414 3415
@@ -4135,11 +4136,11 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4135 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 4136 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
4136 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 4137 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
4137 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, 4138 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
4138 { 0x00009844, 0x0372161e, 0x0372161e, 0x03720020, 0x03720020, 0x037216a0 }, 4139 { 0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0 },
4139 { 0x00009848, 0x00001066, 0x00001066, 0x00000057, 0x00000057, 0x00001059 }, 4140 { 0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 },
4140 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, 4141 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
4141 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 4142 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
4142 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3136605e, 0x3136605e, 0x3139605e }, 4143 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
4143 { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 }, 4144 { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 },
4144 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 4145 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
4145 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 4146 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
@@ -4159,264 +4160,264 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4159 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 4160 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
4160 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 4161 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4161 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 4162 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4162 { 0x00009a00, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 }, 4163 { 0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 },
4163 { 0x00009a04, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 }, 4164 { 0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 },
4164 { 0x00009a08, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 }, 4165 { 0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 },
4165 { 0x00009a0c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 }, 4166 { 0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 },
4166 { 0x00009a10, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 }, 4167 { 0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 },
4167 { 0x00009a14, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 }, 4168 { 0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 },
4168 { 0x00009a18, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 }, 4169 { 0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 },
4169 { 0x00009a1c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, 4170 { 0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 },
4170 { 0x00009a20, 0x00000000, 0x00000000, 0x00068114, 0x00068114, 0x00000000 }, 4171 { 0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 },
4171 { 0x00009a24, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 }, 4172 { 0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 },
4172 { 0x00009a28, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 }, 4173 { 0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 },
4173 { 0x00009a2c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 }, 4174 { 0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 },
4174 { 0x00009a30, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 }, 4175 { 0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 },
4175 { 0x00009a34, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 }, 4176 { 0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 },
4176 { 0x00009a38, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 }, 4177 { 0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 },
4177 { 0x00009a3c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 }, 4178 { 0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 },
4178 { 0x00009a40, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 }, 4179 { 0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 },
4179 { 0x00009a44, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 }, 4180 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
4180 { 0x00009a48, 0x00000000, 0x00000000, 0x00068284, 0x00068284, 0x00000000 }, 4181 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
4181 { 0x00009a4c, 0x00000000, 0x00000000, 0x00068288, 0x00068288, 0x00000000 }, 4182 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
4182 { 0x00009a50, 0x00000000, 0x00000000, 0x00068220, 0x00068220, 0x00000000 }, 4183 { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
4183 { 0x00009a54, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 }, 4184 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
4184 { 0x00009a58, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 }, 4185 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
4185 { 0x00009a5c, 0x00000000, 0x00000000, 0x00068304, 0x00068304, 0x00000000 }, 4186 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
4186 { 0x00009a60, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 }, 4187 { 0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 },
4187 { 0x00009a64, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 }, 4188 { 0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 },
4188 { 0x00009a68, 0x00000000, 0x00000000, 0x00068380, 0x00068380, 0x00000000 }, 4189 { 0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 },
4189 { 0x00009a6c, 0x00000000, 0x00000000, 0x00068384, 0x00068384, 0x00000000 }, 4190 { 0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 },
4190 { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, 4191 { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 },
4191 { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, 4192 { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 },
4192 { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, 4193 { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 },
4193 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 4194 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
4194 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 4195 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
4195 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 4196 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
4196 { 0x00009a88, 0x00000000, 0x00000000, 0x00068b04, 0x00068b04, 0x00000000 }, 4197 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
4197 { 0x00009a8c, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 }, 4198 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4198 { 0x00009a90, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 }, 4199 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4199 { 0x00009a94, 0x00000000, 0x00000000, 0x00068b0c, 0x00068b0c, 0x00000000 }, 4200 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
4200 { 0x00009a98, 0x00000000, 0x00000000, 0x00068b80, 0x00068b80, 0x00000000 }, 4201 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
4201 { 0x00009a9c, 0x00000000, 0x00000000, 0x00068b84, 0x00068b84, 0x00000000 }, 4202 { 0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 },
4202 { 0x00009aa0, 0x00000000, 0x00000000, 0x00068b88, 0x00068b88, 0x00000000 }, 4203 { 0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 },
4203 { 0x00009aa4, 0x00000000, 0x00000000, 0x00068b8c, 0x00068b8c, 0x00000000 }, 4204 { 0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 },
4204 { 0x00009aa8, 0x00000000, 0x00000000, 0x000b8b90, 0x000b8b90, 0x00000000 }, 4205 { 0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 },
4205 { 0x00009aac, 0x00000000, 0x00000000, 0x000b8f80, 0x000b8f80, 0x00000000 }, 4206 { 0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 },
4206 { 0x00009ab0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, 4207 { 0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 },
4207 { 0x00009ab4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 }, 4208 { 0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 },
4208 { 0x00009ab8, 0x00000000, 0x00000000, 0x000b8f8c, 0x000b8f8c, 0x00000000 }, 4209 { 0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 },
4209 { 0x00009abc, 0x00000000, 0x00000000, 0x000b8f90, 0x000b8f90, 0x00000000 }, 4210 { 0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 },
4210 { 0x00009ac0, 0x00000000, 0x00000000, 0x000bb30c, 0x000bb30c, 0x00000000 }, 4211 { 0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 },
4211 { 0x00009ac4, 0x00000000, 0x00000000, 0x000bb310, 0x000bb310, 0x00000000 }, 4212 { 0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 },
4212 { 0x00009ac8, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 }, 4213 { 0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 },
4213 { 0x00009acc, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 }, 4214 { 0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 },
4214 { 0x00009ad0, 0x00000000, 0x00000000, 0x000bb324, 0x000bb324, 0x00000000 }, 4215 { 0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 },
4215 { 0x00009ad4, 0x00000000, 0x00000000, 0x000bb704, 0x000bb704, 0x00000000 }, 4216 { 0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 },
4216 { 0x00009ad8, 0x00000000, 0x00000000, 0x000f96a4, 0x000f96a4, 0x00000000 }, 4217 { 0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 },
4217 { 0x00009adc, 0x00000000, 0x00000000, 0x000f96a8, 0x000f96a8, 0x00000000 }, 4218 { 0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 },
4218 { 0x00009ae0, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 }, 4219 { 0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 },
4219 { 0x00009ae4, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 }, 4220 { 0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 },
4220 { 0x00009ae8, 0x00000000, 0x00000000, 0x000f9720, 0x000f9720, 0x00000000 }, 4221 { 0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 },
4221 { 0x00009aec, 0x00000000, 0x00000000, 0x000f9724, 0x000f9724, 0x00000000 }, 4222 { 0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 },
4222 { 0x00009af0, 0x00000000, 0x00000000, 0x000f9728, 0x000f9728, 0x00000000 }, 4223 { 0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 },
4223 { 0x00009af4, 0x00000000, 0x00000000, 0x000f972c, 0x000f972c, 0x00000000 }, 4224 { 0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 },
4224 { 0x00009af8, 0x00000000, 0x00000000, 0x000f97a0, 0x000f97a0, 0x00000000 }, 4225 { 0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 },
4225 { 0x00009afc, 0x00000000, 0x00000000, 0x000f97a4, 0x000f97a4, 0x00000000 }, 4226 { 0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 },
4226 { 0x00009b00, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 }, 4227 { 0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 },
4227 { 0x00009b04, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 }, 4228 { 0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 },
4228 { 0x00009b08, 0x00000000, 0x00000000, 0x000fb7b4, 0x000fb7b4, 0x00000000 }, 4229 { 0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 },
4229 { 0x00009b0c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 }, 4230 { 0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 },
4230 { 0x00009b10, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 }, 4231 { 0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 },
4231 { 0x00009b14, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 }, 4232 { 0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 },
4232 { 0x00009b18, 0x00000000, 0x00000000, 0x000fb7ad, 0x000fb7ad, 0x00000000 }, 4233 { 0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 },
4233 { 0x00009b1c, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 }, 4234 { 0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 },
4234 { 0x00009b20, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 }, 4235 { 0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 },
4235 { 0x00009b24, 0x00000000, 0x00000000, 0x000fb7b9, 0x000fb7b9, 0x00000000 }, 4236 { 0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 },
4236 { 0x00009b28, 0x00000000, 0x00000000, 0x000fb7c5, 0x000fb7c5, 0x00000000 }, 4237 { 0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 },
4237 { 0x00009b2c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 }, 4238 { 0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 },
4238 { 0x00009b30, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 }, 4239 { 0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 },
4239 { 0x00009b34, 0x00000000, 0x00000000, 0x000fb7d5, 0x000fb7d5, 0x00000000 }, 4240 { 0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 },
4240 { 0x00009b38, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 }, 4241 { 0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 },
4241 { 0x00009b3c, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 }, 4242 { 0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 },
4242 { 0x00009b40, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 }, 4243 { 0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 },
4243 { 0x00009b44, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 }, 4244 { 0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 },
4244 { 0x00009b48, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 }, 4245 { 0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 },
4245 { 0x00009b4c, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 }, 4246 { 0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 },
4246 { 0x00009b50, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 }, 4247 { 0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 },
4247 { 0x00009b54, 0x00000000, 0x00000000, 0x000fb7c7, 0x000fb7c7, 0x00000000 }, 4248 { 0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 },
4248 { 0x00009b58, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 }, 4249 { 0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 },
4249 { 0x00009b5c, 0x00000000, 0x00000000, 0x000fb7cf, 0x000fb7cf, 0x00000000 }, 4250 { 0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 },
4250 { 0x00009b60, 0x00000000, 0x00000000, 0x000fb7d7, 0x000fb7d7, 0x00000000 }, 4251 { 0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 },
4251 { 0x00009b64, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4252 { 0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4252 { 0x00009b68, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4253 { 0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4253 { 0x00009b6c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4254 { 0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4254 { 0x00009b70, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4255 { 0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4255 { 0x00009b74, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4256 { 0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4256 { 0x00009b78, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4257 { 0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4257 { 0x00009b7c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4258 { 0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4258 { 0x00009b80, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4259 { 0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4259 { 0x00009b84, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4260 { 0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4260 { 0x00009b88, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4261 { 0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4261 { 0x00009b8c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4262 { 0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4262 { 0x00009b90, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4263 { 0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4263 { 0x00009b94, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4264 { 0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4264 { 0x00009b98, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4265 { 0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4265 { 0x00009b9c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4266 { 0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4266 { 0x00009ba0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4267 { 0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4267 { 0x00009ba4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4268 { 0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4268 { 0x00009ba8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4269 { 0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4269 { 0x00009bac, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4270 { 0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4270 { 0x00009bb0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4271 { 0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4271 { 0x00009bb4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4272 { 0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4272 { 0x00009bb8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4273 { 0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4273 { 0x00009bbc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4274 { 0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4274 { 0x00009bc0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4275 { 0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4275 { 0x00009bc4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4276 { 0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4276 { 0x00009bc8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4277 { 0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4277 { 0x00009bcc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4278 { 0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4278 { 0x00009bd0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4279 { 0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4279 { 0x00009bd4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4280 { 0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4280 { 0x00009bd8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4281 { 0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4281 { 0x00009bdc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4282 { 0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4282 { 0x00009be0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4283 { 0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4283 { 0x00009be4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4284 { 0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4284 { 0x00009be8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4285 { 0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4285 { 0x00009bec, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4286 { 0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4286 { 0x00009bf0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4287 { 0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4287 { 0x00009bf4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4288 { 0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4288 { 0x00009bf8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4289 { 0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4289 { 0x00009bfc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, 4290 { 0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4290 { 0x0000aa00, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 }, 4291 { 0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 },
4291 { 0x0000aa04, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 }, 4292 { 0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 },
4292 { 0x0000aa08, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 }, 4293 { 0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 },
4293 { 0x0000aa0c, 0x00000000, 0x00000000, 0x00068080, 0x00068080, 0x00000000 }, 4294 { 0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 },
4294 { 0x0000aa10, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 }, 4295 { 0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 },
4295 { 0x0000aa14, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 }, 4296 { 0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 },
4296 { 0x0000aa18, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 }, 4297 { 0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 },
4297 { 0x0000aa1c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 }, 4298 { 0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 },
4298 { 0x0000aa20, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 }, 4299 { 0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 },
4299 { 0x0000aa24, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 }, 4300 { 0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 },
4300 { 0x0000aa28, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 }, 4301 { 0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 },
4301 { 0x0000aa2c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, 4302 { 0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 },
4302 { 0x0000aa30, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, 4303 { 0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 },
4303 { 0x0000aa34, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 }, 4304 { 0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 },
4304 { 0x0000aa38, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 }, 4305 { 0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 },
4305 { 0x0000aa3c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 }, 4306 { 0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 },
4306 { 0x0000aa40, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 }, 4307 { 0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 },
4307 { 0x0000aa44, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 }, 4308 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
4308 { 0x0000aa48, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 }, 4309 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
4309 { 0x0000aa4c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 }, 4310 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
4310 { 0x0000aa50, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 }, 4311 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
4311 { 0x0000aa54, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 }, 4312 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
4312 { 0x0000aa58, 0x00000000, 0x00000000, 0x000681ac, 0x000681ac, 0x00000000 }, 4313 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
4313 { 0x0000aa5c, 0x00000000, 0x00000000, 0x0006821c, 0x0006821c, 0x00000000 }, 4314 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
4314 { 0x0000aa60, 0x00000000, 0x00000000, 0x00068224, 0x00068224, 0x00000000 }, 4315 { 0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 },
4315 { 0x0000aa64, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 }, 4316 { 0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 },
4316 { 0x0000aa68, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 }, 4317 { 0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 },
4317 { 0x0000aa6c, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 }, 4318 { 0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 },
4318 { 0x0000aa70, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 }, 4319 { 0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 },
4319 { 0x0000aa74, 0x00000000, 0x00000000, 0x00068310, 0x00068310, 0x00000000 }, 4320 { 0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 },
4320 { 0x0000aa78, 0x00000000, 0x00000000, 0x00068788, 0x00068788, 0x00000000 }, 4321 { 0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 },
4321 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006878c, 0x0006878c, 0x00000000 }, 4322 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
4322 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068790, 0x00068790, 0x00000000 }, 4323 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
4323 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068794, 0x00068794, 0x00000000 }, 4324 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
4324 { 0x0000aa88, 0x00000000, 0x00000000, 0x00068798, 0x00068798, 0x00000000 }, 4325 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
4325 { 0x0000aa8c, 0x00000000, 0x00000000, 0x0006879c, 0x0006879c, 0x00000000 }, 4326 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4326 { 0x0000aa90, 0x00000000, 0x00000000, 0x00068b89, 0x00068b89, 0x00000000 }, 4327 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4327 { 0x0000aa94, 0x00000000, 0x00000000, 0x00068b8d, 0x00068b8d, 0x00000000 }, 4328 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
4328 { 0x0000aa98, 0x00000000, 0x00000000, 0x00068b91, 0x00068b91, 0x00000000 }, 4329 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
4329 { 0x0000aa9c, 0x00000000, 0x00000000, 0x00068b95, 0x00068b95, 0x00000000 }, 4330 { 0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 },
4330 { 0x0000aaa0, 0x00000000, 0x00000000, 0x00068b99, 0x00068b99, 0x00000000 }, 4331 { 0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 },
4331 { 0x0000aaa4, 0x00000000, 0x00000000, 0x00068ba5, 0x00068ba5, 0x00000000 }, 4332 { 0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 },
4332 { 0x0000aaa8, 0x00000000, 0x00000000, 0x00068ba9, 0x00068ba9, 0x00000000 }, 4333 { 0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 },
4333 { 0x0000aaac, 0x00000000, 0x00000000, 0x00068bad, 0x00068bad, 0x00000000 }, 4334 { 0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 },
4334 { 0x0000aab0, 0x00000000, 0x00000000, 0x000b8b0c, 0x000b8b0c, 0x00000000 }, 4335 { 0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 },
4335 { 0x0000aab4, 0x00000000, 0x00000000, 0x000b8f10, 0x000b8f10, 0x00000000 }, 4336 { 0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 },
4336 { 0x0000aab8, 0x00000000, 0x00000000, 0x000b8f14, 0x000b8f14, 0x00000000 }, 4337 { 0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 },
4337 { 0x0000aabc, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, 4338 { 0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 },
4338 { 0x0000aac0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, 4339 { 0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 },
4339 { 0x0000aac4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 }, 4340 { 0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 },
4340 { 0x0000aac8, 0x00000000, 0x00000000, 0x000bb380, 0x000bb380, 0x00000000 }, 4341 { 0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 },
4341 { 0x0000aacc, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 }, 4342 { 0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 },
4342 { 0x0000aad0, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 }, 4343 { 0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 },
4343 { 0x0000aad4, 0x00000000, 0x00000000, 0x000bb38c, 0x000bb38c, 0x00000000 }, 4344 { 0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 },
4344 { 0x0000aad8, 0x00000000, 0x00000000, 0x000bb394, 0x000bb394, 0x00000000 }, 4345 { 0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 },
4345 { 0x0000aadc, 0x00000000, 0x00000000, 0x000bb798, 0x000bb798, 0x00000000 }, 4346 { 0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 },
4346 { 0x0000aae0, 0x00000000, 0x00000000, 0x000f970c, 0x000f970c, 0x00000000 }, 4347 { 0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 },
4347 { 0x0000aae4, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 }, 4348 { 0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 },
4348 { 0x0000aae8, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 }, 4349 { 0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 },
4349 { 0x0000aaec, 0x00000000, 0x00000000, 0x000f9718, 0x000f9718, 0x00000000 }, 4350 { 0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 },
4350 { 0x0000aaf0, 0x00000000, 0x00000000, 0x000f9705, 0x000f9705, 0x00000000 }, 4351 { 0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 },
4351 { 0x0000aaf4, 0x00000000, 0x00000000, 0x000f9709, 0x000f9709, 0x00000000 }, 4352 { 0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 },
4352 { 0x0000aaf8, 0x00000000, 0x00000000, 0x000f970d, 0x000f970d, 0x00000000 }, 4353 { 0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 },
4353 { 0x0000aafc, 0x00000000, 0x00000000, 0x000f9711, 0x000f9711, 0x00000000 }, 4354 { 0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 },
4354 { 0x0000ab00, 0x00000000, 0x00000000, 0x000f9715, 0x000f9715, 0x00000000 }, 4355 { 0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 },
4355 { 0x0000ab04, 0x00000000, 0x00000000, 0x000f9719, 0x000f9719, 0x00000000 }, 4356 { 0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 },
4356 { 0x0000ab08, 0x00000000, 0x00000000, 0x000fb7a4, 0x000fb7a4, 0x00000000 }, 4357 { 0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 },
4357 { 0x0000ab0c, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 }, 4358 { 0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 },
4358 { 0x0000ab10, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 }, 4359 { 0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 },
4359 { 0x0000ab14, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 }, 4360 { 0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 },
4360 { 0x0000ab18, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 }, 4361 { 0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 },
4361 { 0x0000ab1c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 }, 4362 { 0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 },
4362 { 0x0000ab20, 0x00000000, 0x00000000, 0x000fb7bc, 0x000fb7bc, 0x00000000 }, 4363 { 0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 },
4363 { 0x0000ab24, 0x00000000, 0x00000000, 0x000fb7a1, 0x000fb7a1, 0x00000000 }, 4364 { 0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 },
4364 { 0x0000ab28, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 }, 4365 { 0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 },
4365 { 0x0000ab2c, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 }, 4366 { 0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 },
4366 { 0x0000ab30, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 }, 4367 { 0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 },
4367 { 0x0000ab34, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 }, 4368 { 0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 },
4368 { 0x0000ab38, 0x00000000, 0x00000000, 0x000fb7bd, 0x000fb7bd, 0x00000000 }, 4369 { 0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 },
4369 { 0x0000ab3c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 }, 4370 { 0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 },
4370 { 0x0000ab40, 0x00000000, 0x00000000, 0x000fb7cd, 0x000fb7cd, 0x00000000 }, 4371 { 0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 },
4371 { 0x0000ab44, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 }, 4372 { 0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 },
4372 { 0x0000ab48, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 }, 4373 { 0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 },
4373 { 0x0000ab4c, 0x00000000, 0x00000000, 0x000fb7c2, 0x000fb7c2, 0x00000000 }, 4374 { 0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 },
4374 { 0x0000ab50, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 }, 4375 { 0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 },
4375 { 0x0000ab54, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 }, 4376 { 0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 },
4376 { 0x0000ab58, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 }, 4377 { 0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 },
4377 { 0x0000ab5c, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 }, 4378 { 0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 },
4378 { 0x0000ab60, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 }, 4379 { 0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 },
4379 { 0x0000ab64, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 }, 4380 { 0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4380 { 0x0000ab68, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 }, 4381 { 0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4381 { 0x0000ab6c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4382 { 0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4382 { 0x0000ab70, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4383 { 0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4383 { 0x0000ab74, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4384 { 0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4384 { 0x0000ab78, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4385 { 0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4385 { 0x0000ab7c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4386 { 0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4386 { 0x0000ab80, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4387 { 0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4387 { 0x0000ab84, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4388 { 0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4388 { 0x0000ab88, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4389 { 0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4389 { 0x0000ab8c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4390 { 0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4390 { 0x0000ab90, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4391 { 0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4391 { 0x0000ab94, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4392 { 0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4392 { 0x0000ab98, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4393 { 0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4393 { 0x0000ab9c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4394 { 0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4394 { 0x0000aba0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4395 { 0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4395 { 0x0000aba4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4396 { 0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4396 { 0x0000aba8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4397 { 0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4397 { 0x0000abac, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4398 { 0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4398 { 0x0000abb0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4399 { 0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4399 { 0x0000abb4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4400 { 0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4400 { 0x0000abb8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4401 { 0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4401 { 0x0000abbc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4402 { 0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4402 { 0x0000abc0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4403 { 0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4403 { 0x0000abc4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4404 { 0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4404 { 0x0000abc8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4405 { 0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4405 { 0x0000abcc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4406 { 0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4406 { 0x0000abd0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4407 { 0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4407 { 0x0000abd4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4408 { 0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4408 { 0x0000abd8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4409 { 0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4409 { 0x0000abdc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4410 { 0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4410 { 0x0000abe0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4411 { 0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4411 { 0x0000abe4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4412 { 0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4412 { 0x0000abe8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4413 { 0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4413 { 0x0000abec, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4414 { 0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4414 { 0x0000abf0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4415 { 0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4415 { 0x0000abf4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4416 { 0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4416 { 0x0000abf8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4417 { 0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4417 { 0x0000abfc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, 4418 { 0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 },
4418 { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, 4419 { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 },
4419 { 0x0000a20c, 0x00000014, 0x00000014, 0x00000000, 0x00000000, 0x0001f000 }, 4420 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 },
4420 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 4421 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
4421 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 4422 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
4422 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, 4423 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
@@ -4679,7 +4680,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
4679 { 0x000099a0, 0x00000000 }, 4680 { 0x000099a0, 0x00000000 },
4680 { 0x000099a4, 0x00000001 }, 4681 { 0x000099a4, 0x00000001 },
4681 { 0x000099a8, 0x201fff00 }, 4682 { 0x000099a8, 0x201fff00 },
4682 { 0x000099ac, 0x2def1000 }, 4683 { 0x000099ac, 0x2def0400 },
4683 { 0x000099b0, 0x03051000 }, 4684 { 0x000099b0, 0x03051000 },
4684 { 0x000099b4, 0x00000820 }, 4685 { 0x000099b4, 0x00000820 },
4685 { 0x000099dc, 0x00000000 }, 4686 { 0x000099dc, 0x00000000 },
@@ -4688,7 +4689,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
4688 { 0x000099e8, 0x3c466478 }, 4689 { 0x000099e8, 0x3c466478 },
4689 { 0x000099ec, 0x0cc80caa }, 4690 { 0x000099ec, 0x0cc80caa },
4690 { 0x000099f0, 0x00000000 }, 4691 { 0x000099f0, 0x00000000 },
4691 { 0x0000a208, 0x803e6788 }, 4692 { 0x0000a208, 0x803e68c8 },
4692 { 0x0000a210, 0x4080a333 }, 4693 { 0x0000a210, 0x4080a333 },
4693 { 0x0000a214, 0x00206c10 }, 4694 { 0x0000a214, 0x00206c10 },
4694 { 0x0000a218, 0x009c4060 }, 4695 { 0x0000a218, 0x009c4060 },
diff --git a/drivers/net/wireless/ath9k/mac.c b/drivers/net/wireless/ath9k/mac.c
index af32d091dc3..f32c622db6e 100644
--- a/drivers/net/wireless/ath9k/mac.c
+++ b/drivers/net/wireless/ath9k/mac.c
@@ -14,45 +14,40 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21 18
22static void ath9k_hw_set_txq_interrupts(struct ath_hal *ah, 19static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
23 struct ath9k_tx_queue_info *qi) 20 struct ath9k_tx_queue_info *qi)
24{ 21{
25 struct ath_hal_5416 *ahp = AH5416(ah);
26
27 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 22 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
28 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", 23 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
29 ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask, 24 ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
30 ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask, 25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
31 ahp->ah_txUrnInterruptMask); 26 ah->txurn_interrupt_mask);
32 27
33 REG_WRITE(ah, AR_IMR_S0, 28 REG_WRITE(ah, AR_IMR_S0,
34 SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK) 29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
35 | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)); 30 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
36 REG_WRITE(ah, AR_IMR_S1, 31 REG_WRITE(ah, AR_IMR_S1,
37 SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR) 32 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
38 | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)); 33 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
39 REG_RMW_FIELD(ah, AR_IMR_S2, 34 REG_RMW_FIELD(ah, AR_IMR_S2,
40 AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask); 35 AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
41} 36}
42 37
43u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q) 38u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
44{ 39{
45 return REG_READ(ah, AR_QTXDP(q)); 40 return REG_READ(ah, AR_QTXDP(q));
46} 41}
47 42
48bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, u32 txdp) 43bool ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
49{ 44{
50 REG_WRITE(ah, AR_QTXDP(q), txdp); 45 REG_WRITE(ah, AR_QTXDP(q), txdp);
51 46
52 return true; 47 return true;
53} 48}
54 49
55bool ath9k_hw_txstart(struct ath_hal *ah, u32 q) 50bool ath9k_hw_txstart(struct ath_hw *ah, u32 q)
56{ 51{
57 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q); 52 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);
58 53
@@ -61,7 +56,7 @@ bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
61 return true; 56 return true;
62} 57}
63 58
64u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q) 59u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
65{ 60{
66 u32 npend; 61 u32 npend;
67 62
@@ -75,16 +70,15 @@ u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
75 return npend; 70 return npend;
76} 71}
77 72
78bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel) 73bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
79{ 74{
80 struct ath_hal_5416 *ahp = AH5416(ah);
81 u32 txcfg, curLevel, newLevel; 75 u32 txcfg, curLevel, newLevel;
82 enum ath9k_int omask; 76 enum ath9k_int omask;
83 77
84 if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD) 78 if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD)
85 return false; 79 return false;
86 80
87 omask = ath9k_hw_set_interrupts(ah, ahp->ah_maskReg & ~ATH9K_INT_GLOBAL); 81 omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
88 82
89 txcfg = REG_READ(ah, AR_TXCFG); 83 txcfg = REG_READ(ah, AR_TXCFG);
90 curLevel = MS(txcfg, AR_FTRIG); 84 curLevel = MS(txcfg, AR_FTRIG);
@@ -100,21 +94,38 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
100 94
101 ath9k_hw_set_interrupts(ah, omask); 95 ath9k_hw_set_interrupts(ah, omask);
102 96
103 ah->ah_txTrigLevel = newLevel; 97 ah->tx_trig_level = newLevel;
104 98
105 return newLevel != curLevel; 99 return newLevel != curLevel;
106} 100}
107 101
108bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q) 102bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
109{ 103{
104#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
105#define ATH9K_TIME_QUANTUM 100 /* usec */
106
107 struct ath9k_hw_capabilities *pCap = &ah->caps;
108 struct ath9k_tx_queue_info *qi;
110 u32 tsfLow, j, wait; 109 u32 tsfLow, j, wait;
110 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
111
112 if (q >= pCap->total_queues) {
113 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
114 return false;
115 }
116
117 qi = &ah->txq[q];
118 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
119 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
120 return false;
121 }
111 122
112 REG_WRITE(ah, AR_Q_TXD, 1 << q); 123 REG_WRITE(ah, AR_Q_TXD, 1 << q);
113 124
114 for (wait = 1000; wait != 0; wait--) { 125 for (wait = wait_time; wait != 0; wait--) {
115 if (ath9k_hw_numtxpending(ah, q) == 0) 126 if (ath9k_hw_numtxpending(ah, q) == 0)
116 break; 127 break;
117 udelay(100); 128 udelay(ATH9K_TIME_QUANTUM);
118 } 129 }
119 130
120 if (ath9k_hw_numtxpending(ah, q)) { 131 if (ath9k_hw_numtxpending(ah, q)) {
@@ -144,8 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
144 udelay(200); 155 udelay(200);
145 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN); 156 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
146 157
147 wait = 1000; 158 wait = wait_time;
148
149 while (ath9k_hw_numtxpending(ah, q)) { 159 while (ath9k_hw_numtxpending(ah, q)) {
150 if ((--wait) == 0) { 160 if ((--wait) == 0) {
151 DPRINTF(ah->ah_sc, ATH_DBG_XMIT, 161 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
@@ -153,18 +163,20 @@ bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
153 "msec after killing last frame\n"); 163 "msec after killing last frame\n");
154 break; 164 break;
155 } 165 }
156 udelay(100); 166 udelay(ATH9K_TIME_QUANTUM);
157 } 167 }
158 168
159 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 169 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
160 } 170 }
161 171
162 REG_WRITE(ah, AR_Q_TXD, 0); 172 REG_WRITE(ah, AR_Q_TXD, 0);
163
164 return wait != 0; 173 return wait != 0;
174
175#undef ATH9K_TX_STOP_DMA_TIMEOUT
176#undef ATH9K_TIME_QUANTUM
165} 177}
166 178
167bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds, 179bool ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
168 u32 segLen, bool firstSeg, 180 u32 segLen, bool firstSeg,
169 bool lastSeg, const struct ath_desc *ds0) 181 bool lastSeg, const struct ath_desc *ds0)
170{ 182{
@@ -192,7 +204,7 @@ bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
192 return true; 204 return true;
193} 205}
194 206
195void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds) 207void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
196{ 208{
197 struct ar5416_desc *ads = AR5416DESC(ds); 209 struct ar5416_desc *ads = AR5416DESC(ds);
198 210
@@ -203,7 +215,7 @@ void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
203 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 215 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
204} 216}
205 217
206int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds) 218int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
207{ 219{
208 struct ar5416_desc *ads = AR5416DESC(ds); 220 struct ar5416_desc *ads = AR5416DESC(ds);
209 221
@@ -278,14 +290,13 @@ int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
278 return 0; 290 return 0;
279} 291}
280 292
281void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds, 293void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
282 u32 pktLen, enum ath9k_pkt_type type, u32 txPower, 294 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
283 u32 keyIx, enum ath9k_key_type keyType, u32 flags) 295 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
284{ 296{
285 struct ar5416_desc *ads = AR5416DESC(ds); 297 struct ar5416_desc *ads = AR5416DESC(ds);
286 struct ath_hal_5416 *ahp = AH5416(ah);
287 298
288 txPower += ahp->ah_txPowerIndexOffset; 299 txPower += ah->txpower_indexoffset;
289 if (txPower > 63) 300 if (txPower > 63)
290 txPower = 63; 301 txPower = 63;
291 302
@@ -314,7 +325,7 @@ void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
314 } 325 }
315} 326}
316 327
317void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds, 328void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
318 struct ath_desc *lastds, 329 struct ath_desc *lastds,
319 u32 durUpdateEn, u32 rtsctsRate, 330 u32 durUpdateEn, u32 rtsctsRate,
320 u32 rtsctsDuration, 331 u32 rtsctsDuration,
@@ -325,9 +336,6 @@ void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
325 struct ar5416_desc *last_ads = AR5416DESC(lastds); 336 struct ar5416_desc *last_ads = AR5416DESC(lastds);
326 u32 ds_ctl0; 337 u32 ds_ctl0;
327 338
328 (void) nseries;
329 (void) rtsctsDuration;
330
331 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) { 339 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
332 ds_ctl0 = ads->ds_ctl0; 340 ds_ctl0 = ads->ds_ctl0;
333 341
@@ -372,7 +380,7 @@ void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
372 last_ads->ds_ctl3 = ads->ds_ctl3; 380 last_ads->ds_ctl3 = ads->ds_ctl3;
373} 381}
374 382
375void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds, 383void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
376 u32 aggrLen) 384 u32 aggrLen)
377{ 385{
378 struct ar5416_desc *ads = AR5416DESC(ds); 386 struct ar5416_desc *ads = AR5416DESC(ds);
@@ -382,7 +390,7 @@ void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
382 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen); 390 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
383} 391}
384 392
385void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds, 393void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
386 u32 numDelims) 394 u32 numDelims)
387{ 395{
388 struct ar5416_desc *ads = AR5416DESC(ds); 396 struct ar5416_desc *ads = AR5416DESC(ds);
@@ -396,7 +404,7 @@ void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
396 ads->ds_ctl6 = ctl6; 404 ads->ds_ctl6 = ctl6;
397} 405}
398 406
399void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds) 407void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
400{ 408{
401 struct ar5416_desc *ads = AR5416DESC(ds); 409 struct ar5416_desc *ads = AR5416DESC(ds);
402 410
@@ -405,14 +413,14 @@ void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
405 ads->ds_ctl6 &= ~AR_PadDelim; 413 ads->ds_ctl6 &= ~AR_PadDelim;
406} 414}
407 415
408void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds) 416void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
409{ 417{
410 struct ar5416_desc *ads = AR5416DESC(ds); 418 struct ar5416_desc *ads = AR5416DESC(ds);
411 419
412 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr); 420 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
413} 421}
414 422
415void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds, 423void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
416 u32 burstDuration) 424 u32 burstDuration)
417{ 425{
418 struct ar5416_desc *ads = AR5416DESC(ds); 426 struct ar5416_desc *ads = AR5416DESC(ds);
@@ -421,7 +429,7 @@ void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
421 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur); 429 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
422} 430}
423 431
424void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds, 432void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
425 u32 vmf) 433 u32 vmf)
426{ 434{
427 struct ar5416_desc *ads = AR5416DESC(ds); 435 struct ar5416_desc *ads = AR5416DESC(ds);
@@ -432,20 +440,17 @@ void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
432 ads->ds_ctl0 &= ~AR_VirtMoreFrag; 440 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
433} 441}
434 442
435void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs) 443void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
436{ 444{
437 struct ath_hal_5416 *ahp = AH5416(ah); 445 *txqs &= ah->intr_txqs;
438 446 ah->intr_txqs &= ~(*txqs);
439 *txqs &= ahp->ah_intrTxqs;
440 ahp->ah_intrTxqs &= ~(*txqs);
441} 447}
442 448
443bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q, 449bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
444 const struct ath9k_tx_queue_info *qinfo) 450 const struct ath9k_tx_queue_info *qinfo)
445{ 451{
446 u32 cw; 452 u32 cw;
447 struct ath_hal_5416 *ahp = AH5416(ah); 453 struct ath9k_hw_capabilities *pCap = &ah->caps;
448 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
449 struct ath9k_tx_queue_info *qi; 454 struct ath9k_tx_queue_info *qi;
450 455
451 if (q >= pCap->total_queues) { 456 if (q >= pCap->total_queues) {
@@ -453,7 +458,7 @@ bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
453 return false; 458 return false;
454 } 459 }
455 460
456 qi = &ahp->ah_txq[q]; 461 qi = &ah->txq[q];
457 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 462 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
458 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n"); 463 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
459 return false; 464 return false;
@@ -509,11 +514,10 @@ bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
509 return true; 514 return true;
510} 515}
511 516
512bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q, 517bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
513 struct ath9k_tx_queue_info *qinfo) 518 struct ath9k_tx_queue_info *qinfo)
514{ 519{
515 struct ath_hal_5416 *ahp = AH5416(ah); 520 struct ath9k_hw_capabilities *pCap = &ah->caps;
516 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
517 struct ath9k_tx_queue_info *qi; 521 struct ath9k_tx_queue_info *qi;
518 522
519 if (q >= pCap->total_queues) { 523 if (q >= pCap->total_queues) {
@@ -521,7 +525,7 @@ bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
521 return false; 525 return false;
522 } 526 }
523 527
524 qi = &ahp->ah_txq[q]; 528 qi = &ah->txq[q];
525 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 529 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
526 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n"); 530 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
527 return false; 531 return false;
@@ -545,12 +549,11 @@ bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
545 return true; 549 return true;
546} 550}
547 551
548int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type, 552int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
549 const struct ath9k_tx_queue_info *qinfo) 553 const struct ath9k_tx_queue_info *qinfo)
550{ 554{
551 struct ath_hal_5416 *ahp = AH5416(ah);
552 struct ath9k_tx_queue_info *qi; 555 struct ath9k_tx_queue_info *qi;
553 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 556 struct ath9k_hw_capabilities *pCap = &ah->caps;
554 int q; 557 int q;
555 558
556 switch (type) { 559 switch (type) {
@@ -568,7 +571,7 @@ int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
568 break; 571 break;
569 case ATH9K_TX_QUEUE_DATA: 572 case ATH9K_TX_QUEUE_DATA:
570 for (q = 0; q < pCap->total_queues; q++) 573 for (q = 0; q < pCap->total_queues; q++)
571 if (ahp->ah_txq[q].tqi_type == 574 if (ah->txq[q].tqi_type ==
572 ATH9K_TX_QUEUE_INACTIVE) 575 ATH9K_TX_QUEUE_INACTIVE)
573 break; 576 break;
574 if (q == pCap->total_queues) { 577 if (q == pCap->total_queues) {
@@ -584,7 +587,7 @@ int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
584 587
585 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q); 588 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);
586 589
587 qi = &ahp->ah_txq[q]; 590 qi = &ah->txq[q];
588 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { 591 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
589 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 592 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
590 "tx queue %u already active\n", q); 593 "tx queue %u already active\n", q);
@@ -611,17 +614,16 @@ int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
611 return q; 614 return q;
612} 615}
613 616
614bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q) 617bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
615{ 618{
616 struct ath_hal_5416 *ahp = AH5416(ah); 619 struct ath9k_hw_capabilities *pCap = &ah->caps;
617 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
618 struct ath9k_tx_queue_info *qi; 620 struct ath9k_tx_queue_info *qi;
619 621
620 if (q >= pCap->total_queues) { 622 if (q >= pCap->total_queues) {
621 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q); 623 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
622 return false; 624 return false;
623 } 625 }
624 qi = &ahp->ah_txq[q]; 626 qi = &ah->txq[q];
625 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 627 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
626 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q); 628 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
627 return false; 629 return false;
@@ -630,21 +632,20 @@ bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
630 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "release queue %u\n", q); 632 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "release queue %u\n", q);
631 633
632 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; 634 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
633 ahp->ah_txOkInterruptMask &= ~(1 << q); 635 ah->txok_interrupt_mask &= ~(1 << q);
634 ahp->ah_txErrInterruptMask &= ~(1 << q); 636 ah->txerr_interrupt_mask &= ~(1 << q);
635 ahp->ah_txDescInterruptMask &= ~(1 << q); 637 ah->txdesc_interrupt_mask &= ~(1 << q);
636 ahp->ah_txEolInterruptMask &= ~(1 << q); 638 ah->txeol_interrupt_mask &= ~(1 << q);
637 ahp->ah_txUrnInterruptMask &= ~(1 << q); 639 ah->txurn_interrupt_mask &= ~(1 << q);
638 ath9k_hw_set_txq_interrupts(ah, qi); 640 ath9k_hw_set_txq_interrupts(ah, qi);
639 641
640 return true; 642 return true;
641} 643}
642 644
643bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q) 645bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
644{ 646{
645 struct ath_hal_5416 *ahp = AH5416(ah); 647 struct ath9k_hw_capabilities *pCap = &ah->caps;
646 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 648 struct ath9k_channel *chan = ah->curchan;
647 struct ath9k_channel *chan = ah->ah_curchan;
648 struct ath9k_tx_queue_info *qi; 649 struct ath9k_tx_queue_info *qi;
649 u32 cwMin, chanCwMin, value; 650 u32 cwMin, chanCwMin, value;
650 651
@@ -653,7 +654,7 @@ bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
653 return false; 654 return false;
654 } 655 }
655 656
656 qi = &ahp->ah_txq[q]; 657 qi = &ah->txq[q];
657 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 658 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
658 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q); 659 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
659 return true; 660 return true;
@@ -741,9 +742,9 @@ bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
741 | AR_Q_MISC_CBR_INCR_DIS1 742 | AR_Q_MISC_CBR_INCR_DIS1
742 | AR_Q_MISC_CBR_INCR_DIS0); 743 | AR_Q_MISC_CBR_INCR_DIS0);
743 value = (qi->tqi_readyTime - 744 value = (qi->tqi_readyTime -
744 (ah->ah_config.sw_beacon_response_time - 745 (ah->config.sw_beacon_response_time -
745 ah->ah_config.dma_beacon_response_time) - 746 ah->config.dma_beacon_response_time) -
746 ah->ah_config.additional_swba_backoff) * 1024; 747 ah->config.additional_swba_backoff) * 1024;
747 REG_WRITE(ah, AR_QRDYTIMECFG(q), 748 REG_WRITE(ah, AR_QRDYTIMECFG(q),
748 value | AR_Q_RDYTIMECFG_EN); 749 value | AR_Q_RDYTIMECFG_EN);
749 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 750 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
@@ -771,31 +772,31 @@ bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
771 } 772 }
772 773
773 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE) 774 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
774 ahp->ah_txOkInterruptMask |= 1 << q; 775 ah->txok_interrupt_mask |= 1 << q;
775 else 776 else
776 ahp->ah_txOkInterruptMask &= ~(1 << q); 777 ah->txok_interrupt_mask &= ~(1 << q);
777 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE) 778 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
778 ahp->ah_txErrInterruptMask |= 1 << q; 779 ah->txerr_interrupt_mask |= 1 << q;
779 else 780 else
780 ahp->ah_txErrInterruptMask &= ~(1 << q); 781 ah->txerr_interrupt_mask &= ~(1 << q);
781 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE) 782 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
782 ahp->ah_txDescInterruptMask |= 1 << q; 783 ah->txdesc_interrupt_mask |= 1 << q;
783 else 784 else
784 ahp->ah_txDescInterruptMask &= ~(1 << q); 785 ah->txdesc_interrupt_mask &= ~(1 << q);
785 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE) 786 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
786 ahp->ah_txEolInterruptMask |= 1 << q; 787 ah->txeol_interrupt_mask |= 1 << q;
787 else 788 else
788 ahp->ah_txEolInterruptMask &= ~(1 << q); 789 ah->txeol_interrupt_mask &= ~(1 << q);
789 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE) 790 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
790 ahp->ah_txUrnInterruptMask |= 1 << q; 791 ah->txurn_interrupt_mask |= 1 << q;
791 else 792 else
792 ahp->ah_txUrnInterruptMask &= ~(1 << q); 793 ah->txurn_interrupt_mask &= ~(1 << q);
793 ath9k_hw_set_txq_interrupts(ah, qi); 794 ath9k_hw_set_txq_interrupts(ah, qi);
794 795
795 return true; 796 return true;
796} 797}
797 798
798int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds, 799int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
799 u32 pa, struct ath_desc *nds, u64 tsf) 800 u32 pa, struct ath_desc *nds, u64 tsf)
800{ 801{
801 struct ar5416_desc ads; 802 struct ar5416_desc ads;
@@ -860,11 +861,11 @@ int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
860 return 0; 861 return 0;
861} 862}
862 863
863bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds, 864bool ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
864 u32 size, u32 flags) 865 u32 size, u32 flags)
865{ 866{
866 struct ar5416_desc *ads = AR5416DESC(ds); 867 struct ar5416_desc *ads = AR5416DESC(ds);
867 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 868 struct ath9k_hw_capabilities *pCap = &ah->caps;
868 869
869 ads->ds_ctl1 = size & AR_BufLen; 870 ads->ds_ctl1 = size & AR_BufLen;
870 if (flags & ATH9K_RXDESC_INTREQ) 871 if (flags & ATH9K_RXDESC_INTREQ)
@@ -877,7 +878,7 @@ bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
877 return true; 878 return true;
878} 879}
879 880
880bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set) 881bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
881{ 882{
882 u32 reg; 883 u32 reg;
883 884
@@ -904,17 +905,17 @@ bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
904 return true; 905 return true;
905} 906}
906 907
907void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp) 908void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
908{ 909{
909 REG_WRITE(ah, AR_RXDP, rxdp); 910 REG_WRITE(ah, AR_RXDP, rxdp);
910} 911}
911 912
912void ath9k_hw_rxena(struct ath_hal *ah) 913void ath9k_hw_rxena(struct ath_hw *ah)
913{ 914{
914 REG_WRITE(ah, AR_CR, AR_CR_RXE); 915 REG_WRITE(ah, AR_CR, AR_CR_RXE);
915} 916}
916 917
917void ath9k_hw_startpcureceive(struct ath_hal *ah) 918void ath9k_hw_startpcureceive(struct ath_hw *ah)
918{ 919{
919 ath9k_enable_mib_counters(ah); 920 ath9k_enable_mib_counters(ah);
920 921
@@ -923,14 +924,14 @@ void ath9k_hw_startpcureceive(struct ath_hal *ah)
923 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 924 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
924} 925}
925 926
926void ath9k_hw_stoppcurecv(struct ath_hal *ah) 927void ath9k_hw_stoppcurecv(struct ath_hw *ah)
927{ 928{
928 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); 929 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
929 930
930 ath9k_hw_disable_mib_counters(ah); 931 ath9k_hw_disable_mib_counters(ah);
931} 932}
932 933
933bool ath9k_hw_stopdmarecv(struct ath_hal *ah) 934bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
934{ 935{
935 REG_WRITE(ah, AR_CR, AR_CR_RXD); 936 REG_WRITE(ah, AR_CR, AR_CR_RXD);
936 937
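The ath9k_hw_stoptxdma() hunk above replaces the bare 1000-iteration busy-wait with a total timeout in microseconds (ATH9K_TX_STOP_DMA_TIMEOUT) consumed in fixed ATH9K_TIME_QUANTUM steps. A minimal, hedged sketch of that bounded-poll pattern follows; the helper name, the pending() callback, and the context pointer are assumptions for illustration only, the two timing constants simply mirror the values added in the diff, and <linux/delay.h> is assumed for udelay().

/* Hedged sketch only: poll a condition in fixed time quanta until a
 * total timeout elapses, instead of counting raw loop iterations.
 * Assumes <linux/delay.h> for udelay(); all names are illustrative. */
#define EXAMPLE_STOP_TIMEOUT_US	4000	/* total budget, usec */
#define EXAMPLE_QUANTUM_US	100	/* delay per poll, usec */

static bool example_wait_until_idle(bool (*pending)(void *ctx), void *ctx)
{
	unsigned int wait = EXAMPLE_STOP_TIMEOUT_US / EXAMPLE_QUANTUM_US;

	while (wait-- && pending(ctx))
		udelay(EXAMPLE_QUANTUM_US);

	return !pending(ctx);	/* true if the queue drained within budget */
}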
diff --git a/drivers/net/wireless/ath9k/mac.h b/drivers/net/wireless/ath9k/mac.h
new file mode 100644
index 00000000000..74b660ae8ad
--- /dev/null
+++ b/drivers/net/wireless/ath9k/mac.h
@@ -0,0 +1,676 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef MAC_H
18#define MAC_H
19
20#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
21 MS(ads->ds_rxstatus0, AR_RxRate) : \
22 (ads->ds_rxstatus3 >> 2) & 0xFF)
23
24#define set11nTries(_series, _index) \
25 (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
26
27#define set11nRate(_series, _index) \
28 (SM((_series)[_index].Rate, AR_XmitRate##_index))
29
30#define set11nPktDurRTSCTS(_series, _index) \
31 (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \
32 ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \
33 AR_RTSCTSQual##_index : 0))
34
35#define set11nRateFlags(_series, _index) \
36 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
37 AR_2040_##_index : 0) \
38 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
39 AR_GI##_index : 0) \
40 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
41
42#define CCK_SIFS_TIME 10
43#define CCK_PREAMBLE_BITS 144
44#define CCK_PLCP_BITS 48
45
46#define OFDM_SIFS_TIME 16
47#define OFDM_PREAMBLE_TIME 20
48#define OFDM_PLCP_BITS 22
49#define OFDM_SYMBOL_TIME 4
50
51#define OFDM_SIFS_TIME_HALF 32
52#define OFDM_PREAMBLE_TIME_HALF 40
53#define OFDM_PLCP_BITS_HALF 22
54#define OFDM_SYMBOL_TIME_HALF 8
55
56#define OFDM_SIFS_TIME_QUARTER 64
57#define OFDM_PREAMBLE_TIME_QUARTER 80
58#define OFDM_PLCP_BITS_QUARTER 22
59#define OFDM_SYMBOL_TIME_QUARTER 16
60
61#define INIT_AIFS 2
62#define INIT_CWMIN 15
63#define INIT_CWMIN_11B 31
64#define INIT_CWMAX 1023
65#define INIT_SH_RETRY 10
66#define INIT_LG_RETRY 10
67#define INIT_SSH_RETRY 32
68#define INIT_SLG_RETRY 32
69
70#define ATH9K_SLOT_TIME_6 6
71#define ATH9K_SLOT_TIME_9 9
72#define ATH9K_SLOT_TIME_20 20
73
74#define ATH9K_TXERR_XRETRY 0x01
75#define ATH9K_TXERR_FILT 0x02
76#define ATH9K_TXERR_FIFO 0x04
77#define ATH9K_TXERR_XTXOP 0x08
78#define ATH9K_TXERR_TIMER_EXPIRED 0x10
79
80#define ATH9K_TX_BA 0x01
81#define ATH9K_TX_PWRMGMT 0x02
82#define ATH9K_TX_DESC_CFG_ERR 0x04
83#define ATH9K_TX_DATA_UNDERRUN 0x08
84#define ATH9K_TX_DELIM_UNDERRUN 0x10
85#define ATH9K_TX_SW_ABORTED 0x40
86#define ATH9K_TX_SW_FILTERED 0x80
87
88#define MIN_TX_FIFO_THRESHOLD 0x1
89#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
90#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD
91
92struct ath_tx_status {
93 u32 ts_tstamp;
94 u16 ts_seqnum;
95 u8 ts_status;
96 u8 ts_ratecode;
97 u8 ts_rateindex;
98 int8_t ts_rssi;
99 u8 ts_shortretry;
100 u8 ts_longretry;
101 u8 ts_virtcol;
102 u8 ts_antenna;
103 u8 ts_flags;
104 int8_t ts_rssi_ctl0;
105 int8_t ts_rssi_ctl1;
106 int8_t ts_rssi_ctl2;
107 int8_t ts_rssi_ext0;
108 int8_t ts_rssi_ext1;
109 int8_t ts_rssi_ext2;
110 u8 pad[3];
111 u32 ba_low;
112 u32 ba_high;
113 u32 evm0;
114 u32 evm1;
115 u32 evm2;
116};
117
118struct ath_rx_status {
119 u32 rs_tstamp;
120 u16 rs_datalen;
121 u8 rs_status;
122 u8 rs_phyerr;
123 int8_t rs_rssi;
124 u8 rs_keyix;
125 u8 rs_rate;
126 u8 rs_antenna;
127 u8 rs_more;
128 int8_t rs_rssi_ctl0;
129 int8_t rs_rssi_ctl1;
130 int8_t rs_rssi_ctl2;
131 int8_t rs_rssi_ext0;
132 int8_t rs_rssi_ext1;
133 int8_t rs_rssi_ext2;
134 u8 rs_isaggr;
135 u8 rs_moreaggr;
136 u8 rs_num_delims;
137 u8 rs_flags;
138 u32 evm0;
139 u32 evm1;
140 u32 evm2;
141};
142
143#define ATH9K_RXERR_CRC 0x01
144#define ATH9K_RXERR_PHY 0x02
145#define ATH9K_RXERR_FIFO 0x04
146#define ATH9K_RXERR_DECRYPT 0x08
147#define ATH9K_RXERR_MIC 0x10
148
149#define ATH9K_RX_MORE 0x01
150#define ATH9K_RX_MORE_AGGR 0x02
151#define ATH9K_RX_GI 0x04
152#define ATH9K_RX_2040 0x08
153#define ATH9K_RX_DELIM_CRC_PRE 0x10
154#define ATH9K_RX_DELIM_CRC_POST 0x20
155#define ATH9K_RX_DECRYPT_BUSY 0x40
156
157#define ATH9K_RXKEYIX_INVALID ((u8)-1)
158#define ATH9K_TXKEYIX_INVALID ((u32)-1)
159
160struct ath_desc {
161 u32 ds_link;
162 u32 ds_data;
163 u32 ds_ctl0;
164 u32 ds_ctl1;
165 u32 ds_hw[20];
166 union {
167 struct ath_tx_status tx;
168 struct ath_rx_status rx;
169 void *stats;
170 } ds_us;
171 void *ds_vdata;
172} __packed;
173
174#define ds_txstat ds_us.tx
175#define ds_rxstat ds_us.rx
176#define ds_stat ds_us.stats
177
178#define ATH9K_TXDESC_CLRDMASK 0x0001
179#define ATH9K_TXDESC_NOACK 0x0002
180#define ATH9K_TXDESC_RTSENA 0x0004
181#define ATH9K_TXDESC_CTSENA 0x0008
 182/* ATH9K_TXDESC_INTREQ forces a tx interrupt to be generated for
 183 * the descriptor it is marked on. We take a tx interrupt to reap
 184 * descriptors when the h/w hits an EOL condition or
 185 * when the descriptor is specifically marked to generate
 186 * an interrupt with this flag. Descriptors should be
 187 * marked periodically to ensure timely replenishing of the
 188 * supply needed for sending frames. Deferring interrupts
 189 * reduces system load and potentially allows more concurrent
 190 * work to be done, but if done too aggressively it can cause
 191 * senders to back up. When the hardware queue is left too
 192 * large, rate control information may also be too out of
 193 * date. An alternative to this is TX interrupt mitigation,
 194 * but this needs more testing. */
195#define ATH9K_TXDESC_INTREQ 0x0010
196#define ATH9K_TXDESC_VEOL 0x0020
197#define ATH9K_TXDESC_EXT_ONLY 0x0040
198#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
199#define ATH9K_TXDESC_VMF 0x0100
200#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
201#define ATH9K_TXDESC_CAB 0x0400
202
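The ATH9K_TXDESC_INTREQ comment above describes marking descriptors periodically so completed frames are reaped promptly without taking a tx interrupt per frame. A minimal, hedged sketch of how a send path might apply such a policy; the marking interval, the counter, and the helper are illustrative assumptions, and only the ATH9K_TXDESC_INTREQ flag itself comes from this header.

/* Illustrative only: request a tx-completion interrupt on every Nth
 * descriptor rather than on every frame.  The marking interval and
 * the per-queue counter are assumptions for this sketch. */
#define EXAMPLE_INTR_INTERVAL	8	/* hypothetical marking period */

static u32 example_txdesc_flags(u32 base_flags, u32 *frames_since_intr)
{
	u32 flags = base_flags;

	if (++(*frames_since_intr) >= EXAMPLE_INTR_INTERVAL) {
		flags |= ATH9K_TXDESC_INTREQ;	/* force an interrupt here */
		*frames_since_intr = 0;
	}
	return flags;
}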
203#define ATH9K_RXDESC_INTREQ 0x0020
204
205struct ar5416_desc {
206 u32 ds_link;
207 u32 ds_data;
208 u32 ds_ctl0;
209 u32 ds_ctl1;
210 union {
211 struct {
212 u32 ctl2;
213 u32 ctl3;
214 u32 ctl4;
215 u32 ctl5;
216 u32 ctl6;
217 u32 ctl7;
218 u32 ctl8;
219 u32 ctl9;
220 u32 ctl10;
221 u32 ctl11;
222 u32 status0;
223 u32 status1;
224 u32 status2;
225 u32 status3;
226 u32 status4;
227 u32 status5;
228 u32 status6;
229 u32 status7;
230 u32 status8;
231 u32 status9;
232 } tx;
233 struct {
234 u32 status0;
235 u32 status1;
236 u32 status2;
237 u32 status3;
238 u32 status4;
239 u32 status5;
240 u32 status6;
241 u32 status7;
242 u32 status8;
243 } rx;
244 } u;
245} __packed;
246
247#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
248#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
249
250#define ds_ctl2 u.tx.ctl2
251#define ds_ctl3 u.tx.ctl3
252#define ds_ctl4 u.tx.ctl4
253#define ds_ctl5 u.tx.ctl5
254#define ds_ctl6 u.tx.ctl6
255#define ds_ctl7 u.tx.ctl7
256#define ds_ctl8 u.tx.ctl8
257#define ds_ctl9 u.tx.ctl9
258#define ds_ctl10 u.tx.ctl10
259#define ds_ctl11 u.tx.ctl11
260
261#define ds_txstatus0 u.tx.status0
262#define ds_txstatus1 u.tx.status1
263#define ds_txstatus2 u.tx.status2
264#define ds_txstatus3 u.tx.status3
265#define ds_txstatus4 u.tx.status4
266#define ds_txstatus5 u.tx.status5
267#define ds_txstatus6 u.tx.status6
268#define ds_txstatus7 u.tx.status7
269#define ds_txstatus8 u.tx.status8
270#define ds_txstatus9 u.tx.status9
271
272#define ds_rxstatus0 u.rx.status0
273#define ds_rxstatus1 u.rx.status1
274#define ds_rxstatus2 u.rx.status2
275#define ds_rxstatus3 u.rx.status3
276#define ds_rxstatus4 u.rx.status4
277#define ds_rxstatus5 u.rx.status5
278#define ds_rxstatus6 u.rx.status6
279#define ds_rxstatus7 u.rx.status7
280#define ds_rxstatus8 u.rx.status8
281
282#define AR_FrameLen 0x00000fff
283#define AR_VirtMoreFrag 0x00001000
284#define AR_TxCtlRsvd00 0x0000e000
285#define AR_XmitPower 0x003f0000
286#define AR_XmitPower_S 16
287#define AR_RTSEnable 0x00400000
288#define AR_VEOL 0x00800000
289#define AR_ClrDestMask 0x01000000
290#define AR_TxCtlRsvd01 0x1e000000
291#define AR_TxIntrReq 0x20000000
292#define AR_DestIdxValid 0x40000000
293#define AR_CTSEnable 0x80000000
294
295#define AR_BufLen 0x00000fff
296#define AR_TxMore 0x00001000
297#define AR_DestIdx 0x000fe000
298#define AR_DestIdx_S 13
299#define AR_FrameType 0x00f00000
300#define AR_FrameType_S 20
301#define AR_NoAck 0x01000000
302#define AR_InsertTS 0x02000000
303#define AR_CorruptFCS 0x04000000
304#define AR_ExtOnly 0x08000000
305#define AR_ExtAndCtl 0x10000000
306#define AR_MoreAggr 0x20000000
307#define AR_IsAggr 0x40000000
308
309#define AR_BurstDur 0x00007fff
310#define AR_BurstDur_S 0
311#define AR_DurUpdateEna 0x00008000
312#define AR_XmitDataTries0 0x000f0000
313#define AR_XmitDataTries0_S 16
314#define AR_XmitDataTries1 0x00f00000
315#define AR_XmitDataTries1_S 20
316#define AR_XmitDataTries2 0x0f000000
317#define AR_XmitDataTries2_S 24
318#define AR_XmitDataTries3 0xf0000000
319#define AR_XmitDataTries3_S 28
320
321#define AR_XmitRate0 0x000000ff
322#define AR_XmitRate0_S 0
323#define AR_XmitRate1 0x0000ff00
324#define AR_XmitRate1_S 8
325#define AR_XmitRate2 0x00ff0000
326#define AR_XmitRate2_S 16
327#define AR_XmitRate3 0xff000000
328#define AR_XmitRate3_S 24
329
330#define AR_PacketDur0 0x00007fff
331#define AR_PacketDur0_S 0
332#define AR_RTSCTSQual0 0x00008000
333#define AR_PacketDur1 0x7fff0000
334#define AR_PacketDur1_S 16
335#define AR_RTSCTSQual1 0x80000000
336
337#define AR_PacketDur2 0x00007fff
338#define AR_PacketDur2_S 0
339#define AR_RTSCTSQual2 0x00008000
340#define AR_PacketDur3 0x7fff0000
341#define AR_PacketDur3_S 16
342#define AR_RTSCTSQual3 0x80000000
343
344#define AR_AggrLen 0x0000ffff
345#define AR_AggrLen_S 0
346#define AR_TxCtlRsvd60 0x00030000
347#define AR_PadDelim 0x03fc0000
348#define AR_PadDelim_S 18
349#define AR_EncrType 0x0c000000
350#define AR_EncrType_S 26
351#define AR_TxCtlRsvd61 0xf0000000
352
353#define AR_2040_0 0x00000001
354#define AR_GI0 0x00000002
355#define AR_ChainSel0 0x0000001c
356#define AR_ChainSel0_S 2
357#define AR_2040_1 0x00000020
358#define AR_GI1 0x00000040
359#define AR_ChainSel1 0x00000380
360#define AR_ChainSel1_S 7
361#define AR_2040_2 0x00000400
362#define AR_GI2 0x00000800
363#define AR_ChainSel2 0x00007000
364#define AR_ChainSel2_S 12
365#define AR_2040_3 0x00008000
366#define AR_GI3 0x00010000
367#define AR_ChainSel3 0x000e0000
368#define AR_ChainSel3_S 17
369#define AR_RTSCTSRate 0x0ff00000
370#define AR_RTSCTSRate_S 20
371#define AR_TxCtlRsvd70 0xf0000000
372
373#define AR_TxRSSIAnt00 0x000000ff
374#define AR_TxRSSIAnt00_S 0
375#define AR_TxRSSIAnt01 0x0000ff00
376#define AR_TxRSSIAnt01_S 8
377#define AR_TxRSSIAnt02 0x00ff0000
378#define AR_TxRSSIAnt02_S 16
379#define AR_TxStatusRsvd00 0x3f000000
380#define AR_TxBaStatus 0x40000000
381#define AR_TxStatusRsvd01 0x80000000
382
383#define AR_FrmXmitOK 0x00000001
384#define AR_ExcessiveRetries 0x00000002
385#define AR_FIFOUnderrun 0x00000004
386#define AR_Filtered 0x00000008
387#define AR_RTSFailCnt 0x000000f0
388#define AR_RTSFailCnt_S 4
389#define AR_DataFailCnt 0x00000f00
390#define AR_DataFailCnt_S 8
391#define AR_VirtRetryCnt 0x0000f000
392#define AR_VirtRetryCnt_S 12
393#define AR_TxDelimUnderrun 0x00010000
394#define AR_TxDataUnderrun 0x00020000
395#define AR_DescCfgErr 0x00040000
396#define AR_TxTimerExpired 0x00080000
397#define AR_TxStatusRsvd10 0xfff00000
398
399#define AR_SendTimestamp ds_txstatus2
400#define AR_BaBitmapLow ds_txstatus3
401#define AR_BaBitmapHigh ds_txstatus4
402
403#define AR_TxRSSIAnt10 0x000000ff
404#define AR_TxRSSIAnt10_S 0
405#define AR_TxRSSIAnt11 0x0000ff00
406#define AR_TxRSSIAnt11_S 8
407#define AR_TxRSSIAnt12 0x00ff0000
408#define AR_TxRSSIAnt12_S 16
409#define AR_TxRSSICombined 0xff000000
410#define AR_TxRSSICombined_S 24
411
412#define AR_TxEVM0 ds_txstatus5
413#define AR_TxEVM1 ds_txstatus6
414#define AR_TxEVM2 ds_txstatus7
415
416#define AR_TxDone 0x00000001
417#define AR_SeqNum 0x00001ffe
418#define AR_SeqNum_S 1
419#define AR_TxStatusRsvd80 0x0001e000
420#define AR_TxOpExceeded 0x00020000
421#define AR_TxStatusRsvd81 0x001c0000
422#define AR_FinalTxIdx 0x00600000
423#define AR_FinalTxIdx_S 21
424#define AR_TxStatusRsvd82 0x01800000
425#define AR_PowerMgmt 0x02000000
426#define AR_TxStatusRsvd83 0xfc000000
427
428#define AR_RxCTLRsvd00 0xffffffff
429
430#define AR_BufLen 0x00000fff
431#define AR_RxCtlRsvd00 0x00001000
432#define AR_RxIntrReq 0x00002000
433#define AR_RxCtlRsvd01 0xffffc000
434
435#define AR_RxRSSIAnt00 0x000000ff
436#define AR_RxRSSIAnt00_S 0
437#define AR_RxRSSIAnt01 0x0000ff00
438#define AR_RxRSSIAnt01_S 8
439#define AR_RxRSSIAnt02 0x00ff0000
440#define AR_RxRSSIAnt02_S 16
441#define AR_RxRate 0xff000000
442#define AR_RxRate_S 24
443#define AR_RxStatusRsvd00 0xff000000
444
445#define AR_DataLen 0x00000fff
446#define AR_RxMore 0x00001000
447#define AR_NumDelim 0x003fc000
448#define AR_NumDelim_S 14
449#define AR_RxStatusRsvd10 0xff800000
450
451#define AR_RcvTimestamp ds_rxstatus2
452
453#define AR_GI 0x00000001
454#define AR_2040 0x00000002
455#define AR_Parallel40 0x00000004
456#define AR_Parallel40_S 2
457#define AR_RxStatusRsvd30 0x000000f8
458#define AR_RxAntenna 0xffffff00
459#define AR_RxAntenna_S 8
460
461#define AR_RxRSSIAnt10 0x000000ff
462#define AR_RxRSSIAnt10_S 0
463#define AR_RxRSSIAnt11 0x0000ff00
464#define AR_RxRSSIAnt11_S 8
465#define AR_RxRSSIAnt12 0x00ff0000
466#define AR_RxRSSIAnt12_S 16
467#define AR_RxRSSICombined 0xff000000
468#define AR_RxRSSICombined_S 24
469
470#define AR_RxEVM0 ds_rxstatus4
471#define AR_RxEVM1 ds_rxstatus5
472#define AR_RxEVM2 ds_rxstatus6
473
474#define AR_RxDone 0x00000001
475#define AR_RxFrameOK 0x00000002
476#define AR_CRCErr 0x00000004
477#define AR_DecryptCRCErr 0x00000008
478#define AR_PHYErr 0x00000010
479#define AR_MichaelErr 0x00000020
480#define AR_PreDelimCRCErr 0x00000040
481#define AR_RxStatusRsvd70 0x00000080
482#define AR_RxKeyIdxValid 0x00000100
483#define AR_KeyIdx 0x0000fe00
484#define AR_KeyIdx_S 9
485#define AR_PHYErrCode 0x0000ff00
486#define AR_PHYErrCode_S 8
487#define AR_RxMoreAggr 0x00010000
488#define AR_RxAggr 0x00020000
489#define AR_PostDelimCRCErr 0x00040000
490#define AR_RxStatusRsvd71 0x3ff80000
491#define AR_DecryptBusyErr 0x40000000
492#define AR_KeyMiss 0x80000000
493
494enum ath9k_tx_queue {
495 ATH9K_TX_QUEUE_INACTIVE = 0,
496 ATH9K_TX_QUEUE_DATA,
497 ATH9K_TX_QUEUE_BEACON,
498 ATH9K_TX_QUEUE_CAB,
499 ATH9K_TX_QUEUE_UAPSD,
500 ATH9K_TX_QUEUE_PSPOLL
501};
502
503#define ATH9K_NUM_TX_QUEUES 10
504
505enum ath9k_tx_queue_subtype {
506 ATH9K_WME_AC_BK = 0,
507 ATH9K_WME_AC_BE,
508 ATH9K_WME_AC_VI,
509 ATH9K_WME_AC_VO,
510 ATH9K_WME_UPSD
511};
512
513enum ath9k_tx_queue_flags {
514 TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
515 TXQ_FLAG_TXERRINT_ENABLE = 0x0001,
516 TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
517 TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
518 TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
519 TXQ_FLAG_BACKOFF_DISABLE = 0x0010,
520 TXQ_FLAG_COMPRESSION_ENABLE = 0x0020,
521 TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040,
522 TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080,
523};
524
525#define ATH9K_TXQ_USEDEFAULT ((u32) -1)
526#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
527
528#define ATH9K_DECOMP_MASK_SIZE 128
529#define ATH9K_READY_TIME_LO_BOUND 50
530#define ATH9K_READY_TIME_HI_BOUND 96
531
532enum ath9k_pkt_type {
533 ATH9K_PKT_TYPE_NORMAL = 0,
534 ATH9K_PKT_TYPE_ATIM,
535 ATH9K_PKT_TYPE_PSPOLL,
536 ATH9K_PKT_TYPE_BEACON,
537 ATH9K_PKT_TYPE_PROBE_RESP,
538 ATH9K_PKT_TYPE_CHIRP,
539 ATH9K_PKT_TYPE_GRP_POLL,
540};
541
542struct ath9k_tx_queue_info {
543 u32 tqi_ver;
544 enum ath9k_tx_queue tqi_type;
545 enum ath9k_tx_queue_subtype tqi_subtype;
546 enum ath9k_tx_queue_flags tqi_qflags;
547 u32 tqi_priority;
548 u32 tqi_aifs;
549 u32 tqi_cwmin;
550 u32 tqi_cwmax;
551 u16 tqi_shretry;
552 u16 tqi_lgretry;
553 u32 tqi_cbrPeriod;
554 u32 tqi_cbrOverflowLimit;
555 u32 tqi_burstTime;
556 u32 tqi_readyTime;
557 u32 tqi_physCompBuf;
558 u32 tqi_intFlags;
559};
560
561enum ath9k_rx_filter {
562 ATH9K_RX_FILTER_UCAST = 0x00000001,
563 ATH9K_RX_FILTER_MCAST = 0x00000002,
564 ATH9K_RX_FILTER_BCAST = 0x00000004,
565 ATH9K_RX_FILTER_CONTROL = 0x00000008,
566 ATH9K_RX_FILTER_BEACON = 0x00000010,
567 ATH9K_RX_FILTER_PROM = 0x00000020,
568 ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
569 ATH9K_RX_FILTER_PSPOLL = 0x00004000,
570 ATH9K_RX_FILTER_PHYERR = 0x00000100,
571 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
572};
573
574#define ATH9K_RATESERIES_RTS_CTS 0x0001
575#define ATH9K_RATESERIES_2040 0x0002
576#define ATH9K_RATESERIES_HALFGI 0x0004
577
578struct ath9k_11n_rate_series {
579 u32 Tries;
580 u32 Rate;
581 u32 PktDuration;
582 u32 ChSel;
583 u32 RateFlags;
584};
585
586struct ath9k_keyval {
587 u8 kv_type;
588 u8 kv_pad;
589 u16 kv_len;
590 u8 kv_val[16];
591 u8 kv_mic[8];
592 u8 kv_txmic[8];
593};
594
595enum ath9k_key_type {
596 ATH9K_KEY_TYPE_CLEAR,
597 ATH9K_KEY_TYPE_WEP,
598 ATH9K_KEY_TYPE_AES,
599 ATH9K_KEY_TYPE_TKIP,
600};
601
602enum ath9k_cipher {
603 ATH9K_CIPHER_WEP = 0,
604 ATH9K_CIPHER_AES_OCB = 1,
605 ATH9K_CIPHER_AES_CCM = 2,
606 ATH9K_CIPHER_CKIP = 3,
607 ATH9K_CIPHER_TKIP = 4,
608 ATH9K_CIPHER_CLR = 5,
609 ATH9K_CIPHER_MIC = 127
610};
611
612enum ath9k_ht_macmode {
613 ATH9K_HT_MACMODE_20 = 0,
614 ATH9K_HT_MACMODE_2040 = 1,
615};
616
617enum ath9k_ht_extprotspacing {
618 ATH9K_HT_EXTPROTSPACING_20 = 0,
619 ATH9K_HT_EXTPROTSPACING_25 = 1,
620};
621
622struct ath_hw;
623struct ath9k_channel;
624struct ath_rate_table;
625
626u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
627bool ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
628bool ath9k_hw_txstart(struct ath_hw *ah, u32 q);
629u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
630bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
631bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
632bool ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
633 u32 segLen, bool firstSeg,
634 bool lastSeg, const struct ath_desc *ds0);
635void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
636int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds);
637void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
638 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
639 u32 keyIx, enum ath9k_key_type keyType, u32 flags);
640void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
641 struct ath_desc *lastds,
642 u32 durUpdateEn, u32 rtsctsRate,
643 u32 rtsctsDuration,
644 struct ath9k_11n_rate_series series[],
645 u32 nseries, u32 flags);
646void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
647 u32 aggrLen);
648void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
649 u32 numDelims);
650void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds);
651void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds);
652void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
653 u32 burstDuration);
654void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
655 u32 vmf);
656void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
657bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
658 const struct ath9k_tx_queue_info *qinfo);
659bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
660 struct ath9k_tx_queue_info *qinfo);
661int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
662 const struct ath9k_tx_queue_info *qinfo);
663bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
664bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
665int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
666 u32 pa, struct ath_desc *nds, u64 tsf);
667bool ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
668 u32 size, u32 flags);
669bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
670void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
671void ath9k_hw_rxena(struct ath_hw *ah);
672void ath9k_hw_startpcureceive(struct ath_hw *ah);
673void ath9k_hw_stoppcurecv(struct ath_hw *ah);
674bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
675
676#endif /* MAC_H */
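
Every multi-bit field in the descriptor words listed above comes as a mask/shift pair (for example AR_DestIdx with AR_DestIdx_S). That pairing is what lets generic extract/insert macros build field accessors by token pasting on the *_S suffix; the driver defines similar helpers in its own headers (typically named MS()/SM()), so the standalone copy below is only an illustrative sketch, not code from this patch.

	#include <stdint.h>
	#include <stdio.h>

	/* two of the mask/shift pairs from the listing above, repeated so
	 * the sketch is self-contained */
	#define AR_DestIdx      0x000fe000
	#define AR_DestIdx_S    13
	#define AR_FrameType    0x00f00000
	#define AR_FrameType_S  20

	/* extract a field: mask it out, then shift down by its _S constant */
	#define MS(_v, _f)      (((_v) & (_f)) >> _f##_S)
	/* insert a field: shift the value up, then clip it to the mask */
	#define SM(_v, _f)      (((_v) << _f##_S) & (_f))

	int main(void)
	{
		uint32_t ctl = 0;

		ctl |= SM(5, AR_DestIdx);	/* destination/key-cache index 5 */
		ctl |= SM(2, AR_FrameType);	/* frame type 2 */

		printf("ctl=0x%08x destidx=%u ftype=%u\n",
		       ctl, (unsigned)MS(ctl, AR_DestIdx),
		       (unsigned)MS(ctl, AR_FrameType));
		return 0;
	}

The same pattern applies to every AR_* mask that has a *_S companion; masks without a shift constant are single-bit flags tested directly.
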
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 727f067aca4..fc3460f8f7f 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -15,9 +15,7 @@
15 */ 15 */
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include "core.h" 18#include "ath9k.h"
19#include "reg.h"
20#include "hw.h"
21 19
22#define ATH_PCI_VERSION "0.1" 20#define ATH_PCI_VERSION "0.1"
23 21
@@ -28,84 +26,125 @@ MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
28MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); 26MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
29MODULE_LICENSE("Dual BSD/GPL"); 27MODULE_LICENSE("Dual BSD/GPL");
30 28
31static struct pci_device_id ath_pci_id_table[] __devinitdata = { 29/* We use the hw_value as an index into our private channel structure */
32 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ 30
33 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ 31#define CHAN2G(_freq, _idx) { \
34 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ 32 .center_freq = (_freq), \
35 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ 33 .hw_value = (_idx), \
36 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ 34 .max_power = 30, \
37 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */ 35}
38 { 0 } 36
37#define CHAN5G(_freq, _idx) { \
38 .band = IEEE80211_BAND_5GHZ, \
39 .center_freq = (_freq), \
40 .hw_value = (_idx), \
41 .max_power = 30, \
42}
43
44/* Some 2 GHz radios are actually tunable on 2312-2732
45 * on 5 MHz steps, we support the channels which we know
46 * we have calibration data for all cards though to make
47 * this static */
48static struct ieee80211_channel ath9k_2ghz_chantable[] = {
49 CHAN2G(2412, 0), /* Channel 1 */
50 CHAN2G(2417, 1), /* Channel 2 */
51 CHAN2G(2422, 2), /* Channel 3 */
52 CHAN2G(2427, 3), /* Channel 4 */
53 CHAN2G(2432, 4), /* Channel 5 */
54 CHAN2G(2437, 5), /* Channel 6 */
55 CHAN2G(2442, 6), /* Channel 7 */
56 CHAN2G(2447, 7), /* Channel 8 */
57 CHAN2G(2452, 8), /* Channel 9 */
58 CHAN2G(2457, 9), /* Channel 10 */
59 CHAN2G(2462, 10), /* Channel 11 */
60 CHAN2G(2467, 11), /* Channel 12 */
61 CHAN2G(2472, 12), /* Channel 13 */
62 CHAN2G(2484, 13), /* Channel 14 */
39}; 63};
40 64
41static void ath_detach(struct ath_softc *sc); 65/* Some 5 GHz radios are actually tunable on XXXX-YYYY
42 66 * on 5 MHz steps, we support the channels which we know
43/* return bus cachesize in 4B word units */ 67 * we have calibration data for all cards though to make
44 68 * this static */
45static void bus_read_cachesize(struct ath_softc *sc, int *csz) 69static struct ieee80211_channel ath9k_5ghz_chantable[] = {
46{ 70 /* _We_ call this UNII 1 */
47 u8 u8tmp; 71 CHAN5G(5180, 14), /* Channel 36 */
48 72 CHAN5G(5200, 15), /* Channel 40 */
49 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp); 73 CHAN5G(5220, 16), /* Channel 44 */
50 *csz = (int)u8tmp; 74 CHAN5G(5240, 17), /* Channel 48 */
51 75 /* _We_ call this UNII 2 */
52 /* 76 CHAN5G(5260, 18), /* Channel 52 */
53 * This check was put in to avoid "unplesant" consequences if 77 CHAN5G(5280, 19), /* Channel 56 */
54 * the bootrom has not fully initialized all PCI devices. 78 CHAN5G(5300, 20), /* Channel 60 */
55 * Sometimes the cache line size register is not set 79 CHAN5G(5320, 21), /* Channel 64 */
56 */ 80 /* _We_ call this "Middle band" */
57 81 CHAN5G(5500, 22), /* Channel 100 */
58 if (*csz == 0) 82 CHAN5G(5520, 23), /* Channel 104 */
59 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */ 83 CHAN5G(5540, 24), /* Channel 108 */
60} 84 CHAN5G(5560, 25), /* Channel 112 */
61 85 CHAN5G(5580, 26), /* Channel 116 */
62static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode) 86 CHAN5G(5600, 27), /* Channel 120 */
63{ 87 CHAN5G(5620, 28), /* Channel 124 */
64 sc->cur_rate_table = sc->hw_rate_table[mode]; 88 CHAN5G(5640, 29), /* Channel 128 */
65 /* 89 CHAN5G(5660, 30), /* Channel 132 */
66 * All protection frames are transmited at 2Mb/s for 90 CHAN5G(5680, 31), /* Channel 136 */
67 * 11g, otherwise at 1Mb/s. 91 CHAN5G(5700, 32), /* Channel 140 */
68 * XXX select protection rate index from rate table. 92 /* _We_ call this UNII 3 */
69 */ 93 CHAN5G(5745, 33), /* Channel 149 */
70 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0); 94 CHAN5G(5765, 34), /* Channel 153 */
71} 95 CHAN5G(5785, 35), /* Channel 157 */
96 CHAN5G(5805, 36), /* Channel 161 */
97 CHAN5G(5825, 37), /* Channel 165 */
98};
72 99
73static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan) 100static void ath_cache_conf_rate(struct ath_softc *sc,
101 struct ieee80211_conf *conf)
74{ 102{
75 if (chan->chanmode == CHANNEL_A) 103 switch (conf->channel->band) {
76 return ATH9K_MODE_11A; 104 case IEEE80211_BAND_2GHZ:
77 else if (chan->chanmode == CHANNEL_G) 105 if (conf_is_ht20(conf))
78 return ATH9K_MODE_11G; 106 sc->cur_rate_table =
79 else if (chan->chanmode == CHANNEL_B) 107 sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
80 return ATH9K_MODE_11B; 108 else if (conf_is_ht40_minus(conf))
81 else if (chan->chanmode == CHANNEL_A_HT20) 109 sc->cur_rate_table =
82 return ATH9K_MODE_11NA_HT20; 110 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
83 else if (chan->chanmode == CHANNEL_G_HT20) 111 else if (conf_is_ht40_plus(conf))
84 return ATH9K_MODE_11NG_HT20; 112 sc->cur_rate_table =
85 else if (chan->chanmode == CHANNEL_A_HT40PLUS) 113 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
86 return ATH9K_MODE_11NA_HT40PLUS; 114 else
87 else if (chan->chanmode == CHANNEL_A_HT40MINUS) 115 sc->cur_rate_table =
88 return ATH9K_MODE_11NA_HT40MINUS; 116 sc->hw_rate_table[ATH9K_MODE_11G];
89 else if (chan->chanmode == CHANNEL_G_HT40PLUS) 117 break;
90 return ATH9K_MODE_11NG_HT40PLUS; 118 case IEEE80211_BAND_5GHZ:
91 else if (chan->chanmode == CHANNEL_G_HT40MINUS) 119 if (conf_is_ht20(conf))
92 return ATH9K_MODE_11NG_HT40MINUS; 120 sc->cur_rate_table =
93 121 sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
94 WARN_ON(1); /* should not get here */ 122 else if (conf_is_ht40_minus(conf))
95 123 sc->cur_rate_table =
96 return ATH9K_MODE_11B; 124 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
125 else if (conf_is_ht40_plus(conf))
126 sc->cur_rate_table =
127 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
128 else
129 sc->cur_rate_table =
130 sc->hw_rate_table[ATH9K_MODE_11A];
131 break;
132 default:
133 BUG_ON(1);
134 break;
135 }
97} 136}
98 137
99static void ath_update_txpow(struct ath_softc *sc) 138static void ath_update_txpow(struct ath_softc *sc)
100{ 139{
101 struct ath_hal *ah = sc->sc_ah; 140 struct ath_hw *ah = sc->sc_ah;
102 u32 txpow; 141 u32 txpow;
103 142
104 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) { 143 if (sc->curtxpow != sc->config.txpowlimit) {
105 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit); 144 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
106 /* read back in case value is clamped */ 145 /* read back in case value is clamped */
107 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); 146 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
108 sc->sc_curtxpow = txpow; 147 sc->curtxpow = txpow;
109 } 148 }
110} 149}
111 150
@@ -176,79 +215,18 @@ static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
176 for (i = 0; i < maxrates; i++) { 215 for (i = 0; i < maxrates; i++) {
177 rate[i].bitrate = rate_table->info[i].ratekbps / 100; 216 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
178 rate[i].hw_value = rate_table->info[i].ratecode; 217 rate[i].hw_value = rate_table->info[i].ratecode;
218 if (rate_table->info[i].short_preamble) {
219 rate[i].hw_value_short = rate_table->info[i].ratecode |
220 rate_table->info[i].short_preamble;
221 rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE;
222 }
179 sband->n_bitrates++; 223 sband->n_bitrates++;
224
180 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n", 225 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
181 rate[i].bitrate / 10, rate[i].hw_value); 226 rate[i].bitrate / 10, rate[i].hw_value);
182 } 227 }
183} 228}
184 229
185static int ath_setup_channels(struct ath_softc *sc)
186{
187 struct ath_hal *ah = sc->sc_ah;
188 int nchan, i, a = 0, b = 0;
189 u8 regclassids[ATH_REGCLASSIDS_MAX];
190 u32 nregclass = 0;
191 struct ieee80211_supported_band *band_2ghz;
192 struct ieee80211_supported_band *band_5ghz;
193 struct ieee80211_channel *chan_2ghz;
194 struct ieee80211_channel *chan_5ghz;
195 struct ath9k_channel *c;
196
197 /* Fill in ah->ah_channels */
198 if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (u32 *)&nchan,
199 regclassids, ATH_REGCLASSIDS_MAX,
200 &nregclass, CTRY_DEFAULT, false, 1)) {
201 u32 rd = ah->ah_currentRD;
202 DPRINTF(sc, ATH_DBG_FATAL,
203 "Unable to collect channel list; "
204 "regdomain likely %u country code %u\n",
205 rd, CTRY_DEFAULT);
206 return -EINVAL;
207 }
208
209 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
210 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
211 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
212 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
213
214 for (i = 0; i < nchan; i++) {
215 c = &ah->ah_channels[i];
216 if (IS_CHAN_2GHZ(c)) {
217 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
218 chan_2ghz[a].center_freq = c->channel;
219 chan_2ghz[a].max_power = c->maxTxPower;
220
221 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
222 chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
223 if (c->channelFlags & CHANNEL_PASSIVE)
224 chan_2ghz[a].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
225
226 band_2ghz->n_channels = ++a;
227
228 DPRINTF(sc, ATH_DBG_CONFIG, "2MHz channel: %d, "
229 "channelFlags: 0x%x\n",
230 c->channel, c->channelFlags);
231 } else if (IS_CHAN_5GHZ(c)) {
232 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
233 chan_5ghz[b].center_freq = c->channel;
234 chan_5ghz[b].max_power = c->maxTxPower;
235
236 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
237 chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
238 if (c->channelFlags & CHANNEL_PASSIVE)
239 chan_5ghz[b].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
240
241 band_5ghz->n_channels = ++b;
242
243 DPRINTF(sc, ATH_DBG_CONFIG, "5MHz channel: %d, "
244 "channelFlags: 0x%x\n",
245 c->channel, c->channelFlags);
246 }
247 }
248
249 return 0;
250}
251
252/* 230/*
253 * Set/change channels. If the channel is really being changed, it's done 231 * Set/change channels. If the channel is really being changed, it's done
254 * by reseting the chip. To accomplish this we must first cleanup any pending 232 * by reseting the chip. To accomplish this we must first cleanup any pending
@@ -256,70 +234,68 @@ static int ath_setup_channels(struct ath_softc *sc)
256*/ 234*/
257static int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan) 235static int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
258{ 236{
259 struct ath_hal *ah = sc->sc_ah; 237 struct ath_hw *ah = sc->sc_ah;
260 bool fastcc = true, stopped; 238 bool fastcc = true, stopped;
239 struct ieee80211_hw *hw = sc->hw;
240 struct ieee80211_channel *channel = hw->conf.channel;
241 int r;
261 242
262 if (sc->sc_flags & SC_OP_INVALID) 243 if (sc->sc_flags & SC_OP_INVALID)
263 return -EIO; 244 return -EIO;
264 245
265 if (hchan->channel != sc->sc_ah->ah_curchan->channel || 246 ath9k_ps_wakeup(sc);
266 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags || 247
267 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) || 248 /*
268 (sc->sc_flags & SC_OP_FULL_RESET)) { 249 * This is only performed if the channel settings have
269 int status; 250 * actually changed.
270 /* 251 *
271 * This is only performed if the channel settings have 252 * To switch channels clear any pending DMA operations;
272 * actually changed. 253 * wait long enough for the RX fifo to drain, reset the
273 * 254 * hardware at the new frequency, and then re-enable
274 * To switch channels clear any pending DMA operations; 255 * the relevant bits of the h/w.
275 * wait long enough for the RX fifo to drain, reset the 256 */
276 * hardware at the new frequency, and then re-enable 257 ath9k_hw_set_interrupts(ah, 0);
277 * the relevant bits of the h/w. 258 ath_drain_all_txq(sc, false);
278 */ 259 stopped = ath_stoprecv(sc);
279 ath9k_hw_set_interrupts(ah, 0);
280 ath_draintxq(sc, false);
281 stopped = ath_stoprecv(sc);
282
283 /* XXX: do not flush receive queue here. We don't want
284 * to flush data frames already in queue because of
285 * changing channel. */
286
287 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
288 fastcc = false;
289
290 DPRINTF(sc, ATH_DBG_CONFIG,
291 "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
292 sc->sc_ah->ah_curchan->channel,
293 hchan->channel, hchan->channelFlags, sc->tx_chan_width);
294
295 spin_lock_bh(&sc->sc_resetlock);
296 if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
297 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
298 sc->sc_ht_extprotspacing, fastcc, &status)) {
299 DPRINTF(sc, ATH_DBG_FATAL,
300 "Unable to reset channel %u (%uMhz) "
301 "flags 0x%x hal status %u\n",
302 ath9k_hw_mhz2ieee(ah, hchan->channel,
303 hchan->channelFlags),
304 hchan->channel, hchan->channelFlags, status);
305 spin_unlock_bh(&sc->sc_resetlock);
306 return -EIO;
307 }
308 spin_unlock_bh(&sc->sc_resetlock);
309 260
310 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE; 261 /* XXX: do not flush receive queue here. We don't want
311 sc->sc_flags &= ~SC_OP_FULL_RESET; 262 * to flush data frames already in queue because of
263 * changing channel. */
312 264
313 if (ath_startrecv(sc) != 0) { 265 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
314 DPRINTF(sc, ATH_DBG_FATAL, 266 fastcc = false;
315 "Unable to restart recv logic\n");
316 return -EIO;
317 }
318 267
319 ath_setcurmode(sc, ath_chan2mode(hchan)); 268 DPRINTF(sc, ATH_DBG_CONFIG,
320 ath_update_txpow(sc); 269 "(%u MHz) -> (%u MHz), chanwidth: %d\n",
321 ath9k_hw_set_interrupts(ah, sc->sc_imask); 270 sc->sc_ah->curchan->channel,
271 channel->center_freq, sc->tx_chan_width);
272
273 spin_lock_bh(&sc->sc_resetlock);
274
275 r = ath9k_hw_reset(ah, hchan, fastcc);
276 if (r) {
277 DPRINTF(sc, ATH_DBG_FATAL,
278 "Unable to reset channel (%u Mhz) "
279 "reset status %u\n",
280 channel->center_freq, r);
281 spin_unlock_bh(&sc->sc_resetlock);
282 return r;
322 } 283 }
284 spin_unlock_bh(&sc->sc_resetlock);
285
286 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
287 sc->sc_flags &= ~SC_OP_FULL_RESET;
288
289 if (ath_startrecv(sc) != 0) {
290 DPRINTF(sc, ATH_DBG_FATAL,
291 "Unable to restart recv logic\n");
292 return -EIO;
293 }
294
295 ath_cache_conf_rate(sc, &hw->conf);
296 ath_update_txpow(sc);
297 ath9k_hw_set_interrupts(ah, sc->imask);
298 ath9k_ps_restore(sc);
323 return 0; 299 return 0;
324} 300}
325 301
@@ -333,7 +309,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
333static void ath_ani_calibrate(unsigned long data) 309static void ath_ani_calibrate(unsigned long data)
334{ 310{
335 struct ath_softc *sc; 311 struct ath_softc *sc;
336 struct ath_hal *ah; 312 struct ath_hw *ah;
337 bool longcal = false; 313 bool longcal = false;
338 bool shortcal = false; 314 bool shortcal = false;
339 bool aniflag = false; 315 bool aniflag = false;
@@ -351,69 +327,68 @@ static void ath_ani_calibrate(unsigned long data)
351 return; 327 return;
352 328
353 /* Long calibration runs independently of short calibration. */ 329 /* Long calibration runs independently of short calibration. */
354 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) { 330 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
355 longcal = true; 331 longcal = true;
356 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies); 332 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
357 sc->sc_ani.sc_longcal_timer = timestamp; 333 sc->ani.longcal_timer = timestamp;
358 } 334 }
359 335
360 /* Short calibration applies only while sc_caldone is false */ 336 /* Short calibration applies only while caldone is false */
361 if (!sc->sc_ani.sc_caldone) { 337 if (!sc->ani.caldone) {
362 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >= 338 if ((timestamp - sc->ani.shortcal_timer) >=
363 ATH_SHORT_CALINTERVAL) { 339 ATH_SHORT_CALINTERVAL) {
364 shortcal = true; 340 shortcal = true;
365 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies); 341 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
366 sc->sc_ani.sc_shortcal_timer = timestamp; 342 sc->ani.shortcal_timer = timestamp;
367 sc->sc_ani.sc_resetcal_timer = timestamp; 343 sc->ani.resetcal_timer = timestamp;
368 } 344 }
369 } else { 345 } else {
370 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >= 346 if ((timestamp - sc->ani.resetcal_timer) >=
371 ATH_RESTART_CALINTERVAL) { 347 ATH_RESTART_CALINTERVAL) {
372 ath9k_hw_reset_calvalid(ah, ah->ah_curchan, 348 sc->ani.caldone = ath9k_hw_reset_calvalid(ah);
373 &sc->sc_ani.sc_caldone); 349 if (sc->ani.caldone)
374 if (sc->sc_ani.sc_caldone) 350 sc->ani.resetcal_timer = timestamp;
375 sc->sc_ani.sc_resetcal_timer = timestamp;
376 } 351 }
377 } 352 }
378 353
379 /* Verify whether we must check ANI */ 354 /* Verify whether we must check ANI */
380 if ((timestamp - sc->sc_ani.sc_checkani_timer) >= 355 if ((timestamp - sc->ani.checkani_timer) >=
381 ATH_ANI_POLLINTERVAL) { 356 ATH_ANI_POLLINTERVAL) {
382 aniflag = true; 357 aniflag = true;
383 sc->sc_ani.sc_checkani_timer = timestamp; 358 sc->ani.checkani_timer = timestamp;
384 } 359 }
385 360
386 /* Skip all processing if there's nothing to do. */ 361 /* Skip all processing if there's nothing to do. */
387 if (longcal || shortcal || aniflag) { 362 if (longcal || shortcal || aniflag) {
388 /* Call ANI routine if necessary */ 363 /* Call ANI routine if necessary */
389 if (aniflag) 364 if (aniflag)
390 ath9k_hw_ani_monitor(ah, &sc->sc_halstats, 365 ath9k_hw_ani_monitor(ah, &sc->nodestats,
391 ah->ah_curchan); 366 ah->curchan);
392 367
393 /* Perform calibration if necessary */ 368 /* Perform calibration if necessary */
394 if (longcal || shortcal) { 369 if (longcal || shortcal) {
395 bool iscaldone = false; 370 bool iscaldone = false;
396 371
397 if (ath9k_hw_calibrate(ah, ah->ah_curchan, 372 if (ath9k_hw_calibrate(ah, ah->curchan,
398 sc->sc_rx_chainmask, longcal, 373 sc->rx_chainmask, longcal,
399 &iscaldone)) { 374 &iscaldone)) {
400 if (longcal) 375 if (longcal)
401 sc->sc_ani.sc_noise_floor = 376 sc->ani.noise_floor =
402 ath9k_hw_getchan_noise(ah, 377 ath9k_hw_getchan_noise(ah,
403 ah->ah_curchan); 378 ah->curchan);
404 379
405 DPRINTF(sc, ATH_DBG_ANI, 380 DPRINTF(sc, ATH_DBG_ANI,
406 "calibrate chan %u/%x nf: %d\n", 381 "calibrate chan %u/%x nf: %d\n",
407 ah->ah_curchan->channel, 382 ah->curchan->channel,
408 ah->ah_curchan->channelFlags, 383 ah->curchan->channelFlags,
409 sc->sc_ani.sc_noise_floor); 384 sc->ani.noise_floor);
410 } else { 385 } else {
411 DPRINTF(sc, ATH_DBG_ANY, 386 DPRINTF(sc, ATH_DBG_ANY,
412 "calibrate chan %u/%x failed\n", 387 "calibrate chan %u/%x failed\n",
413 ah->ah_curchan->channel, 388 ah->curchan->channel,
414 ah->ah_curchan->channelFlags); 389 ah->curchan->channelFlags);
415 } 390 }
416 sc->sc_ani.sc_caldone = iscaldone; 391 sc->ani.caldone = iscaldone;
417 } 392 }
418 } 393 }
419 394
@@ -423,32 +398,34 @@ static void ath_ani_calibrate(unsigned long data)
423 * short calibration and long calibration. 398 * short calibration and long calibration.
424 */ 399 */
425 cal_interval = ATH_LONG_CALINTERVAL; 400 cal_interval = ATH_LONG_CALINTERVAL;
426 if (sc->sc_ah->ah_config.enable_ani) 401 if (sc->sc_ah->config.enable_ani)
427 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); 402 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
428 if (!sc->sc_ani.sc_caldone) 403 if (!sc->ani.caldone)
429 cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL); 404 cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);
430 405
431 mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 406 mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
432} 407}
433 408
434/* 409/*
435 * Update tx/rx chainmask. For legacy association, 410 * Update tx/rx chainmask. For legacy association,
436 * hard code chainmask to 1x1, for 11n association, use 411 * hard code chainmask to 1x1, for 11n association, use
437 * the chainmask configuration. 412 * the chainmask configuration, for bt coexistence, use
413 * the chainmask configuration even in legacy mode.
438 */ 414 */
439static void ath_update_chainmask(struct ath_softc *sc, int is_ht) 415static void ath_update_chainmask(struct ath_softc *sc, int is_ht)
440{ 416{
441 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE; 417 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
442 if (is_ht) { 418 if (is_ht ||
443 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask; 419 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)) {
444 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask; 420 sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
421 sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
445 } else { 422 } else {
446 sc->sc_tx_chainmask = 1; 423 sc->tx_chainmask = 1;
447 sc->sc_rx_chainmask = 1; 424 sc->rx_chainmask = 1;
448 } 425 }
449 426
450 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n", 427 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
451 sc->sc_tx_chainmask, sc->sc_rx_chainmask); 428 sc->tx_chainmask, sc->rx_chainmask);
452} 429}
453 430
454static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) 431static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -476,7 +453,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
476static void ath9k_tasklet(unsigned long data) 453static void ath9k_tasklet(unsigned long data)
477{ 454{
478 struct ath_softc *sc = (struct ath_softc *)data; 455 struct ath_softc *sc = (struct ath_softc *)data;
479 u32 status = sc->sc_intrstatus; 456 u32 status = sc->intrstatus;
480 457
481 if (status & ATH9K_INT_FATAL) { 458 if (status & ATH9K_INT_FATAL) {
482 /* need a chip reset */ 459 /* need a chip reset */
@@ -496,13 +473,13 @@ static void ath9k_tasklet(unsigned long data)
496 } 473 }
497 474
498 /* re-enable hardware interrupt */ 475 /* re-enable hardware interrupt */
499 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask); 476 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
500} 477}
501 478
502static irqreturn_t ath_isr(int irq, void *dev) 479irqreturn_t ath_isr(int irq, void *dev)
503{ 480{
504 struct ath_softc *sc = dev; 481 struct ath_softc *sc = dev;
505 struct ath_hal *ah = sc->sc_ah; 482 struct ath_hw *ah = sc->sc_ah;
506 enum ath9k_int status; 483 enum ath9k_int status;
507 bool sched = false; 484 bool sched = false;
508 485
@@ -527,7 +504,7 @@ static irqreturn_t ath_isr(int irq, void *dev)
527 */ 504 */
528 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */ 505 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
529 506
530 status &= sc->sc_imask; /* discard unasked-for bits */ 507 status &= sc->imask; /* discard unasked-for bits */
531 508
532 /* 509 /*
533 * If there are no status bits set, then this interrupt was not 510 * If there are no status bits set, then this interrupt was not
@@ -536,7 +513,7 @@ static irqreturn_t ath_isr(int irq, void *dev)
536 if (!status) 513 if (!status)
537 return IRQ_NONE; 514 return IRQ_NONE;
538 515
539 sc->sc_intrstatus = status; 516 sc->intrstatus = status;
540 517
541 if (status & ATH9K_INT_FATAL) { 518 if (status & ATH9K_INT_FATAL) {
542 /* need a chip reset */ 519 /* need a chip reset */
@@ -583,16 +560,18 @@ static irqreturn_t ath_isr(int irq, void *dev)
583 * it will clear whatever condition caused 560 * it will clear whatever condition caused
584 * the interrupt. 561 * the interrupt.
585 */ 562 */
586 ath9k_hw_procmibevent(ah, &sc->sc_halstats); 563 ath9k_hw_procmibevent(ah, &sc->nodestats);
587 ath9k_hw_set_interrupts(ah, sc->sc_imask); 564 ath9k_hw_set_interrupts(ah, sc->imask);
588 } 565 }
589 if (status & ATH9K_INT_TIM_TIMER) { 566 if (status & ATH9K_INT_TIM_TIMER) {
590 if (!(ah->ah_caps.hw_caps & 567 if (!(ah->caps.hw_caps &
591 ATH9K_HW_CAP_AUTOSLEEP)) { 568 ATH9K_HW_CAP_AUTOSLEEP)) {
592 /* Clear RxAbort bit so that we can 569 /* Clear RxAbort bit so that we can
593 * receive frames */ 570 * receive frames */
571 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
594 ath9k_hw_setrxabort(ah, 0); 572 ath9k_hw_setrxabort(ah, 0);
595 sched = true; 573 sched = true;
574 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
596 } 575 }
597 } 576 }
598 } 577 }
@@ -602,26 +581,13 @@ static irqreturn_t ath_isr(int irq, void *dev)
602 581
603 if (sched) { 582 if (sched) {
604 /* turn off every interrupt except SWBA */ 583 /* turn off every interrupt except SWBA */
605 ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA)); 584 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
606 tasklet_schedule(&sc->intr_tq); 585 tasklet_schedule(&sc->intr_tq);
607 } 586 }
608 587
609 return IRQ_HANDLED; 588 return IRQ_HANDLED;
610} 589}
611 590
612static int ath_get_channel(struct ath_softc *sc,
613 struct ieee80211_channel *chan)
614{
615 int i;
616
617 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
618 if (sc->sc_ah->ah_channels[i].channel == chan->center_freq)
619 return i;
620 }
621
622 return -1;
623}
624
625static u32 ath_get_extchanmode(struct ath_softc *sc, 591static u32 ath_get_extchanmode(struct ath_softc *sc,
626 struct ieee80211_channel *chan, 592 struct ieee80211_channel *chan,
627 enum nl80211_channel_type channel_type) 593 enum nl80211_channel_type channel_type)
@@ -690,7 +656,7 @@ static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
690 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 656 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
691 return ath_keyset(sc, keyix, hk, addr); 657 return ath_keyset(sc, keyix, hk, addr);
692 } 658 }
693 if (!sc->sc_splitmic) { 659 if (!sc->splitmic) {
694 /* 660 /*
695 * data key goes at first index, 661 * data key goes at first index,
696 * the hal handles the MIC keys at index+64. 662 * the hal handles the MIC keys at index+64.
@@ -720,13 +686,13 @@ static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
720{ 686{
721 int i; 687 int i;
722 688
723 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax / 2; i++) { 689 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
724 if (test_bit(i, sc->sc_keymap) || 690 if (test_bit(i, sc->keymap) ||
725 test_bit(i + 64, sc->sc_keymap)) 691 test_bit(i + 64, sc->keymap))
726 continue; /* At least one part of TKIP key allocated */ 692 continue; /* At least one part of TKIP key allocated */
727 if (sc->sc_splitmic && 693 if (sc->splitmic &&
728 (test_bit(i + 32, sc->sc_keymap) || 694 (test_bit(i + 32, sc->keymap) ||
729 test_bit(i + 64 + 32, sc->sc_keymap))) 695 test_bit(i + 64 + 32, sc->keymap)))
730 continue; /* At least one part of TKIP key allocated */ 696 continue; /* At least one part of TKIP key allocated */
731 697
732 /* Found a free slot for a TKIP key */ 698 /* Found a free slot for a TKIP key */
@@ -740,55 +706,55 @@ static int ath_reserve_key_cache_slot(struct ath_softc *sc)
740 int i; 706 int i;
741 707
742 /* First, try to find slots that would not be available for TKIP. */ 708 /* First, try to find slots that would not be available for TKIP. */
743 if (sc->sc_splitmic) { 709 if (sc->splitmic) {
744 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax / 4; i++) { 710 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) {
745 if (!test_bit(i, sc->sc_keymap) && 711 if (!test_bit(i, sc->keymap) &&
746 (test_bit(i + 32, sc->sc_keymap) || 712 (test_bit(i + 32, sc->keymap) ||
747 test_bit(i + 64, sc->sc_keymap) || 713 test_bit(i + 64, sc->keymap) ||
748 test_bit(i + 64 + 32, sc->sc_keymap))) 714 test_bit(i + 64 + 32, sc->keymap)))
749 return i; 715 return i;
750 if (!test_bit(i + 32, sc->sc_keymap) && 716 if (!test_bit(i + 32, sc->keymap) &&
751 (test_bit(i, sc->sc_keymap) || 717 (test_bit(i, sc->keymap) ||
752 test_bit(i + 64, sc->sc_keymap) || 718 test_bit(i + 64, sc->keymap) ||
753 test_bit(i + 64 + 32, sc->sc_keymap))) 719 test_bit(i + 64 + 32, sc->keymap)))
754 return i + 32; 720 return i + 32;
755 if (!test_bit(i + 64, sc->sc_keymap) && 721 if (!test_bit(i + 64, sc->keymap) &&
756 (test_bit(i , sc->sc_keymap) || 722 (test_bit(i , sc->keymap) ||
757 test_bit(i + 32, sc->sc_keymap) || 723 test_bit(i + 32, sc->keymap) ||
758 test_bit(i + 64 + 32, sc->sc_keymap))) 724 test_bit(i + 64 + 32, sc->keymap)))
759 return i + 64; 725 return i + 64;
760 if (!test_bit(i + 64 + 32, sc->sc_keymap) && 726 if (!test_bit(i + 64 + 32, sc->keymap) &&
761 (test_bit(i, sc->sc_keymap) || 727 (test_bit(i, sc->keymap) ||
762 test_bit(i + 32, sc->sc_keymap) || 728 test_bit(i + 32, sc->keymap) ||
763 test_bit(i + 64, sc->sc_keymap))) 729 test_bit(i + 64, sc->keymap)))
764 return i + 64 + 32; 730 return i + 64 + 32;
765 } 731 }
766 } else { 732 } else {
767 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax / 2; i++) { 733 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
768 if (!test_bit(i, sc->sc_keymap) && 734 if (!test_bit(i, sc->keymap) &&
769 test_bit(i + 64, sc->sc_keymap)) 735 test_bit(i + 64, sc->keymap))
770 return i; 736 return i;
771 if (test_bit(i, sc->sc_keymap) && 737 if (test_bit(i, sc->keymap) &&
772 !test_bit(i + 64, sc->sc_keymap)) 738 !test_bit(i + 64, sc->keymap))
773 return i + 64; 739 return i + 64;
774 } 740 }
775 } 741 }
776 742
777 /* No partially used TKIP slots, pick any available slot */ 743 /* No partially used TKIP slots, pick any available slot */
778 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax; i++) { 744 for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) {
779 /* Do not allow slots that could be needed for TKIP group keys 745 /* Do not allow slots that could be needed for TKIP group keys
780 * to be used. This limitation could be removed if we know that 746 * to be used. This limitation could be removed if we know that
781 * TKIP will not be used. */ 747 * TKIP will not be used. */
782 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID) 748 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
783 continue; 749 continue;
784 if (sc->sc_splitmic) { 750 if (sc->splitmic) {
785 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID) 751 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
786 continue; 752 continue;
787 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID) 753 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
788 continue; 754 continue;
789 } 755 }
790 756
791 if (!test_bit(i, sc->sc_keymap)) 757 if (!test_bit(i, sc->keymap))
792 return i; /* Found a free slot for a key */ 758 return i; /* Found a free slot for a key */
793 } 759 }
794 760
@@ -797,7 +763,7 @@ static int ath_reserve_key_cache_slot(struct ath_softc *sc)
797} 763}
798 764
799static int ath_key_config(struct ath_softc *sc, 765static int ath_key_config(struct ath_softc *sc,
800 const u8 *addr, 766 struct ieee80211_sta *sta,
801 struct ieee80211_key_conf *key) 767 struct ieee80211_key_conf *key)
802{ 768{
803 struct ath9k_keyval hk; 769 struct ath9k_keyval hk;
@@ -818,7 +784,7 @@ static int ath_key_config(struct ath_softc *sc,
818 hk.kv_type = ATH9K_CIPHER_AES_CCM; 784 hk.kv_type = ATH9K_CIPHER_AES_CCM;
819 break; 785 break;
820 default: 786 default:
821 return -EINVAL; 787 return -EOPNOTSUPP;
822 } 788 }
823 789
824 hk.kv_len = key->keylen; 790 hk.kv_len = key->keylen;
@@ -831,8 +797,11 @@ static int ath_key_config(struct ath_softc *sc,
831 } else if (key->keyidx) { 797 } else if (key->keyidx) {
832 struct ieee80211_vif *vif; 798 struct ieee80211_vif *vif;
833 799
834 mac = addr; 800 if (WARN_ON(!sta))
835 vif = sc->sc_vaps[0]; 801 return -EOPNOTSUPP;
802 mac = sta->addr;
803
804 vif = sc->vifs[0];
836 if (vif->type != NL80211_IFTYPE_AP) { 805 if (vif->type != NL80211_IFTYPE_AP) {
837 /* Only keyidx 0 should be used with unicast key, but 806 /* Only keyidx 0 should be used with unicast key, but
838 * allow this for client mode for now. */ 807 * allow this for client mode for now. */
@@ -840,13 +809,16 @@ static int ath_key_config(struct ath_softc *sc,
840 } else 809 } else
841 return -EIO; 810 return -EIO;
842 } else { 811 } else {
843 mac = addr; 812 if (WARN_ON(!sta))
813 return -EOPNOTSUPP;
814 mac = sta->addr;
815
844 if (key->alg == ALG_TKIP) 816 if (key->alg == ALG_TKIP)
845 idx = ath_reserve_key_cache_slot_tkip(sc); 817 idx = ath_reserve_key_cache_slot_tkip(sc);
846 else 818 else
847 idx = ath_reserve_key_cache_slot(sc); 819 idx = ath_reserve_key_cache_slot(sc);
848 if (idx < 0) 820 if (idx < 0)
849 return -EIO; /* no free key cache entries */ 821 return -ENOSPC; /* no free key cache entries */
850 } 822 }
851 823
852 if (key->alg == ALG_TKIP) 824 if (key->alg == ALG_TKIP)
@@ -857,12 +829,12 @@ static int ath_key_config(struct ath_softc *sc,
857 if (!ret) 829 if (!ret)
858 return -EIO; 830 return -EIO;
859 831
860 set_bit(idx, sc->sc_keymap); 832 set_bit(idx, sc->keymap);
861 if (key->alg == ALG_TKIP) { 833 if (key->alg == ALG_TKIP) {
862 set_bit(idx + 64, sc->sc_keymap); 834 set_bit(idx + 64, sc->keymap);
863 if (sc->sc_splitmic) { 835 if (sc->splitmic) {
864 set_bit(idx + 32, sc->sc_keymap); 836 set_bit(idx + 32, sc->keymap);
865 set_bit(idx + 64 + 32, sc->sc_keymap); 837 set_bit(idx + 64 + 32, sc->keymap);
866 } 838 }
867 } 839 }
868 840
@@ -875,18 +847,19 @@ static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
875 if (key->hw_key_idx < IEEE80211_WEP_NKID) 847 if (key->hw_key_idx < IEEE80211_WEP_NKID)
876 return; 848 return;
877 849
878 clear_bit(key->hw_key_idx, sc->sc_keymap); 850 clear_bit(key->hw_key_idx, sc->keymap);
879 if (key->alg != ALG_TKIP) 851 if (key->alg != ALG_TKIP)
880 return; 852 return;
881 853
882 clear_bit(key->hw_key_idx + 64, sc->sc_keymap); 854 clear_bit(key->hw_key_idx + 64, sc->keymap);
883 if (sc->sc_splitmic) { 855 if (sc->splitmic) {
884 clear_bit(key->hw_key_idx + 32, sc->sc_keymap); 856 clear_bit(key->hw_key_idx + 32, sc->keymap);
885 clear_bit(key->hw_key_idx + 64 + 32, sc->sc_keymap); 857 clear_bit(key->hw_key_idx + 64 + 32, sc->keymap);
886 } 858 }
887} 859}
888 860
889static void setup_ht_cap(struct ieee80211_sta_ht_cap *ht_info) 861static void setup_ht_cap(struct ath_softc *sc,
862 struct ieee80211_sta_ht_cap *ht_info)
890{ 863{
891#define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */ 864#define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
892#define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */ 865#define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
@@ -899,10 +872,23 @@ static void setup_ht_cap(struct ieee80211_sta_ht_cap *ht_info)
899 872
900 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536; 873 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
901 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8; 874 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
875
902 /* set up supported mcs set */ 876 /* set up supported mcs set */
903 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 877 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
904 ht_info->mcs.rx_mask[0] = 0xff; 878
905 ht_info->mcs.rx_mask[1] = 0xff; 879 switch(sc->rx_chainmask) {
880 case 1:
881 ht_info->mcs.rx_mask[0] = 0xff;
882 break;
883 case 3:
884 case 5:
885 case 7:
886 default:
887 ht_info->mcs.rx_mask[0] = 0xff;
888 ht_info->mcs.rx_mask[1] = 0xff;
889 break;
890 }
891
906 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 892 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
907} 893}
908 894
@@ -910,17 +896,16 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
910 struct ieee80211_vif *vif, 896 struct ieee80211_vif *vif,
911 struct ieee80211_bss_conf *bss_conf) 897 struct ieee80211_bss_conf *bss_conf)
912{ 898{
913 struct ath_vap *avp = (void *)vif->drv_priv; 899 struct ath_vif *avp = (void *)vif->drv_priv;
914 900
915 if (bss_conf->assoc) { 901 if (bss_conf->assoc) {
916 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n", 902 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
917 bss_conf->aid, sc->sc_curbssid); 903 bss_conf->aid, sc->curbssid);
918 904
919 /* New association, store aid */ 905 /* New association, store aid */
920 if (avp->av_opmode == NL80211_IFTYPE_STATION) { 906 if (avp->av_opmode == NL80211_IFTYPE_STATION) {
921 sc->sc_curaid = bss_conf->aid; 907 sc->curaid = bss_conf->aid;
922 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid, 908 ath9k_hw_write_associd(sc);
923 sc->sc_curaid);
924 } 909 }
925 910
926 /* Configure the beacon */ 911 /* Configure the beacon */
@@ -928,18 +913,18 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
928 sc->sc_flags |= SC_OP_BEACONS; 913 sc->sc_flags |= SC_OP_BEACONS;
929 914
930 /* Reset rssi stats */ 915 /* Reset rssi stats */
931 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 916 sc->nodestats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
932 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 917 sc->nodestats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
933 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 918 sc->nodestats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
934 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER; 919 sc->nodestats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
935 920
936 /* Start ANI */ 921 /* Start ANI */
937 mod_timer(&sc->sc_ani.timer, 922 mod_timer(&sc->ani.timer,
938 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 923 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
939 924
940 } else { 925 } else {
941 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISSOC\n"); 926 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISSOC\n");
942 sc->sc_curaid = 0; 927 sc->curaid = 0;
943 } 928 }
944} 929}
945 930
@@ -947,6 +932,32 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
947/* LED functions */ 932/* LED functions */
948/********************************/ 933/********************************/
949 934
935static void ath_led_blink_work(struct work_struct *work)
936{
937 struct ath_softc *sc = container_of(work, struct ath_softc,
938 ath_led_blink_work.work);
939
940 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
941 return;
942 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
943 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
944
945 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
946 (sc->sc_flags & SC_OP_LED_ON) ?
947 msecs_to_jiffies(sc->led_off_duration) :
948 msecs_to_jiffies(sc->led_on_duration));
949
950 sc->led_on_duration =
951 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25);
952 sc->led_off_duration =
953 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10);
954 sc->led_on_cnt = sc->led_off_cnt = 0;
955 if (sc->sc_flags & SC_OP_LED_ON)
956 sc->sc_flags &= ~SC_OP_LED_ON;
957 else
958 sc->sc_flags |= SC_OP_LED_ON;
959}
960
950static void ath_led_brightness(struct led_classdev *led_cdev, 961static void ath_led_brightness(struct led_classdev *led_cdev,
951 enum led_brightness brightness) 962 enum led_brightness brightness)
952{ 963{
@@ -956,16 +967,27 @@ static void ath_led_brightness(struct led_classdev *led_cdev,
956 switch (brightness) { 967 switch (brightness) {
957 case LED_OFF: 968 case LED_OFF:
958 if (led->led_type == ATH_LED_ASSOC || 969 if (led->led_type == ATH_LED_ASSOC ||
959 led->led_type == ATH_LED_RADIO) 970 led->led_type == ATH_LED_RADIO) {
971 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
972 (led->led_type == ATH_LED_RADIO));
960 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; 973 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
961 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 974 if (led->led_type == ATH_LED_RADIO)
962 (led->led_type == ATH_LED_RADIO) ? 1 : 975 sc->sc_flags &= ~SC_OP_LED_ON;
963 !!(sc->sc_flags & SC_OP_LED_ASSOCIATED)); 976 } else {
977 sc->led_off_cnt++;
978 }
964 break; 979 break;
965 case LED_FULL: 980 case LED_FULL:
966 if (led->led_type == ATH_LED_ASSOC) 981 if (led->led_type == ATH_LED_ASSOC) {
967 sc->sc_flags |= SC_OP_LED_ASSOCIATED; 982 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
968 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0); 983 queue_delayed_work(sc->hw->workqueue,
984 &sc->ath_led_blink_work, 0);
985 } else if (led->led_type == ATH_LED_RADIO) {
986 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
987 sc->sc_flags |= SC_OP_LED_ON;
988 } else {
989 sc->led_on_cnt++;
990 }
969 break; 991 break;
970 default: 992 default:
971 break; 993 break;
@@ -1001,6 +1023,7 @@ static void ath_unregister_led(struct ath_led *led)
1001 1023
1002static void ath_deinit_leds(struct ath_softc *sc) 1024static void ath_deinit_leds(struct ath_softc *sc)
1003{ 1025{
1026 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1004 ath_unregister_led(&sc->assoc_led); 1027 ath_unregister_led(&sc->assoc_led);
1005 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; 1028 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1006 ath_unregister_led(&sc->tx_led); 1029 ath_unregister_led(&sc->tx_led);
@@ -1020,9 +1043,11 @@ static void ath_init_leds(struct ath_softc *sc)
1020 /* LED off, active low */ 1043 /* LED off, active low */
1021 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1); 1044 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1022 1045
1046 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1047
1023 trigger = ieee80211_get_radio_led_name(sc->hw); 1048 trigger = ieee80211_get_radio_led_name(sc->hw);
1024 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), 1049 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1025 "ath9k-%s:radio", wiphy_name(sc->hw->wiphy)); 1050 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1026 ret = ath_register_led(sc, &sc->radio_led, trigger); 1051 ret = ath_register_led(sc, &sc->radio_led, trigger);
1027 sc->radio_led.led_type = ATH_LED_RADIO; 1052 sc->radio_led.led_type = ATH_LED_RADIO;
1028 if (ret) 1053 if (ret)
@@ -1030,7 +1055,7 @@ static void ath_init_leds(struct ath_softc *sc)
1030 1055
1031 trigger = ieee80211_get_assoc_led_name(sc->hw); 1056 trigger = ieee80211_get_assoc_led_name(sc->hw);
1032 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name), 1057 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1033 "ath9k-%s:assoc", wiphy_name(sc->hw->wiphy)); 1058 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1034 ret = ath_register_led(sc, &sc->assoc_led, trigger); 1059 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1035 sc->assoc_led.led_type = ATH_LED_ASSOC; 1060 sc->assoc_led.led_type = ATH_LED_ASSOC;
1036 if (ret) 1061 if (ret)
@@ -1038,7 +1063,7 @@ static void ath_init_leds(struct ath_softc *sc)
1038 1063
1039 trigger = ieee80211_get_tx_led_name(sc->hw); 1064 trigger = ieee80211_get_tx_led_name(sc->hw);
1040 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name), 1065 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1041 "ath9k-%s:tx", wiphy_name(sc->hw->wiphy)); 1066 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1042 ret = ath_register_led(sc, &sc->tx_led, trigger); 1067 ret = ath_register_led(sc, &sc->tx_led, trigger);
1043 sc->tx_led.led_type = ATH_LED_TX; 1068 sc->tx_led.led_type = ATH_LED_TX;
1044 if (ret) 1069 if (ret)
@@ -1046,7 +1071,7 @@ static void ath_init_leds(struct ath_softc *sc)
1046 1071
1047 trigger = ieee80211_get_rx_led_name(sc->hw); 1072 trigger = ieee80211_get_rx_led_name(sc->hw);
1048 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name), 1073 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1049 "ath9k-%s:rx", wiphy_name(sc->hw->wiphy)); 1074 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1050 ret = ath_register_led(sc, &sc->rx_led, trigger); 1075 ret = ath_register_led(sc, &sc->rx_led, trigger);
1051 sc->rx_led.led_type = ATH_LED_RX; 1076 sc->rx_led.led_type = ATH_LED_RX;
1052 if (ret) 1077 if (ret)
@@ -1066,24 +1091,20 @@ fail:
1066 1091
1067static void ath_radio_enable(struct ath_softc *sc) 1092static void ath_radio_enable(struct ath_softc *sc)
1068{ 1093{
1069 struct ath_hal *ah = sc->sc_ah; 1094 struct ath_hw *ah = sc->sc_ah;
1070 int status; 1095 struct ieee80211_channel *channel = sc->hw->conf.channel;
1096 int r;
1071 1097
1098 ath9k_ps_wakeup(sc);
1072 spin_lock_bh(&sc->sc_resetlock); 1099 spin_lock_bh(&sc->sc_resetlock);
1073 if (!ath9k_hw_reset(ah, ah->ah_curchan, 1100
1074 sc->tx_chan_width, 1101 r = ath9k_hw_reset(ah, ah->curchan, false);
1075 sc->sc_tx_chainmask, 1102
1076 sc->sc_rx_chainmask, 1103 if (r) {
1077 sc->sc_ht_extprotspacing,
1078 false, &status)) {
1079 DPRINTF(sc, ATH_DBG_FATAL, 1104 DPRINTF(sc, ATH_DBG_FATAL,
1080 "Unable to reset channel %u (%uMhz) " 1105 "Unable to reset channel %u (%uMhz) ",
1081 "flags 0x%x hal status %u\n", 1106 "reset status %u\n",
1082 ath9k_hw_mhz2ieee(ah, 1107 channel->center_freq, r);
1083 ah->ah_curchan->channel,
1084 ah->ah_curchan->channelFlags),
1085 ah->ah_curchan->channel,
1086 ah->ah_curchan->channelFlags, status);
1087 } 1108 }
1088 spin_unlock_bh(&sc->sc_resetlock); 1109 spin_unlock_bh(&sc->sc_resetlock);
1089 1110
@@ -1098,7 +1119,7 @@ static void ath_radio_enable(struct ath_softc *sc)
1098 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */ 1119 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
1099 1120
1100 /* Re-Enable interrupts */ 1121 /* Re-Enable interrupts */
1101 ath9k_hw_set_interrupts(ah, sc->sc_imask); 1122 ath9k_hw_set_interrupts(ah, sc->imask);
1102 1123
1103 /* Enable LED */ 1124 /* Enable LED */
1104 ath9k_hw_cfg_output(ah, ATH_LED_PIN, 1125 ath9k_hw_cfg_output(ah, ATH_LED_PIN,
@@ -1106,14 +1127,16 @@ static void ath_radio_enable(struct ath_softc *sc)
1106 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0); 1127 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
1107 1128
1108 ieee80211_wake_queues(sc->hw); 1129 ieee80211_wake_queues(sc->hw);
1130 ath9k_ps_restore(sc);
1109} 1131}
1110 1132
1111static void ath_radio_disable(struct ath_softc *sc) 1133static void ath_radio_disable(struct ath_softc *sc)
1112{ 1134{
1113 struct ath_hal *ah = sc->sc_ah; 1135 struct ath_hw *ah = sc->sc_ah;
1114 int status; 1136 struct ieee80211_channel *channel = sc->hw->conf.channel;
1115 1137 int r;
1116 1138
1139 ath9k_ps_wakeup(sc);
1117 ieee80211_stop_queues(sc->hw); 1140 ieee80211_stop_queues(sc->hw);
1118 1141
1119 /* Disable LED */ 1142 /* Disable LED */
@@ -1123,38 +1146,31 @@ static void ath_radio_disable(struct ath_softc *sc)
1123 /* Disable interrupts */ 1146 /* Disable interrupts */
1124 ath9k_hw_set_interrupts(ah, 0); 1147 ath9k_hw_set_interrupts(ah, 0);
1125 1148
1126 ath_draintxq(sc, false); /* clear pending tx frames */ 1149 ath_drain_all_txq(sc, false); /* clear pending tx frames */
1127 ath_stoprecv(sc); /* turn off frame recv */ 1150 ath_stoprecv(sc); /* turn off frame recv */
1128 ath_flushrecv(sc); /* flush recv queue */ 1151 ath_flushrecv(sc); /* flush recv queue */
1129 1152
1130 spin_lock_bh(&sc->sc_resetlock); 1153 spin_lock_bh(&sc->sc_resetlock);
1131 if (!ath9k_hw_reset(ah, ah->ah_curchan, 1154 r = ath9k_hw_reset(ah, ah->curchan, false);
1132 sc->tx_chan_width, 1155 if (r) {
1133 sc->sc_tx_chainmask,
1134 sc->sc_rx_chainmask,
1135 sc->sc_ht_extprotspacing,
1136 false, &status)) {
1137 DPRINTF(sc, ATH_DBG_FATAL, 1156 DPRINTF(sc, ATH_DBG_FATAL,
1138 "Unable to reset channel %u (%uMhz) " 1157 "Unable to reset channel %u (%uMhz) "
1139 "flags 0x%x hal status %u\n", 1158 "reset status %u\n",
1140 ath9k_hw_mhz2ieee(ah, 1159 channel->center_freq, r);
1141 ah->ah_curchan->channel,
1142 ah->ah_curchan->channelFlags),
1143 ah->ah_curchan->channel,
1144 ah->ah_curchan->channelFlags, status);
1145 } 1160 }
1146 spin_unlock_bh(&sc->sc_resetlock); 1161 spin_unlock_bh(&sc->sc_resetlock);
1147 1162
1148 ath9k_hw_phy_disable(ah); 1163 ath9k_hw_phy_disable(ah);
1149 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 1164 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1165 ath9k_ps_restore(sc);
1150} 1166}
1151 1167
1152static bool ath_is_rfkill_set(struct ath_softc *sc) 1168static bool ath_is_rfkill_set(struct ath_softc *sc)
1153{ 1169{
1154 struct ath_hal *ah = sc->sc_ah; 1170 struct ath_hw *ah = sc->sc_ah;
1155 1171
1156 return ath9k_hw_gpio_get(ah, ah->ah_rfkill_gpio) == 1172 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1157 ah->ah_rfkill_polarity; 1173 ah->rfkill_polarity;
1158} 1174}
1159 1175
1160/* h/w rfkill poll function */ 1176/* h/w rfkill poll function */
@@ -1238,7 +1254,7 @@ static int ath_init_sw_rfkill(struct ath_softc *sc)
1238 } 1254 }
1239 1255
1240 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name), 1256 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1241 "ath9k-%s:rfkill", wiphy_name(sc->hw->wiphy)); 1257 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1242 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name; 1258 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
1243 sc->rf_kill.rfkill->data = sc; 1259 sc->rf_kill.rfkill->data = sc;
1244 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio; 1260 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
@@ -1251,7 +1267,7 @@ static int ath_init_sw_rfkill(struct ath_softc *sc)
1251/* Deinitialize rfkill */ 1267/* Deinitialize rfkill */
1252static void ath_deinit_rfkill(struct ath_softc *sc) 1268static void ath_deinit_rfkill(struct ath_softc *sc)
1253{ 1269{
1254 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1270 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1255 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll); 1271 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1256 1272
1257 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) { 1273 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
@@ -1263,7 +1279,7 @@ static void ath_deinit_rfkill(struct ath_softc *sc)
1263 1279
1264static int ath_start_rfkill_poll(struct ath_softc *sc) 1280static int ath_start_rfkill_poll(struct ath_softc *sc)
1265{ 1281{
1266 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1282 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1267 queue_delayed_work(sc->hw->workqueue, 1283 queue_delayed_work(sc->hw->workqueue,
1268 &sc->rf_kill.rfkill_poll, 0); 1284 &sc->rf_kill.rfkill_poll, 0);
1269 1285
@@ -1274,13 +1290,7 @@ static int ath_start_rfkill_poll(struct ath_softc *sc)
1274 rfkill_free(sc->rf_kill.rfkill); 1290 rfkill_free(sc->rf_kill.rfkill);
1275 1291
1276 /* Deinitialize the device */ 1292 /* Deinitialize the device */
1277 ath_detach(sc); 1293 ath_cleanup(sc);
1278 if (sc->pdev->irq)
1279 free_irq(sc->pdev->irq, sc);
1280 pci_iounmap(sc->pdev, sc->mem);
1281 pci_release_region(sc->pdev, 0);
1282 pci_disable_device(sc->pdev);
1283 ieee80211_free_hw(sc->hw);
1284 return -EIO; 1294 return -EIO;
1285 } else { 1295 } else {
1286 sc->sc_flags |= SC_OP_RFKILL_REGISTERED; 1296 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
@@ -1291,11 +1301,21 @@ static int ath_start_rfkill_poll(struct ath_softc *sc)
1291} 1301}
1292#endif /* CONFIG_RFKILL */ 1302#endif /* CONFIG_RFKILL */
1293 1303
1294static void ath_detach(struct ath_softc *sc) 1304void ath_cleanup(struct ath_softc *sc)
1305{
1306 ath_detach(sc);
1307 free_irq(sc->irq, sc);
1308 ath_bus_cleanup(sc);
1309 ieee80211_free_hw(sc->hw);
1310}
1311
1312void ath_detach(struct ath_softc *sc)
1295{ 1313{
1296 struct ieee80211_hw *hw = sc->hw; 1314 struct ieee80211_hw *hw = sc->hw;
1297 int i = 0; 1315 int i = 0;
1298 1316
1317 ath9k_ps_wakeup(sc);
1318
1299 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n"); 1319 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
1300 1320
1301#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 1321#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
@@ -1320,11 +1340,12 @@ static void ath_detach(struct ath_softc *sc)
1320 1340
1321 ath9k_hw_detach(sc->sc_ah); 1341 ath9k_hw_detach(sc->sc_ah);
1322 ath9k_exit_debug(sc); 1342 ath9k_exit_debug(sc);
1343 ath9k_ps_restore(sc);
1323} 1344}
1324 1345
1325static int ath_init(u16 devid, struct ath_softc *sc) 1346static int ath_init(u16 devid, struct ath_softc *sc)
1326{ 1347{
1327 struct ath_hal *ah = NULL; 1348 struct ath_hw *ah = NULL;
1328 int status; 1349 int status;
1329 int error = 0, i; 1350 int error = 0, i;
1330 int csz = 0; 1351 int csz = 0;
@@ -1345,44 +1366,40 @@ static int ath_init(u16 devid, struct ath_softc *sc)
1345 * Cache line size is used to size and align various 1366 * Cache line size is used to size and align various
1346 * structures used to communicate with the hardware. 1367 * structures used to communicate with the hardware.
1347 */ 1368 */
1348 bus_read_cachesize(sc, &csz); 1369 ath_read_cachesize(sc, &csz);
1349 /* XXX assert csz is non-zero */ 1370 /* XXX assert csz is non-zero */
1350 sc->sc_cachelsz = csz << 2; /* convert to bytes */ 1371 sc->cachelsz = csz << 2; /* convert to bytes */
1351 1372
1352 ah = ath9k_hw_attach(devid, sc, sc->mem, &status); 1373 ah = ath9k_hw_attach(devid, sc, &status);
1353 if (ah == NULL) { 1374 if (ah == NULL) {
1354 DPRINTF(sc, ATH_DBG_FATAL, 1375 DPRINTF(sc, ATH_DBG_FATAL,
1355 "Unable to attach hardware; HAL status %u\n", status); 1376 "Unable to attach hardware; HAL status %d\n", status);
1356 error = -ENXIO; 1377 error = -ENXIO;
1357 goto bad; 1378 goto bad;
1358 } 1379 }
1359 sc->sc_ah = ah; 1380 sc->sc_ah = ah;
1360 1381
1361 /* Get the hardware key cache size. */ 1382 /* Get the hardware key cache size. */
1362 sc->sc_keymax = ah->ah_caps.keycache_size; 1383 sc->keymax = ah->caps.keycache_size;
1363 if (sc->sc_keymax > ATH_KEYMAX) { 1384 if (sc->keymax > ATH_KEYMAX) {
1364 DPRINTF(sc, ATH_DBG_KEYCACHE, 1385 DPRINTF(sc, ATH_DBG_KEYCACHE,
1365 "Warning, using only %u entries in %u key cache\n", 1386 "Warning, using only %u entries in %u key cache\n",
1366 ATH_KEYMAX, sc->sc_keymax); 1387 ATH_KEYMAX, sc->keymax);
1367 sc->sc_keymax = ATH_KEYMAX; 1388 sc->keymax = ATH_KEYMAX;
1368 } 1389 }
1369 1390
1370 /* 1391 /*
1371 * Reset the key cache since some parts do not 1392 * Reset the key cache since some parts do not
1372 * reset the contents on initial power up. 1393 * reset the contents on initial power up.
1373 */ 1394 */
1374 for (i = 0; i < sc->sc_keymax; i++) 1395 for (i = 0; i < sc->keymax; i++)
1375 ath9k_hw_keyreset(ah, (u16) i); 1396 ath9k_hw_keyreset(ah, (u16) i);
1376 1397
1377 /* Collect the channel list using the default country code */ 1398 if (ath9k_regd_init(sc->sc_ah))
1378
1379 error = ath_setup_channels(sc);
1380 if (error)
1381 goto bad; 1399 goto bad;
1382 1400
1383 /* default to MONITOR mode */ 1401 /* default to MONITOR mode */
1384 sc->sc_ah->ah_opmode = NL80211_IFTYPE_MONITOR; 1402 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1385
1386 1403
1387 /* Setup rate tables */ 1404 /* Setup rate tables */
1388 1405
@@ -1411,7 +1428,7 @@ static int ath_init(u16 devid, struct ath_softc *sc)
1411 goto bad2; 1428 goto bad2;
1412 } 1429 }
1413 1430
1414 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME; 1431 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1415 ath_cabq_update(sc); 1432 ath_cabq_update(sc);
1416 1433
1417 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++) 1434 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
@@ -1448,8 +1465,8 @@ static int ath_init(u16 devid, struct ath_softc *sc)
1448 /* Initializes the noise floor to a reasonable default value. 1465 /* Initializes the noise floor to a reasonable default value.
1449 * Later on this will be updated during ANI processing. */ 1466 * Later on this will be updated during ANI processing. */
1450 1467
1451 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR; 1468 sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1452 setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc); 1469 setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1453 1470
1454 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, 1471 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1455 ATH9K_CIPHER_TKIP, NULL)) { 1472 ATH9K_CIPHER_TKIP, NULL)) {
@@ -1475,33 +1492,31 @@ static int ath_init(u16 devid, struct ath_softc *sc)
1475 ATH9K_CIPHER_MIC, NULL) 1492 ATH9K_CIPHER_MIC, NULL)
1476 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT, 1493 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1477 0, NULL)) 1494 0, NULL))
1478 sc->sc_splitmic = 1; 1495 sc->splitmic = 1;
1479 1496
1480 /* turn on mcast key search if possible */ 1497 /* turn on mcast key search if possible */
1481 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL)) 1498 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1482 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1, 1499 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1483 1, NULL); 1500 1, NULL);
1484 1501
1485 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX; 1502 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1486 sc->sc_config.txpowlimit_override = 0;
1487 1503
1488 /* 11n Capabilities */ 1504 /* 11n Capabilities */
1489 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { 1505 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1490 sc->sc_flags |= SC_OP_TXAGGR; 1506 sc->sc_flags |= SC_OP_TXAGGR;
1491 sc->sc_flags |= SC_OP_RXAGGR; 1507 sc->sc_flags |= SC_OP_RXAGGR;
1492 } 1508 }
1493 1509
1494 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask; 1510 sc->tx_chainmask = ah->caps.tx_chainmask;
1495 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask; 1511 sc->rx_chainmask = ah->caps.rx_chainmask;
1496 1512
1497 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 1513 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1498 sc->rx.defant = ath9k_hw_getdefantenna(ah); 1514 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1499 1515
1500 ath9k_hw_getmac(ah, sc->sc_myaddr); 1516 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1501 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) { 1517 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
1502 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask); 1518 ATH_SET_VIF_BSSID_MASK(sc->bssidmask);
1503 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask); 1519 ath9k_hw_setbssidmask(sc);
1504 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1505 } 1520 }
1506 1521
1507 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */ 1522 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
@@ -1511,24 +1526,29 @@ static int ath_init(u16 devid, struct ath_softc *sc)
1511 sc->beacon.bslot[i] = ATH_IF_ID_ANY; 1526 sc->beacon.bslot[i] = ATH_IF_ID_ANY;
1512 1527
1513 /* save MISC configurations */ 1528 /* save MISC configurations */
1514 sc->sc_config.swBeaconProcess = 1; 1529 sc->config.swBeaconProcess = 1;
1515 1530
1516 /* setup channels and rates */ 1531 /* setup channels and rates */
1517 1532
1518 sc->sbands[IEEE80211_BAND_2GHZ].channels = 1533 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1519 sc->channels[IEEE80211_BAND_2GHZ];
1520 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = 1534 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1521 sc->rates[IEEE80211_BAND_2GHZ]; 1535 sc->rates[IEEE80211_BAND_2GHZ];
1522 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; 1536 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1537 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1538 ARRAY_SIZE(ath9k_2ghz_chantable);
1523 1539
1524 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) { 1540 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1525 sc->sbands[IEEE80211_BAND_5GHZ].channels = 1541 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1526 sc->channels[IEEE80211_BAND_5GHZ];
1527 sc->sbands[IEEE80211_BAND_5GHZ].bitrates = 1542 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1528 sc->rates[IEEE80211_BAND_5GHZ]; 1543 sc->rates[IEEE80211_BAND_5GHZ];
1529 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; 1544 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1545 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1546 ARRAY_SIZE(ath9k_5ghz_chantable);
1530 } 1547 }
1531 1548
1549 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)
1550 ath9k_hw_btcoex_enable(sc->sc_ah);
1551
1532 return 0; 1552 return 0;
1533bad2: 1553bad2:
1534 /* cleanup tx queues */ 1554 /* cleanup tx queues */
@@ -1542,7 +1562,7 @@ bad:
1542 return error; 1562 return error;
1543} 1563}
1544 1564
1545static int ath_attach(u16 devid, struct ath_softc *sc) 1565int ath_attach(u16 devid, struct ath_softc *sc)
1546{ 1566{
1547 struct ieee80211_hw *hw = sc->hw; 1567 struct ieee80211_hw *hw = sc->hw;
1548 int error = 0; 1568 int error = 0;
@@ -1555,34 +1575,42 @@ static int ath_attach(u16 devid, struct ath_softc *sc)
1555 1575
1556 /* get mac address from hardware and set in mac80211 */ 1576 /* get mac address from hardware and set in mac80211 */
1557 1577
1558 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr); 1578 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr);
1559 1579
1560 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 1580 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1561 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1581 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1562 IEEE80211_HW_SIGNAL_DBM | 1582 IEEE80211_HW_SIGNAL_DBM |
1563 IEEE80211_HW_AMPDU_AGGREGATION; 1583 IEEE80211_HW_AMPDU_AGGREGATION |
1584 IEEE80211_HW_SUPPORTS_PS |
1585 IEEE80211_HW_PS_NULLFUNC_STACK;
1586
1587 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah))
1588 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1564 1589
1565 hw->wiphy->interface_modes = 1590 hw->wiphy->interface_modes =
1566 BIT(NL80211_IFTYPE_AP) | 1591 BIT(NL80211_IFTYPE_AP) |
1567 BIT(NL80211_IFTYPE_STATION) | 1592 BIT(NL80211_IFTYPE_STATION) |
1568 BIT(NL80211_IFTYPE_ADHOC); 1593 BIT(NL80211_IFTYPE_ADHOC);
1569 1594
1595 hw->wiphy->reg_notifier = ath9k_reg_notifier;
1596 hw->wiphy->strict_regulatory = true;
1597
1570 hw->queues = 4; 1598 hw->queues = 4;
1571 hw->max_rates = 4; 1599 hw->max_rates = 4;
1572 hw->max_rate_tries = ATH_11N_TXMAXTRY; 1600 hw->max_rate_tries = ATH_11N_TXMAXTRY;
1573 hw->sta_data_size = sizeof(struct ath_node); 1601 hw->sta_data_size = sizeof(struct ath_node);
1574 hw->vif_data_size = sizeof(struct ath_vap); 1602 hw->vif_data_size = sizeof(struct ath_vif);
1575 1603
1576 hw->rate_control_algorithm = "ath9k_rate_control"; 1604 hw->rate_control_algorithm = "ath9k_rate_control";
1577 1605
1578 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { 1606 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1579 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_cap); 1607 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1580 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) 1608 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1581 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); 1609 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1582 } 1610 }
1583 1611
1584 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ]; 1612 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ];
1585 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) 1613 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1586 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 1614 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1587 &sc->sbands[IEEE80211_BAND_5GHZ]; 1615 &sc->sbands[IEEE80211_BAND_5GHZ];
1588 1616
@@ -1597,7 +1625,7 @@ static int ath_attach(u16 devid, struct ath_softc *sc)
1597 1625
1598#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 1626#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1599 /* Initialize h/w Rfkill */ 1627
1600 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1628 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1601 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll); 1629 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
1602 1630
1603 /* Initialize s/w rfkill */ 1631 /* Initialize s/w rfkill */
@@ -1605,11 +1633,36 @@ static int ath_attach(u16 devid, struct ath_softc *sc)
1605 goto detach; 1633 goto detach;
1606#endif 1634#endif
1607 1635
1636 if (ath9k_is_world_regd(sc->sc_ah)) {
1637 /* Anything applied here (prior to wiphy registration) gets
1638 * saved on the wiphy orig_* parameters */
1639 const struct ieee80211_regdomain *regd =
1640 ath9k_world_regdomain(sc->sc_ah);
1641 hw->wiphy->custom_regulatory = true;
1642 hw->wiphy->strict_regulatory = false;
1643 wiphy_apply_custom_regulatory(sc->hw->wiphy, regd);
1644 ath9k_reg_apply_radar_flags(hw->wiphy);
1645 ath9k_reg_apply_world_flags(hw->wiphy, REGDOM_SET_BY_INIT);
1646 } else {
1647 /* This gets applied in the case of the absence of CRDA,
1648 * it's our own custom world regulatory domain, similar to
1649 * cfg80211's but we enable passive scanning */
1650 const struct ieee80211_regdomain *regd =
1651 ath9k_default_world_regdomain();
1652 wiphy_apply_custom_regulatory(sc->hw->wiphy, regd);
1653 ath9k_reg_apply_radar_flags(hw->wiphy);
1654 ath9k_reg_apply_world_flags(hw->wiphy, REGDOM_SET_BY_INIT);
1655 }
1656
1608 error = ieee80211_register_hw(hw); 1657 error = ieee80211_register_hw(hw);
1609 1658
1659 if (!ath9k_is_world_regd(sc->sc_ah))
1660 regulatory_hint(hw->wiphy, sc->sc_ah->regulatory.alpha2);
1661
1610 /* Initialize LED control */ 1662 /* Initialize LED control */
1611 ath_init_leds(sc); 1663 ath_init_leds(sc);
1612 1664
1665
1613 return 0; 1666 return 0;
1614detach: 1667detach:
1615 ath_detach(sc); 1668 ath_detach(sc);
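The block added before ieee80211_register_hw() applies a custom world regulatory domain when the EEPROM carries a world code, and only calls regulatory_hint() with the EEPROM alpha2 otherwise. As a hedged sketch (the real tables live in the driver's regd code, which is not part of this hunk), such a default world domain might look like the following, with passive scanning forced on the ranges that need a country before active use:

/* Hedged example of a custom world regdomain similar in spirit to what
 * ath9k_default_world_regdomain() is described as returning; the rule
 * set here is illustrative, not the driver's actual table. */
static const struct ieee80211_regdomain ath_world_regdom_sketch = {
        .n_reg_rules = 2,
        .alpha2 = "99",
        .reg_rules = {
                /* 2.4 GHz: channels 1-11 */
                REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
                /* 5 GHz: passive scan, no IBSS until a country is known */
                REG_RULE(5170 - 10, 5850 + 10, 40, 0, 30,
                         NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
        }
};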
@@ -1618,24 +1671,20 @@ detach:
1618 1671
1619int ath_reset(struct ath_softc *sc, bool retry_tx) 1672int ath_reset(struct ath_softc *sc, bool retry_tx)
1620{ 1673{
1621 struct ath_hal *ah = sc->sc_ah; 1674 struct ath_hw *ah = sc->sc_ah;
1622 int status; 1675 struct ieee80211_hw *hw = sc->hw;
1623 int error = 0; 1676 int r;
1624 1677
1625 ath9k_hw_set_interrupts(ah, 0); 1678 ath9k_hw_set_interrupts(ah, 0);
1626 ath_draintxq(sc, retry_tx); 1679 ath_drain_all_txq(sc, retry_tx);
1627 ath_stoprecv(sc); 1680 ath_stoprecv(sc);
1628 ath_flushrecv(sc); 1681 ath_flushrecv(sc);
1629 1682
1630 spin_lock_bh(&sc->sc_resetlock); 1683 spin_lock_bh(&sc->sc_resetlock);
1631 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, 1684 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1632 sc->tx_chan_width, 1685 if (r)
1633 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1634 sc->sc_ht_extprotspacing, false, &status)) {
1635 DPRINTF(sc, ATH_DBG_FATAL, 1686 DPRINTF(sc, ATH_DBG_FATAL,
1636 "Unable to reset hardware; hal status %u\n", status); 1687 "Unable to reset hardware; reset status %u\n", r);
1637 error = -EIO;
1638 }
1639 spin_unlock_bh(&sc->sc_resetlock); 1688 spin_unlock_bh(&sc->sc_resetlock);
1640 1689
1641 if (ath_startrecv(sc) != 0) 1690 if (ath_startrecv(sc) != 0)
@@ -1646,14 +1695,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1646 * that changes the channel so update any state that 1695 * that changes the channel so update any state that
1647 * might change as a result. 1696 * might change as a result.
1648 */ 1697 */
1649 ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan)); 1698 ath_cache_conf_rate(sc, &hw->conf);
1650 1699
1651 ath_update_txpow(sc); 1700 ath_update_txpow(sc);
1652 1701
1653 if (sc->sc_flags & SC_OP_BEACONS) 1702 if (sc->sc_flags & SC_OP_BEACONS)
1654 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */ 1703 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
1655 1704
1656 ath9k_hw_set_interrupts(ah, sc->sc_imask); 1705 ath9k_hw_set_interrupts(ah, sc->imask);
1657 1706
1658 if (retry_tx) { 1707 if (retry_tx) {
1659 int i; 1708 int i;
@@ -1666,7 +1715,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1666 } 1715 }
1667 } 1716 }
1668 1717
1669 return error; 1718 return r;
1670} 1719}
1671 1720
1672/* 1721/*
@@ -1706,7 +1755,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1706 * descriptors that cross the 4K page boundary. Assume 1755 * descriptors that cross the 4K page boundary. Assume
1707 * one skipped descriptor per 4K page. 1756 * one skipped descriptor per 4K page.
1708 */ 1757 */
1709 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) { 1758 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1710 u32 ndesc_skipped = 1759 u32 ndesc_skipped =
1711 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len); 1760 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1712 u32 dma_len; 1761 u32 dma_len;
@@ -1720,9 +1769,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1720 } 1769 }
1721 1770
1722 /* allocate descriptors */ 1771 /* allocate descriptors */
1723 dd->dd_desc = pci_alloc_consistent(sc->pdev, 1772 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
1724 dd->dd_desc_len, 1773 &dd->dd_desc_paddr, GFP_ATOMIC);
1725 &dd->dd_desc_paddr);
1726 if (dd->dd_desc == NULL) { 1774 if (dd->dd_desc == NULL) {
1727 error = -ENOMEM; 1775 error = -ENOMEM;
1728 goto fail; 1776 goto fail;
@@ -1747,7 +1795,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1747 bf->bf_desc = ds; 1795 bf->bf_desc = ds;
1748 bf->bf_daddr = DS2PHYS(dd, ds); 1796 bf->bf_daddr = DS2PHYS(dd, ds);
1749 1797
1750 if (!(sc->sc_ah->ah_caps.hw_caps & 1798 if (!(sc->sc_ah->caps.hw_caps &
1751 ATH9K_HW_CAP_4KB_SPLITTRANS)) { 1799 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1752 /* 1800 /*
1753 * Skip descriptor addresses which can cause 4KB 1801 * Skip descriptor addresses which can cause 4KB
@@ -1768,8 +1816,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1768 } 1816 }
1769 return 0; 1817 return 0;
1770fail2: 1818fail2:
1771 pci_free_consistent(sc->pdev, 1819 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1772 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr); 1820 dd->dd_desc_paddr);
1773fail: 1821fail:
1774 memset(dd, 0, sizeof(*dd)); 1822 memset(dd, 0, sizeof(*dd));
1775 return error; 1823 return error;
@@ -1782,8 +1830,8 @@ void ath_descdma_cleanup(struct ath_softc *sc,
1782 struct ath_descdma *dd, 1830 struct ath_descdma *dd,
1783 struct list_head *head) 1831 struct list_head *head)
1784{ 1832{
1785 pci_free_consistent(sc->pdev, 1833 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1786 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr); 1834 dd->dd_desc_paddr);
1787 1835
1788 INIT_LIST_HEAD(head); 1836 INIT_LIST_HEAD(head);
1789 kfree(dd->dd_bufptr); 1837 kfree(dd->dd_bufptr);
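Descriptor memory now goes through the generic DMA API against sc->dev instead of the PCI-only wrappers, so the same descriptor code can serve both the PCI and the AHB bus front-ends. The old and new calls map one-to-one; roughly (sketch, assuming sc->dev points at the underlying struct device):

/* Rough equivalence the conversion relies on (sketch, not new driver code):
 *
 *   pci_alloc_consistent(pdev, len, &pa)
 *     == dma_alloc_coherent(&pdev->dev, len, &pa, GFP_ATOMIC);
 *   pci_free_consistent(pdev, len, vaddr, pa)
 *     == dma_free_coherent(&pdev->dev, len, vaddr, pa);
 */
dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                 &dd->dd_desc_paddr, GFP_ATOMIC);
if (dd->dd_desc == NULL)
        return -ENOMEM;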
@@ -1840,6 +1888,37 @@ int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1840 return qnum; 1888 return qnum;
1841} 1889}
1842 1890
1891/* XXX: Remove me once we don't depend on ath9k_channel for all
1892 * this redundant data */
1893static void ath9k_update_ichannel(struct ath_softc *sc,
1894 struct ath9k_channel *ichan)
1895{
1896 struct ieee80211_hw *hw = sc->hw;
1897 struct ieee80211_channel *chan = hw->conf.channel;
1898 struct ieee80211_conf *conf = &hw->conf;
1899
1900 ichan->channel = chan->center_freq;
1901 ichan->chan = chan;
1902
1903 if (chan->band == IEEE80211_BAND_2GHZ) {
1904 ichan->chanmode = CHANNEL_G;
1905 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
1906 } else {
1907 ichan->chanmode = CHANNEL_A;
1908 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1909 }
1910
1911 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1912
1913 if (conf_is_ht(conf)) {
1914 if (conf_is_ht40(conf))
1915 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1916
1917 ichan->chanmode = ath_get_extchanmode(sc, chan,
1918 conf->channel_type);
1919 }
1920}
1921
1843/**********************/ 1922/**********************/
1844/* mac80211 callbacks */ 1923/* mac80211 callbacks */
1845/**********************/ 1924/**********************/
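With ath9k_update_ichannel() in place, the driver stops keeping a private channel list in sync by hand: mac80211 hands back the channel it was given at attach time, and its hw_value is used as a direct index into ah->channels[]. The pattern used below in ath9k_start() and ath9k_config() is essentially (sketch using names from this patch; hw_value is assumed to have been set to the matching index when the band tables were built):

struct ieee80211_channel *curchan = hw->conf.channel;
struct ath9k_channel *ichan = &sc->sc_ah->channels[curchan->hw_value];
int r;

ath9k_update_ichannel(sc, ichan);       /* refresh freq/HT state from hw->conf */
r = ath9k_hw_reset(sc->sc_ah, ichan, false);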
@@ -1849,24 +1928,19 @@ static int ath9k_start(struct ieee80211_hw *hw)
1849 struct ath_softc *sc = hw->priv; 1928 struct ath_softc *sc = hw->priv;
1850 struct ieee80211_channel *curchan = hw->conf.channel; 1929 struct ieee80211_channel *curchan = hw->conf.channel;
1851 struct ath9k_channel *init_channel; 1930 struct ath9k_channel *init_channel;
1852 int error = 0, pos, status; 1931 int r, pos;
1853 1932
1854 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with " 1933 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
1855 "initial channel: %d MHz\n", curchan->center_freq); 1934 "initial channel: %d MHz\n", curchan->center_freq);
1856 1935
1936 mutex_lock(&sc->mutex);
1937
1857 /* setup initial channel */ 1938 /* setup initial channel */
1858 1939
1859 pos = ath_get_channel(sc, curchan); 1940 pos = curchan->hw_value;
1860 if (pos == -1) {
1861 DPRINTF(sc, ATH_DBG_FATAL, "Invalid channel: %d\n", curchan->center_freq);
1862 error = -EINVAL;
1863 goto error;
1864 }
1865 1941
1866 sc->tx_chan_width = ATH9K_HT_MACMODE_20; 1942 init_channel = &sc->sc_ah->channels[pos];
1867 sc->sc_ah->ah_channels[pos].chanmode = 1943 ath9k_update_ichannel(sc, init_channel);
1868 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
1869 init_channel = &sc->sc_ah->ah_channels[pos];
1870 1944
1871 /* Reset SERDES registers */ 1945 /* Reset SERDES registers */
1872 ath9k_hw_configpcipowersave(sc->sc_ah, 0); 1946 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
@@ -1879,17 +1953,14 @@ static int ath9k_start(struct ieee80211_hw *hw)
1879 * and then setup of the interrupt mask. 1953 * and then setup of the interrupt mask.
1880 */ 1954 */
1881 spin_lock_bh(&sc->sc_resetlock); 1955 spin_lock_bh(&sc->sc_resetlock);
1882 if (!ath9k_hw_reset(sc->sc_ah, init_channel, 1956 r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
1883 sc->tx_chan_width, 1957 if (r) {
1884 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1885 sc->sc_ht_extprotspacing, false, &status)) {
1886 DPRINTF(sc, ATH_DBG_FATAL, 1958 DPRINTF(sc, ATH_DBG_FATAL,
1887 "Unable to reset hardware; hal status %u " 1959 "Unable to reset hardware; reset status %u "
1888 "(freq %u flags 0x%x)\n", status, 1960 "(freq %u MHz)\n", r,
1889 init_channel->channel, init_channel->channelFlags); 1961 curchan->center_freq);
1890 error = -EIO;
1891 spin_unlock_bh(&sc->sc_resetlock); 1962 spin_unlock_bh(&sc->sc_resetlock);
1892 goto error; 1963 goto mutex_unlock;
1893 } 1964 }
1894 spin_unlock_bh(&sc->sc_resetlock); 1965 spin_unlock_bh(&sc->sc_resetlock);
1895 1966
@@ -1909,56 +1980,39 @@ static int ath9k_start(struct ieee80211_hw *hw)
1909 if (ath_startrecv(sc) != 0) { 1980 if (ath_startrecv(sc) != 0) {
1910 DPRINTF(sc, ATH_DBG_FATAL, 1981 DPRINTF(sc, ATH_DBG_FATAL,
1911 "Unable to start recv logic\n"); 1982 "Unable to start recv logic\n");
1912 error = -EIO; 1983 r = -EIO;
1913 goto error; 1984 goto mutex_unlock;
1914 } 1985 }
1915 1986
1916 /* Setup our intr mask. */ 1987 /* Setup our intr mask. */
1917 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX 1988 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
1918 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN 1989 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1919 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 1990 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1920 1991
1921 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT) 1992 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1922 sc->sc_imask |= ATH9K_INT_GTT; 1993 sc->imask |= ATH9K_INT_GTT;
1923 1994
1924 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) 1995 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1925 sc->sc_imask |= ATH9K_INT_CST; 1996 sc->imask |= ATH9K_INT_CST;
1926 1997
1927 /* 1998 ath_cache_conf_rate(sc, &hw->conf);
1928 * Enable MIB interrupts when there are hardware phy counters.
1929 * Note we only do this (at the moment) for station mode.
1930 */
1931 if (ath9k_hw_phycounters(sc->sc_ah) &&
1932 ((sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) ||
1933 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC)))
1934 sc->sc_imask |= ATH9K_INT_MIB;
1935 /*
1936 * Some hardware processes the TIM IE and fires an
1937 * interrupt when the TIM bit is set. For hardware
1938 * that does, if not overridden by configuration,
1939 * enable the TIM interrupt when operating as station.
1940 */
1941 if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
1942 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) &&
1943 !sc->sc_config.swBeaconProcess)
1944 sc->sc_imask |= ATH9K_INT_TIM;
1945
1946 ath_setcurmode(sc, ath_chan2mode(init_channel));
1947 1999
1948 sc->sc_flags &= ~SC_OP_INVALID; 2000 sc->sc_flags &= ~SC_OP_INVALID;
1949 2001
1950 /* Disable BMISS interrupt when we're not associated */ 2002 /* Disable BMISS interrupt when we're not associated */
1951 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 2003 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1952 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask); 2004 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
1953 2005
1954 ieee80211_wake_queues(sc->hw); 2006 ieee80211_wake_queues(sc->hw);
1955 2007
1956#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 2008#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1957 error = ath_start_rfkill_poll(sc); 2009 r = ath_start_rfkill_poll(sc);
1958#endif 2010#endif
1959 2011
1960error: 2012mutex_unlock:
1961 return error; 2013 mutex_unlock(&sc->mutex);
2014
2015 return r;
1962} 2016}
1963 2017
1964static int ath9k_tx(struct ieee80211_hw *hw, 2018static int ath9k_tx(struct ieee80211_hw *hw,
@@ -2022,7 +2076,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2022 return; 2076 return;
2023 } 2077 }
2024 2078
2025 DPRINTF(sc, ATH_DBG_CONFIG, "Cleaning up\n"); 2079 mutex_lock(&sc->mutex);
2026 2080
2027 ieee80211_stop_queues(sc->hw); 2081 ieee80211_stop_queues(sc->hw);
2028 2082
@@ -2031,14 +2085,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2031 ath9k_hw_set_interrupts(sc->sc_ah, 0); 2085 ath9k_hw_set_interrupts(sc->sc_ah, 0);
2032 2086
2033 if (!(sc->sc_flags & SC_OP_INVALID)) { 2087 if (!(sc->sc_flags & SC_OP_INVALID)) {
2034 ath_draintxq(sc, false); 2088 ath_drain_all_txq(sc, false);
2035 ath_stoprecv(sc); 2089 ath_stoprecv(sc);
2036 ath9k_hw_phy_disable(sc->sc_ah); 2090 ath9k_hw_phy_disable(sc->sc_ah);
2037 } else 2091 } else
2038 sc->rx.rxlink = NULL; 2092 sc->rx.rxlink = NULL;
2039 2093
2040#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 2094#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2041 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2095 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2042 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll); 2096 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
2043#endif 2097#endif
2044 /* disable HAL and put h/w to sleep */ 2098 /* disable HAL and put h/w to sleep */
@@ -2047,6 +2101,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2047 2101
2048 sc->sc_flags |= SC_OP_INVALID; 2102 sc->sc_flags |= SC_OP_INVALID;
2049 2103
2104 mutex_unlock(&sc->mutex);
2105
2050 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n"); 2106 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
2051} 2107}
2052 2108
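From ath9k_start() onwards, most of the mac80211 callbacks that touch the hardware now take sc->mutex around their work and, where the chip may be asleep, pair that with ath9k_ps_wakeup()/ath9k_ps_restore() inside the lock. A hypothetical callback skeleton showing the ordering the converted handlers follow (not code from this patch; sc->mutex is assumed to be initialized during attach):

/* Hypothetical skeleton, for illustration only. */
static int ath9k_example_op(struct ieee80211_hw *hw)
{
        struct ath_softc *sc = hw->priv;
        int ret = 0;

        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);

        /* hardware access goes here */

        ath9k_ps_restore(sc);
        mutex_unlock(&sc->mutex);

        return ret;
}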
@@ -2054,14 +2110,16 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2054 struct ieee80211_if_init_conf *conf) 2110 struct ieee80211_if_init_conf *conf)
2055{ 2111{
2056 struct ath_softc *sc = hw->priv; 2112 struct ath_softc *sc = hw->priv;
2057 struct ath_vap *avp = (void *)conf->vif->drv_priv; 2113 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2058 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 2114 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2059 2115
2060 /* Support only vap for now */ 2116 /* Support only vif for now */
2061 2117
2062 if (sc->sc_nvaps) 2118 if (sc->nvifs)
2063 return -ENOBUFS; 2119 return -ENOBUFS;
2064 2120
2121 mutex_lock(&sc->mutex);
2122
2065 switch (conf->type) { 2123 switch (conf->type) {
2066 case NL80211_IFTYPE_STATION: 2124 case NL80211_IFTYPE_STATION:
2067 ic_opmode = NL80211_IFTYPE_STATION; 2125 ic_opmode = NL80211_IFTYPE_STATION;
@@ -2078,28 +2136,51 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2078 return -EOPNOTSUPP; 2136 return -EOPNOTSUPP;
2079 } 2137 }
2080 2138
2081 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VAP of type: %d\n", ic_opmode); 2139 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode);
2082 2140
2083 /* Set the VAP opmode */ 2141 /* Set the VIF opmode */
2084 avp->av_opmode = ic_opmode; 2142 avp->av_opmode = ic_opmode;
2085 avp->av_bslot = -1; 2143 avp->av_bslot = -1;
2086 2144
2087 if (ic_opmode == NL80211_IFTYPE_AP) 2145 if (ic_opmode == NL80211_IFTYPE_AP)
2088 ath9k_hw_set_tsfadjust(sc->sc_ah, 1); 2146 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
2089 2147
2090 sc->sc_vaps[0] = conf->vif; 2148 sc->vifs[0] = conf->vif;
2091 sc->sc_nvaps++; 2149 sc->nvifs++;
2092 2150
2093 /* Set the device opmode */ 2151 /* Set the device opmode */
2094 sc->sc_ah->ah_opmode = ic_opmode; 2152 sc->sc_ah->opmode = ic_opmode;
2153
2154 /*
2155 * Enable MIB interrupts when there are hardware phy counters.
2156 * Note we only do this (at the moment) for station mode.
2157 */
2158 if (ath9k_hw_phycounters(sc->sc_ah) &&
2159 ((conf->type == NL80211_IFTYPE_STATION) ||
2160 (conf->type == NL80211_IFTYPE_ADHOC)))
2161 sc->imask |= ATH9K_INT_MIB;
2162 /*
2163 * Some hardware processes the TIM IE and fires an
2164 * interrupt when the TIM bit is set. For hardware
2165 * that does, if not overridden by configuration,
2166 * enable the TIM interrupt when operating as station.
2167 */
2168 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
2169 (conf->type == NL80211_IFTYPE_STATION) &&
2170 !sc->config.swBeaconProcess)
2171 sc->imask |= ATH9K_INT_TIM;
2172
2173 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2095 2174
2096 if (conf->type == NL80211_IFTYPE_AP) { 2175 if (conf->type == NL80211_IFTYPE_AP) {
2097 /* TODO: is this a suitable place to start ANI for AP mode? */ 2176 /* TODO: is this a suitable place to start ANI for AP mode? */
2098 /* Start ANI */ 2177 /* Start ANI */
2099 mod_timer(&sc->sc_ani.timer, 2178 mod_timer(&sc->ani.timer,
2100 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 2179 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
2101 } 2180 }
2102 2181
2182 mutex_unlock(&sc->mutex);
2183
2103 return 0; 2184 return 0;
2104} 2185}
2105 2186
@@ -2107,24 +2188,28 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2107 struct ieee80211_if_init_conf *conf) 2188 struct ieee80211_if_init_conf *conf)
2108{ 2189{
2109 struct ath_softc *sc = hw->priv; 2190 struct ath_softc *sc = hw->priv;
2110 struct ath_vap *avp = (void *)conf->vif->drv_priv; 2191 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2111 2192
2112 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n"); 2193 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
2113 2194
2195 mutex_lock(&sc->mutex);
2196
2114 /* Stop ANI */ 2197 /* Stop ANI */
2115 del_timer_sync(&sc->sc_ani.timer); 2198 del_timer_sync(&sc->ani.timer);
2116 2199
2117 /* Reclaim beacon resources */ 2200 /* Reclaim beacon resources */
2118 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP || 2201 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
2119 sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC) { 2202 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) {
2120 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2203 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2121 ath_beacon_return(sc, avp); 2204 ath_beacon_return(sc, avp);
2122 } 2205 }
2123 2206
2124 sc->sc_flags &= ~SC_OP_BEACONS; 2207 sc->sc_flags &= ~SC_OP_BEACONS;
2125 2208
2126 sc->sc_vaps[0] = NULL; 2209 sc->vifs[0] = NULL;
2127 sc->sc_nvaps--; 2210 sc->nvifs--;
2211
2212 mutex_unlock(&sc->mutex);
2128} 2213}
2129 2214
2130static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 2215static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2133,40 +2218,41 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2133 struct ieee80211_conf *conf = &hw->conf; 2218 struct ieee80211_conf *conf = &hw->conf;
2134 2219
2135 mutex_lock(&sc->mutex); 2220 mutex_lock(&sc->mutex);
2136 if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
2137 IEEE80211_CONF_CHANGE_HT)) {
2138 struct ieee80211_channel *curchan = hw->conf.channel;
2139 int pos;
2140 2221
2141 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 2222 if (changed & IEEE80211_CONF_CHANGE_PS) {
2142 curchan->center_freq); 2223 if (conf->flags & IEEE80211_CONF_PS) {
2143 2224 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
2144 pos = ath_get_channel(sc, curchan); 2225 sc->imask |= ATH9K_INT_TIM_TIMER;
2145 if (pos == -1) { 2226 ath9k_hw_set_interrupts(sc->sc_ah,
2146 DPRINTF(sc, ATH_DBG_FATAL, "Invalid channel: %d\n", 2227 sc->imask);
2147 curchan->center_freq); 2228 }
2148 mutex_unlock(&sc->mutex); 2229 ath9k_hw_setrxabort(sc->sc_ah, 1);
2149 return -EINVAL; 2230 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
2231 } else {
2232 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
2233 ath9k_hw_setrxabort(sc->sc_ah, 0);
2234 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
2235 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2236 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2237 ath9k_hw_set_interrupts(sc->sc_ah,
2238 sc->imask);
2239 }
2150 } 2240 }
2241 }
2151 2242
2152 sc->tx_chan_width = ATH9K_HT_MACMODE_20; 2243 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2153 sc->sc_ah->ah_channels[pos].chanmode = 2244 struct ieee80211_channel *curchan = hw->conf.channel;
2154 (curchan->band == IEEE80211_BAND_2GHZ) ? 2245 int pos = curchan->hw_value;
2155 CHANNEL_G : CHANNEL_A;
2156 2246
2157 if (conf->ht.enabled) { 2247 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2158 if (conf->ht.channel_type == NL80211_CHAN_HT40PLUS || 2248 curchan->center_freq);
2159 conf->ht.channel_type == NL80211_CHAN_HT40MINUS)
2160 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
2161 2249
2162 sc->sc_ah->ah_channels[pos].chanmode = 2250 /* XXX: remove me eventualy */
2163 ath_get_extchanmode(sc, curchan, 2251 ath9k_update_ichannel(sc, &sc->sc_ah->channels[pos]);
2164 conf->ht.channel_type);
2165 }
2166 2252
2167 ath_update_chainmask(sc, conf->ht.enabled); 2253 ath_update_chainmask(sc, conf_is_ht(conf));
2168 2254
2169 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) { 2255 if (ath_set_channel(sc, &sc->sc_ah->channels[pos]) < 0) {
2170 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n"); 2256 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
2171 mutex_unlock(&sc->mutex); 2257 mutex_unlock(&sc->mutex);
2172 return -EINVAL; 2258 return -EINVAL;
@@ -2174,9 +2260,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2174 } 2260 }
2175 2261
2176 if (changed & IEEE80211_CONF_CHANGE_POWER) 2262 if (changed & IEEE80211_CONF_CHANGE_POWER)
2177 sc->sc_config.txpowlimit = 2 * conf->power_level; 2263 sc->config.txpowlimit = 2 * conf->power_level;
2178 2264
2179 mutex_unlock(&sc->mutex); 2265 mutex_unlock(&sc->mutex);
2266
2180 return 0; 2267 return 0;
2181} 2268}
2182 2269
@@ -2185,18 +2272,20 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2185 struct ieee80211_if_conf *conf) 2272 struct ieee80211_if_conf *conf)
2186{ 2273{
2187 struct ath_softc *sc = hw->priv; 2274 struct ath_softc *sc = hw->priv;
2188 struct ath_hal *ah = sc->sc_ah; 2275 struct ath_hw *ah = sc->sc_ah;
2189 struct ath_vap *avp = (void *)vif->drv_priv; 2276 struct ath_vif *avp = (void *)vif->drv_priv;
2190 u32 rfilt = 0; 2277 u32 rfilt = 0;
2191 int error, i; 2278 int error, i;
2192 2279
2193 /* TODO: Need to decide which hw opmode to use for multi-interface 2280 /* TODO: Need to decide which hw opmode to use for multi-interface
2194 * cases */ 2281 * cases */
2195 if (vif->type == NL80211_IFTYPE_AP && 2282 if (vif->type == NL80211_IFTYPE_AP &&
2196 ah->ah_opmode != NL80211_IFTYPE_AP) { 2283 ah->opmode != NL80211_IFTYPE_AP) {
2197 ah->ah_opmode = NL80211_IFTYPE_STATION; 2284 ah->opmode = NL80211_IFTYPE_STATION;
2198 ath9k_hw_setopmode(ah); 2285 ath9k_hw_setopmode(ah);
2199 ath9k_hw_write_associd(ah, sc->sc_myaddr, 0); 2286 memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
2287 sc->curaid = 0;
2288 ath9k_hw_write_associd(sc);
2200 /* Request full reset to get hw opmode changed properly */ 2289 /* Request full reset to get hw opmode changed properly */
2201 sc->sc_flags |= SC_OP_FULL_RESET; 2290 sc->sc_flags |= SC_OP_FULL_RESET;
2202 } 2291 }
@@ -2207,17 +2296,16 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2207 case NL80211_IFTYPE_STATION: 2296 case NL80211_IFTYPE_STATION:
2208 case NL80211_IFTYPE_ADHOC: 2297 case NL80211_IFTYPE_ADHOC:
2209 /* Set BSSID */ 2298 /* Set BSSID */
2210 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN); 2299 memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
2211 sc->sc_curaid = 0; 2300 sc->curaid = 0;
2212 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid, 2301 ath9k_hw_write_associd(sc);
2213 sc->sc_curaid);
2214 2302
2215 /* Set aggregation protection mode parameters */ 2303 /* Set aggregation protection mode parameters */
2216 sc->sc_config.ath_aggr_prot = 0; 2304 sc->config.ath_aggr_prot = 0;
2217 2305
2218 DPRINTF(sc, ATH_DBG_CONFIG, 2306 DPRINTF(sc, ATH_DBG_CONFIG,
2219 "RX filter 0x%x bssid %pM aid 0x%x\n", 2307 "RX filter 0x%x bssid %pM aid 0x%x\n",
2220 rfilt, sc->sc_curbssid, sc->sc_curaid); 2308 rfilt, sc->curbssid, sc->curaid);
2221 2309
2222 /* need to reconfigure the beacon */ 2310 /* need to reconfigure the beacon */
2223 sc->sc_flags &= ~SC_OP_BEACONS ; 2311 sc->sc_flags &= ~SC_OP_BEACONS ;
@@ -2228,24 +2316,27 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2228 } 2316 }
2229 } 2317 }
2230 2318
2231 if ((conf->changed & IEEE80211_IFCC_BEACON) && 2319 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
2232 ((vif->type == NL80211_IFTYPE_ADHOC) || 2320 (vif->type == NL80211_IFTYPE_AP)) {
2233 (vif->type == NL80211_IFTYPE_AP))) { 2321 if ((conf->changed & IEEE80211_IFCC_BEACON) ||
2234 /* 2322 (conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
2235 * Allocate and setup the beacon frame. 2323 conf->enable_beacon)) {
2236 * 2324 /*
2237 * Stop any previous beacon DMA. This may be 2325 * Allocate and setup the beacon frame.
2238 * necessary, for example, when an ibss merge 2326 *
2239 * causes reconfiguration; we may be called 2327 * Stop any previous beacon DMA. This may be
2240 * with beacon transmission active. 2328 * necessary, for example, when an ibss merge
2241 */ 2329 * causes reconfiguration; we may be called
2242 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2330 * with beacon transmission active.
2331 */
2332 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2243 2333
2244 error = ath_beacon_alloc(sc, 0); 2334 error = ath_beacon_alloc(sc, 0);
2245 if (error != 0) 2335 if (error != 0)
2246 return error; 2336 return error;
2247 2337
2248 ath_beacon_sync(sc, 0); 2338 ath_beacon_sync(sc, 0);
2339 }
2249 } 2340 }
2250 2341
2251 /* Check for WLAN_CAPABILITY_PRIVACY ? */ 2342 /* Check for WLAN_CAPABILITY_PRIVACY ? */
@@ -2254,7 +2345,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2254 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i)) 2345 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
2255 ath9k_hw_keysetmac(sc->sc_ah, 2346 ath9k_hw_keysetmac(sc->sc_ah,
2256 (u16)i, 2347 (u16)i,
2257 sc->sc_curbssid); 2348 sc->curbssid);
2258 } 2349 }
2259 2350
2260 /* Only legacy IBSS for now */ 2351 /* Only legacy IBSS for now */
@@ -2290,8 +2381,11 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
2290 ath9k_hw_setrxfilter(sc->sc_ah, rfilt); 2381 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2291 2382
2292 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 2383 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
2293 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 2384 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
2294 ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0); 2385 memcpy(sc->curbssid, ath_bcast_mac, ETH_ALEN);
2386 sc->curaid = 0;
2387 ath9k_hw_write_associd(sc);
2388 }
2295 } 2389 }
2296 2390
2297 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter); 2391 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
@@ -2316,8 +2410,7 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
2316 } 2410 }
2317} 2411}
2318 2412
2319static int ath9k_conf_tx(struct ieee80211_hw *hw, 2413static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2320 u16 queue,
2321 const struct ieee80211_tx_queue_params *params) 2414 const struct ieee80211_tx_queue_params *params)
2322{ 2415{
2323 struct ath_softc *sc = hw->priv; 2416 struct ath_softc *sc = hw->priv;
@@ -2327,6 +2420,8 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
2327 if (queue >= WME_NUM_AC) 2420 if (queue >= WME_NUM_AC)
2328 return 0; 2421 return 0;
2329 2422
2423 mutex_lock(&sc->mutex);
2424
2330 qi.tqi_aifs = params->aifs; 2425 qi.tqi_aifs = params->aifs;
2331 qi.tqi_cwmin = params->cw_min; 2426 qi.tqi_cwmin = params->cw_min;
2332 qi.tqi_cwmax = params->cw_max; 2427 qi.tqi_cwmax = params->cw_max;
@@ -2343,29 +2438,35 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
2343 if (ret) 2438 if (ret)
2344 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n"); 2439 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
2345 2440
2441 mutex_unlock(&sc->mutex);
2442
2346 return ret; 2443 return ret;
2347} 2444}
2348 2445
2349static int ath9k_set_key(struct ieee80211_hw *hw, 2446static int ath9k_set_key(struct ieee80211_hw *hw,
2350 enum set_key_cmd cmd, 2447 enum set_key_cmd cmd,
2351 const u8 *local_addr, 2448 struct ieee80211_vif *vif,
2352 const u8 *addr, 2449 struct ieee80211_sta *sta,
2353 struct ieee80211_key_conf *key) 2450 struct ieee80211_key_conf *key)
2354{ 2451{
2355 struct ath_softc *sc = hw->priv; 2452 struct ath_softc *sc = hw->priv;
2356 int ret = 0; 2453 int ret = 0;
2357 2454
2455 mutex_lock(&sc->mutex);
2456 ath9k_ps_wakeup(sc);
2358 DPRINTF(sc, ATH_DBG_KEYCACHE, "Set HW Key\n"); 2457 DPRINTF(sc, ATH_DBG_KEYCACHE, "Set HW Key\n");
2359 2458
2360 switch (cmd) { 2459 switch (cmd) {
2361 case SET_KEY: 2460 case SET_KEY:
2362 ret = ath_key_config(sc, addr, key); 2461 ret = ath_key_config(sc, sta, key);
2363 if (ret >= 0) { 2462 if (ret >= 0) {
2364 key->hw_key_idx = ret; 2463 key->hw_key_idx = ret;
2365 /* push IV and Michael MIC generation to stack */ 2464 /* push IV and Michael MIC generation to stack */
2366 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 2465 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2367 if (key->alg == ALG_TKIP) 2466 if (key->alg == ALG_TKIP)
2368 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 2467 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2468 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
2469 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
2369 ret = 0; 2470 ret = 0;
2370 } 2471 }
2371 break; 2472 break;
@@ -2376,6 +2477,9 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
2376 ret = -EINVAL; 2477 ret = -EINVAL;
2377 } 2478 }
2378 2479
2480 ath9k_ps_restore(sc);
2481 mutex_unlock(&sc->mutex);
2482
2379 return ret; 2483 return ret;
2380} 2484}
2381 2485
@@ -2386,6 +2490,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2386{ 2490{
2387 struct ath_softc *sc = hw->priv; 2491 struct ath_softc *sc = hw->priv;
2388 2492
2493 mutex_lock(&sc->mutex);
2494
2389 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2495 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2390 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 2496 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2391 bss_conf->use_short_preamble); 2497 bss_conf->use_short_preamble);
@@ -2410,31 +2516,44 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2410 bss_conf->assoc); 2516 bss_conf->assoc);
2411 ath9k_bss_assoc_info(sc, vif, bss_conf); 2517 ath9k_bss_assoc_info(sc, vif, bss_conf);
2412 } 2518 }
2519
2520 mutex_unlock(&sc->mutex);
2413} 2521}
2414 2522
2415static u64 ath9k_get_tsf(struct ieee80211_hw *hw) 2523static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
2416{ 2524{
2417 u64 tsf; 2525 u64 tsf;
2418 struct ath_softc *sc = hw->priv; 2526 struct ath_softc *sc = hw->priv;
2419 struct ath_hal *ah = sc->sc_ah;
2420 2527
2421 tsf = ath9k_hw_gettsf64(ah); 2528 mutex_lock(&sc->mutex);
2529 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2530 mutex_unlock(&sc->mutex);
2422 2531
2423 return tsf; 2532 return tsf;
2424} 2533}
2425 2534
2535static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2536{
2537 struct ath_softc *sc = hw->priv;
2538
2539 mutex_lock(&sc->mutex);
2540 ath9k_hw_settsf64(sc->sc_ah, tsf);
2541 mutex_unlock(&sc->mutex);
2542}
2543
2426static void ath9k_reset_tsf(struct ieee80211_hw *hw) 2544static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2427{ 2545{
2428 struct ath_softc *sc = hw->priv; 2546 struct ath_softc *sc = hw->priv;
2429 struct ath_hal *ah = sc->sc_ah;
2430 2547
2431 ath9k_hw_reset_tsf(ah); 2548 mutex_lock(&sc->mutex);
2549 ath9k_hw_reset_tsf(sc->sc_ah);
2550 mutex_unlock(&sc->mutex);
2432} 2551}
2433 2552
2434static int ath9k_ampdu_action(struct ieee80211_hw *hw, 2553static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2435 enum ieee80211_ampdu_mlme_action action, 2554 enum ieee80211_ampdu_mlme_action action,
2436 struct ieee80211_sta *sta, 2555 struct ieee80211_sta *sta,
2437 u16 tid, u16 *ssn) 2556 u16 tid, u16 *ssn)
2438{ 2557{
2439 struct ath_softc *sc = hw->priv; 2558 struct ath_softc *sc = hw->priv;
2440 int ret = 0; 2559 int ret = 0;
@@ -2472,7 +2591,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2472 return ret; 2591 return ret;
2473} 2592}
2474 2593
2475static struct ieee80211_ops ath9k_ops = { 2594struct ieee80211_ops ath9k_ops = {
2476 .tx = ath9k_tx, 2595 .tx = ath9k_tx,
2477 .start = ath9k_start, 2596 .start = ath9k_start,
2478 .stop = ath9k_stop, 2597 .stop = ath9k_stop,
@@ -2486,6 +2605,7 @@ static struct ieee80211_ops ath9k_ops = {
2486 .bss_info_changed = ath9k_bss_info_changed, 2605 .bss_info_changed = ath9k_bss_info_changed,
2487 .set_key = ath9k_set_key, 2606 .set_key = ath9k_set_key,
2488 .get_tsf = ath9k_get_tsf, 2607 .get_tsf = ath9k_get_tsf,
2608 .set_tsf = ath9k_set_tsf,
2489 .reset_tsf = ath9k_reset_tsf, 2609 .reset_tsf = ath9k_reset_tsf,
2490 .ampdu_action = ath9k_ampdu_action, 2610 .ampdu_action = ath9k_ampdu_action,
2491}; 2611};
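Dropping the static qualifiers above is what lets the new bus front-ends reuse main.c: ath9k_ops, ath_attach(), ath_detach(), ath_cleanup() and the chip-name helpers become shared symbols. The declarations pci.c (and the AHB counterpart) presumably pick up from ath9k.h are, in sketch form:

/* Sketch of the shared declarations (assumed to live in ath9k.h). */
extern struct ieee80211_ops ath9k_ops;

int ath_attach(u16 devid, struct ath_softc *sc);
void ath_detach(struct ath_softc *sc);
void ath_cleanup(struct ath_softc *sc);

const char *ath_mac_bb_name(u32 mac_bb_version);
const char *ath_rf_name(u16 rf_version);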
@@ -2516,7 +2636,7 @@ static struct {
2516/* 2636/*
2517 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown. 2637 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2518 */ 2638 */
2519static const char * 2639const char *
2520ath_mac_bb_name(u32 mac_bb_version) 2640ath_mac_bb_name(u32 mac_bb_version)
2521{ 2641{
2522 int i; 2642 int i;
@@ -2533,7 +2653,7 @@ ath_mac_bb_name(u32 mac_bb_version)
2533/* 2653/*
2534 * Return the RF name. "????" is returned if the RF is unknown. 2654 * Return the RF name. "????" is returned if the RF is unknown.
2535 */ 2655 */
2536static const char * 2656const char *
2537ath_rf_name(u16 rf_version) 2657ath_rf_name(u16 rf_version)
2538{ 2658{
2539 int i; 2659 int i;
@@ -2547,254 +2667,51 @@ ath_rf_name(u16 rf_version)
2547 return "????"; 2667 return "????";
2548} 2668}
2549 2669
2550static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2670static int __init ath9k_init(void)
2551{
2552 void __iomem *mem;
2553 struct ath_softc *sc;
2554 struct ieee80211_hw *hw;
2555 u8 csz;
2556 u32 val;
2557 int ret = 0;
2558 struct ath_hal *ah;
2559
2560 if (pci_enable_device(pdev))
2561 return -EIO;
2562
2563 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2564
2565 if (ret) {
2566 printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
2567 goto bad;
2568 }
2569
2570 ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2571
2572 if (ret) {
2573 printk(KERN_ERR "ath9k: 32-bit DMA consistent "
2574 "DMA enable failed\n");
2575 goto bad;
2576 }
2577
2578 /*
2579 * Cache line size is used to size and align various
2580 * structures used to communicate with the hardware.
2581 */
2582 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
2583 if (csz == 0) {
2584 /*
2585 * Linux 2.4.18 (at least) writes the cache line size
2586 * register as a 16-bit wide register which is wrong.
2587 * We must have this setup properly for rx buffer
2588 * DMA to work so force a reasonable value here if it
2589 * comes up zero.
2590 */
2591 csz = L1_CACHE_BYTES / sizeof(u32);
2592 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
2593 }
2594 /*
2595 * The default setting of latency timer yields poor results,
2596 * set it to the value used by other systems. It may be worth
2597 * tweaking this setting more.
2598 */
2599 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
2600
2601 pci_set_master(pdev);
2602
2603 /*
2604 * Disable the RETRY_TIMEOUT register (0x41) to keep
2605 * PCI Tx retries from interfering with C3 CPU state.
2606 */
2607 pci_read_config_dword(pdev, 0x40, &val);
2608 if ((val & 0x0000ff00) != 0)
2609 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2610
2611 ret = pci_request_region(pdev, 0, "ath9k");
2612 if (ret) {
2613 dev_err(&pdev->dev, "PCI memory region reserve error\n");
2614 ret = -ENODEV;
2615 goto bad;
2616 }
2617
2618 mem = pci_iomap(pdev, 0, 0);
2619 if (!mem) {
2620 printk(KERN_ERR "PCI memory map error\n") ;
2621 ret = -EIO;
2622 goto bad1;
2623 }
2624
2625 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
2626 if (hw == NULL) {
2627 printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n");
2628 goto bad2;
2629 }
2630
2631 SET_IEEE80211_DEV(hw, &pdev->dev);
2632 pci_set_drvdata(pdev, hw);
2633
2634 sc = hw->priv;
2635 sc->hw = hw;
2636 sc->pdev = pdev;
2637 sc->mem = mem;
2638
2639 if (ath_attach(id->device, sc) != 0) {
2640 ret = -ENODEV;
2641 goto bad3;
2642 }
2643
2644 /* setup interrupt service routine */
2645
2646 if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) {
2647 printk(KERN_ERR "%s: request_irq failed\n",
2648 wiphy_name(hw->wiphy));
2649 ret = -EIO;
2650 goto bad4;
2651 }
2652
2653 ah = sc->sc_ah;
2654 printk(KERN_INFO
2655 "%s: Atheros AR%s MAC/BB Rev:%x "
2656 "AR%s RF Rev:%x: mem=0x%lx, irq=%d\n",
2657 wiphy_name(hw->wiphy),
2658 ath_mac_bb_name(ah->ah_macVersion),
2659 ah->ah_macRev,
2660 ath_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
2661 ah->ah_phyRev,
2662 (unsigned long)mem, pdev->irq);
2663
2664 return 0;
2665bad4:
2666 ath_detach(sc);
2667bad3:
2668 ieee80211_free_hw(hw);
2669bad2:
2670 pci_iounmap(pdev, mem);
2671bad1:
2672 pci_release_region(pdev, 0);
2673bad:
2674 pci_disable_device(pdev);
2675 return ret;
2676}
2677
2678static void ath_pci_remove(struct pci_dev *pdev)
2679{
2680 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2681 struct ath_softc *sc = hw->priv;
2682
2683 ath_detach(sc);
2684 if (pdev->irq)
2685 free_irq(pdev->irq, sc);
2686 pci_iounmap(pdev, sc->mem);
2687 pci_release_region(pdev, 0);
2688 pci_disable_device(pdev);
2689 ieee80211_free_hw(hw);
2690}
2691
2692#ifdef CONFIG_PM
2693
2694static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2695{
2696 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2697 struct ath_softc *sc = hw->priv;
2698
2699 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
2700
2701#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2702 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2703 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
2704#endif
2705
2706 pci_save_state(pdev);
2707 pci_disable_device(pdev);
2708 pci_set_power_state(pdev, 3);
2709
2710 return 0;
2711}
2712
2713static int ath_pci_resume(struct pci_dev *pdev)
2714{
2715 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2716 struct ath_softc *sc = hw->priv;
2717 u32 val;
2718 int err;
2719
2720 err = pci_enable_device(pdev);
2721 if (err)
2722 return err;
2723 pci_restore_state(pdev);
2724 /*
2725 * Suspend/Resume resets the PCI configuration space, so we have to
2726 * re-disable the RETRY_TIMEOUT register (0x41) to keep
2727 * PCI Tx retries from interfering with C3 CPU state
2728 */
2729 pci_read_config_dword(pdev, 0x40, &val);
2730 if ((val & 0x0000ff00) != 0)
2731 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2732
2733 /* Enable LED */
2734 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
2735 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
2736 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
2737
2738#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2739 /*
2740 * check the h/w rfkill state on resume
2741 * and start the rfkill poll timer
2742 */
2743 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2744 queue_delayed_work(sc->hw->workqueue,
2745 &sc->rf_kill.rfkill_poll, 0);
2746#endif
2747
2748 return 0;
2749}
2750
2751#endif /* CONFIG_PM */
2752
2753MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
2754
2755static struct pci_driver ath_pci_driver = {
2756 .name = "ath9k",
2757 .id_table = ath_pci_id_table,
2758 .probe = ath_pci_probe,
2759 .remove = ath_pci_remove,
2760#ifdef CONFIG_PM
2761 .suspend = ath_pci_suspend,
2762 .resume = ath_pci_resume,
2763#endif /* CONFIG_PM */
2764};
2765
2766static int __init init_ath_pci(void)
2767{ 2671{
2768 int error; 2672 int error;
2769 2673
2770 printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION);
2771
2772 /* Register rate control algorithm */ 2674 /* Register rate control algorithm */
2773 error = ath_rate_control_register(); 2675 error = ath_rate_control_register();
2774 if (error != 0) { 2676 if (error != 0) {
2775 printk(KERN_ERR 2677 printk(KERN_ERR
2776 "Unable to register rate control algorithm: %d\n", 2678 "ath9k: Unable to register rate control "
2679 "algorithm: %d\n",
2777 error); 2680 error);
2778 ath_rate_control_unregister(); 2681 goto err_out;
2779 return error;
2780 } 2682 }
2781 2683
2782 if (pci_register_driver(&ath_pci_driver) < 0) { 2684 error = ath_pci_init();
2685 if (error < 0) {
2783 printk(KERN_ERR 2686 printk(KERN_ERR
2784 "ath_pci: No devices found, driver not installed.\n"); 2687 "ath9k: No PCI devices found, driver not installed.\n");
2785 ath_rate_control_unregister(); 2688 error = -ENODEV;
2786 pci_unregister_driver(&ath_pci_driver); 2689 goto err_rate_unregister;
2787 return -ENODEV; 2690 }
2691
2692 error = ath_ahb_init();
2693 if (error < 0) {
2694 error = -ENODEV;
2695 goto err_pci_exit;
2788 } 2696 }
2789 2697
2790 return 0; 2698 return 0;
2699
2700 err_pci_exit:
2701 ath_pci_exit();
2702
2703 err_rate_unregister:
2704 ath_rate_control_unregister();
2705 err_out:
2706 return error;
2791} 2707}
2792module_init(init_ath_pci); 2708module_init(ath9k_init);
2793 2709
2794static void __exit exit_ath_pci(void) 2710static void __exit ath9k_exit(void)
2795{ 2711{
2712 ath_ahb_exit();
2713 ath_pci_exit();
2796 ath_rate_control_unregister(); 2714 ath_rate_control_unregister();
2797 pci_unregister_driver(&ath_pci_driver);
2798 printk(KERN_INFO "%s: Driver unloaded\n", dev_info); 2715 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
2799} 2716}
2800module_exit(exit_ath_pci); 2717module_exit(ath9k_exit);
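The hunk above replaces the PCI-only init_ath_pci()/exit_ath_pci() pair with bus-agnostic ath9k_init()/ath9k_exit(): rate control is registered first, then the PCI glue, then the AHB glue, and each failure path unwinds only the steps that already succeeded via the err_pci_exit/err_rate_unregister/err_out labels. Below is a minimal, self-contained sketch of that goto-unwind idiom; the demo_* functions are stand-ins invented for illustration, not the driver's real entry points.

	/* build: cc -o unwind unwind.c */
	static int  demo_rate_init(void) { return 0; }  /* plays ath_rate_control_register() */
	static void demo_rate_exit(void) { }
	static int  demo_pci_init(void)  { return 0; }  /* plays ath_pci_init() */
	static void demo_pci_exit(void)  { }
	static int  demo_ahb_init(void)  { return 0; }  /* plays ath_ahb_init() */

	static int demo_init(void)
	{
		int error;

		error = demo_rate_init();
		if (error)
			goto err_out;

		error = demo_pci_init();
		if (error)
			goto err_rate_unregister;

		error = demo_ahb_init();
		if (error)
			goto err_pci_exit;

		return 0;

	err_pci_exit:
		demo_pci_exit();        /* undo only what succeeded, in reverse order */
	err_rate_unregister:
		demo_rate_exit();
	err_out:
		return error;
	}

	int main(void)
	{
		return demo_init();
	}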
diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c
new file mode 100644
index 00000000000..c28afe42b26
--- /dev/null
+++ b/drivers/net/wireless/ath9k/pci.c
@@ -0,0 +1,303 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/nl80211.h>
18#include <linux/pci.h>
19#include "ath9k.h"
20
21static struct pci_device_id ath_pci_id_table[] __devinitdata = {
22 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
23 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
24 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
25 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
26 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
27 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
28 { 0 }
29};
30
31/* return bus cachesize in 4B word units */
32static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz)
33{
34 u8 u8tmp;
35
36 pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE,
37 (u8 *)&u8tmp);
38 *csz = (int)u8tmp;
39
40 /*
41 * This check was put in to avoid "unpleasant" consequences if
42 * the bootrom has not fully initialized all PCI devices.
43 * Sometimes the cache line size register is not set
44 */
45
46 if (*csz == 0)
47 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
48}
49
50static void ath_pci_cleanup(struct ath_softc *sc)
51{
52 struct pci_dev *pdev = to_pci_dev(sc->dev);
53
54 pci_iounmap(pdev, sc->mem);
55 pci_release_region(pdev, 0);
56 pci_disable_device(pdev);
57}
58
59static bool ath_pci_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
60{
61 (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
62
63 if (!ath9k_hw_wait(ah,
64 AR_EEPROM_STATUS_DATA,
65 AR_EEPROM_STATUS_DATA_BUSY |
66 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
67 return false;
68 }
69
70 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
71 AR_EEPROM_STATUS_DATA_VAL);
72
73 return true;
74}
75
76static struct ath_bus_ops ath_pci_bus_ops = {
77 .read_cachesize = ath_pci_read_cachesize,
78 .cleanup = ath_pci_cleanup,
79 .eeprom_read = ath_pci_eeprom_read,
80};
81
82static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
83{
84 void __iomem *mem;
85 struct ath_softc *sc;
86 struct ieee80211_hw *hw;
87 u8 csz;
88 u32 val;
89 int ret = 0;
90 struct ath_hw *ah;
91
92 if (pci_enable_device(pdev))
93 return -EIO;
94
95 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
96
97 if (ret) {
98 printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
99 goto bad;
100 }
101
102 ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
103
104 if (ret) {
105 printk(KERN_ERR "ath9k: 32-bit consistent DMA "
106        "enable failed\n");
107 goto bad;
108 }
109
110 /*
111 * Cache line size is used to size and align various
112 * structures used to communicate with the hardware.
113 */
114 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
115 if (csz == 0) {
116 /*
117 * Linux 2.4.18 (at least) writes the cache line size
118 * register as a 16-bit wide register which is wrong.
119 * We must have this setup properly for rx buffer
120 * DMA to work so force a reasonable value here if it
121 * comes up zero.
122 */
123 csz = L1_CACHE_BYTES / sizeof(u32);
124 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
125 }
126 /*
127 * The default setting of latency timer yields poor results,
128 * set it to the value used by other systems. It may be worth
129 * tweaking this setting more.
130 */
131 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
132
133 pci_set_master(pdev);
134
135 /*
136 * Disable the RETRY_TIMEOUT register (0x41) to keep
137 * PCI Tx retries from interfering with C3 CPU state.
138 */
139 pci_read_config_dword(pdev, 0x40, &val);
140 if ((val & 0x0000ff00) != 0)
141 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
142
143 ret = pci_request_region(pdev, 0, "ath9k");
144 if (ret) {
145 dev_err(&pdev->dev, "PCI memory region reserve error\n");
146 ret = -ENODEV;
147 goto bad;
148 }
149
150 mem = pci_iomap(pdev, 0, 0);
151 if (!mem) {
152 printk(KERN_ERR "PCI memory map error\n");
153 ret = -EIO;
154 goto bad1;
155 }
156
157 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
158 if (hw == NULL) {
159 printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n");
160 goto bad2;
161 }
162
163 SET_IEEE80211_DEV(hw, &pdev->dev);
164 pci_set_drvdata(pdev, hw);
165
166 sc = hw->priv;
167 sc->hw = hw;
168 sc->dev = &pdev->dev;
169 sc->mem = mem;
170 sc->bus_ops = &ath_pci_bus_ops;
171
172 if (ath_attach(id->device, sc) != 0) {
173 ret = -ENODEV;
174 goto bad3;
175 }
176
177 /* setup interrupt service routine */
178
179 if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) {
180 printk(KERN_ERR "%s: request_irq failed\n",
181 wiphy_name(hw->wiphy));
182 ret = -EIO;
183 goto bad4;
184 }
185
186 sc->irq = pdev->irq;
187
188 ah = sc->sc_ah;
189 printk(KERN_INFO
190 "%s: Atheros AR%s MAC/BB Rev:%x "
191 "AR%s RF Rev:%x: mem=0x%lx, irq=%d\n",
192 wiphy_name(hw->wiphy),
193 ath_mac_bb_name(ah->hw_version.macVersion),
194 ah->hw_version.macRev,
195 ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
196 ah->hw_version.phyRev,
197 (unsigned long)mem, pdev->irq);
198
199 return 0;
200bad4:
201 ath_detach(sc);
202bad3:
203 ieee80211_free_hw(hw);
204bad2:
205 pci_iounmap(pdev, mem);
206bad1:
207 pci_release_region(pdev, 0);
208bad:
209 pci_disable_device(pdev);
210 return ret;
211}
212
213static void ath_pci_remove(struct pci_dev *pdev)
214{
215 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
216 struct ath_softc *sc = hw->priv;
217
218 ath_cleanup(sc);
219}
220
221#ifdef CONFIG_PM
222
223static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
224{
225 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
226 struct ath_softc *sc = hw->priv;
227
228 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
229
230#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
231 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
232 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
233#endif
234
235 pci_save_state(pdev);
236 pci_disable_device(pdev);
237 pci_set_power_state(pdev, PCI_D3hot);
238
239 return 0;
240}
241
242static int ath_pci_resume(struct pci_dev *pdev)
243{
244 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
245 struct ath_softc *sc = hw->priv;
246 u32 val;
247 int err;
248
249 err = pci_enable_device(pdev);
250 if (err)
251 return err;
252 pci_restore_state(pdev);
253 /*
254 * Suspend/Resume resets the PCI configuration space, so we have to
255 * re-disable the RETRY_TIMEOUT register (0x41) to keep
256 * PCI Tx retries from interfering with C3 CPU state
257 */
258 pci_read_config_dword(pdev, 0x40, &val);
259 if ((val & 0x0000ff00) != 0)
260 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
261
262 /* Enable LED */
263 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
264 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
265 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
266
267#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
268 /*
269 * check the h/w rfkill state on resume
270 * and start the rfkill poll timer
271 */
272 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
273 queue_delayed_work(sc->hw->workqueue,
274 &sc->rf_kill.rfkill_poll, 0);
275#endif
276
277 return 0;
278}
279
280#endif /* CONFIG_PM */
281
282MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
283
284static struct pci_driver ath_pci_driver = {
285 .name = "ath9k",
286 .id_table = ath_pci_id_table,
287 .probe = ath_pci_probe,
288 .remove = ath_pci_remove,
289#ifdef CONFIG_PM
290 .suspend = ath_pci_suspend,
291 .resume = ath_pci_resume,
292#endif /* CONFIG_PM */
293};
294
295int __init ath_pci_init(void)
296{
297 return pci_register_driver(&ath_pci_driver);
298}
299
300void ath_pci_exit(void)
301{
302 pci_unregister_driver(&ath_pci_driver);
303}
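pci.c now holds only the PCI-specific pieces and hands them to the common code through struct ath_bus_ops (read_cachesize, cleanup, eeprom_read), so an AHB back end can supply a different callback set behind the same interface. A rough, self-contained illustration of dispatching through such an ops table follows; every demo_* name is invented for the example and is not part of the driver.

	struct demo_softc;                              /* per-device state, defined below */

	struct demo_bus_ops {                           /* mirrors the shape of struct ath_bus_ops */
		void (*read_cachesize)(struct demo_softc *sc, int *csz);
		void (*cleanup)(struct demo_softc *sc);
	};

	struct demo_softc {
		const struct demo_bus_ops *bus_ops;     /* set by the bus back end at probe time */
	};

	/* Common code calls through the pointer and never knows which bus it runs on. */
	static int demo_common_init(struct demo_softc *sc)
	{
		int csz = 0;

		sc->bus_ops->read_cachesize(sc, &csz);
		return (csz > 0) ? 0 : -1;
	}

	/* One possible back end, analogous to the PCI implementation above. */
	static void demo_pci_read_cachesize(struct demo_softc *sc, int *csz)
	{
		(void)sc;
		*csz = 16;                              /* pretend the cache line register said 16 words */
	}

	static void demo_pci_cleanup(struct demo_softc *sc)
	{
		(void)sc;
	}

	static const struct demo_bus_ops demo_pci_bus_ops = {
		.read_cachesize = demo_pci_read_cachesize,
		.cleanup        = demo_pci_cleanup,
	};

	int main(void)
	{
		struct demo_softc sc = { .bus_ops = &demo_pci_bus_ops };
		int ret = demo_common_init(&sc);

		sc.bus_ops->cleanup(&sc);
		return ret;
	}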
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
index 766982a8196..52aa2a7abe7 100644
--- a/drivers/net/wireless/ath9k/phy.c
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -14,22 +14,17 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21 18
22void 19void
23ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, u32 freqIndex, 20ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex, u32 freqIndex,
24 int regWrites) 21 int regWrites)
25{ 22{
26 struct ath_hal_5416 *ahp = AH5416(ah); 23 REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
27
28 REG_WRITE_ARRAY(&ahp->ah_iniBB_RfGain, freqIndex, regWrites);
29} 24}
30 25
31bool 26bool
32ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan) 27ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
33{ 28{
34 u32 channelSel = 0; 29 u32 channelSel = 0;
35 u32 bModeSynth = 0; 30 u32 bModeSynth = 0;
@@ -95,15 +90,14 @@ ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan)
95 90
96 REG_WRITE(ah, AR_PHY(0x37), reg32); 91 REG_WRITE(ah, AR_PHY(0x37), reg32);
97 92
98 ah->ah_curchan = chan; 93 ah->curchan = chan;
99 94 ah->curchan_rad_index = -1;
100 AH5416(ah)->ah_curchanRadIndex = -1;
101 95
102 return true; 96 return true;
103} 97}
104 98
105bool 99bool
106ath9k_hw_ar9280_set_channel(struct ath_hal *ah, 100ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
107 struct ath9k_channel *chan) 101 struct ath9k_channel *chan)
108{ 102{
109 u16 bMode, fracMode, aModeRefSel = 0; 103 u16 bMode, fracMode, aModeRefSel = 0;
@@ -166,9 +160,8 @@ ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
166 160
167 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); 161 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
168 162
169 ah->ah_curchan = chan; 163 ah->curchan = chan;
170 164 ah->curchan_rad_index = -1;
171 AH5416(ah)->ah_curchanRadIndex = -1;
172 165
173 return true; 166 return true;
174} 167}
@@ -201,11 +194,9 @@ ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
201} 194}
202 195
203bool 196bool
204ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan, 197ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
205 u16 modesIndex) 198 u16 modesIndex)
206{ 199{
207 struct ath_hal_5416 *ahp = AH5416(ah);
208
209 u32 eepMinorRev; 200 u32 eepMinorRev;
210 u32 ob5GHz = 0, db5GHz = 0; 201 u32 ob5GHz = 0, db5GHz = 0;
211 u32 ob2GHz = 0, db2GHz = 0; 202 u32 ob2GHz = 0, db2GHz = 0;
@@ -214,161 +205,156 @@ ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan,
214 if (AR_SREV_9280_10_OR_LATER(ah)) 205 if (AR_SREV_9280_10_OR_LATER(ah))
215 return true; 206 return true;
216 207
217 eepMinorRev = ath9k_hw_get_eeprom(ah, EEP_MINOR_REV); 208 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
218 209
219 RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1); 210 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
220 211
221 RF_BANK_SETUP(ahp->ah_analogBank1Data, &ahp->ah_iniBank1, 1); 212 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
222 213
223 RF_BANK_SETUP(ahp->ah_analogBank2Data, &ahp->ah_iniBank2, 1); 214 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
224 215
225 RF_BANK_SETUP(ahp->ah_analogBank3Data, &ahp->ah_iniBank3, 216 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
226 modesIndex); 217 modesIndex);
227 { 218 {
228 int i; 219 int i;
229 for (i = 0; i < ahp->ah_iniBank6TPC.ia_rows; i++) { 220 for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
230 ahp->ah_analogBank6Data[i] = 221 ah->analogBank6Data[i] =
231 INI_RA(&ahp->ah_iniBank6TPC, i, modesIndex); 222 INI_RA(&ah->iniBank6TPC, i, modesIndex);
232 } 223 }
233 } 224 }
234 225
235 if (eepMinorRev >= 2) { 226 if (eepMinorRev >= 2) {
236 if (IS_CHAN_2GHZ(chan)) { 227 if (IS_CHAN_2GHZ(chan)) {
237 ob2GHz = ath9k_hw_get_eeprom(ah, EEP_OB_2); 228 ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
238 db2GHz = ath9k_hw_get_eeprom(ah, EEP_DB_2); 229 db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
239 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 230 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
240 ob2GHz, 3, 197, 0); 231 ob2GHz, 3, 197, 0);
241 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 232 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
242 db2GHz, 3, 194, 0); 233 db2GHz, 3, 194, 0);
243 } else { 234 } else {
244 ob5GHz = ath9k_hw_get_eeprom(ah, EEP_OB_5); 235 ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
245 db5GHz = ath9k_hw_get_eeprom(ah, EEP_DB_5); 236 db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
246 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 237 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
247 ob5GHz, 3, 203, 0); 238 ob5GHz, 3, 203, 0);
248 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 239 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
249 db5GHz, 3, 200, 0); 240 db5GHz, 3, 200, 0);
250 } 241 }
251 } 242 }
252 243
253 RF_BANK_SETUP(ahp->ah_analogBank7Data, &ahp->ah_iniBank7, 1); 244 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
254 245
255 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank0, ahp->ah_analogBank0Data, 246 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
256 regWrites); 247 regWrites);
257 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank1, ahp->ah_analogBank1Data, 248 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
258 regWrites); 249 regWrites);
259 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank2, ahp->ah_analogBank2Data, 250 REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
260 regWrites); 251 regWrites);
261 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank3, ahp->ah_analogBank3Data, 252 REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
262 regWrites); 253 regWrites);
263 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6TPC, ahp->ah_analogBank6Data, 254 REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
264 regWrites); 255 regWrites);
265 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank7, ahp->ah_analogBank7Data, 256 REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
266 regWrites); 257 regWrites);
267 258
268 return true; 259 return true;
269} 260}
270 261
271void 262void
272ath9k_hw_rfdetach(struct ath_hal *ah) 263ath9k_hw_rfdetach(struct ath_hw *ah)
273{ 264{
274 struct ath_hal_5416 *ahp = AH5416(ah); 265 if (ah->analogBank0Data != NULL) {
275 266 kfree(ah->analogBank0Data);
276 if (ahp->ah_analogBank0Data != NULL) { 267 ah->analogBank0Data = NULL;
277 kfree(ahp->ah_analogBank0Data);
278 ahp->ah_analogBank0Data = NULL;
279 } 268 }
280 if (ahp->ah_analogBank1Data != NULL) { 269 if (ah->analogBank1Data != NULL) {
281 kfree(ahp->ah_analogBank1Data); 270 kfree(ah->analogBank1Data);
282 ahp->ah_analogBank1Data = NULL; 271 ah->analogBank1Data = NULL;
283 } 272 }
284 if (ahp->ah_analogBank2Data != NULL) { 273 if (ah->analogBank2Data != NULL) {
285 kfree(ahp->ah_analogBank2Data); 274 kfree(ah->analogBank2Data);
286 ahp->ah_analogBank2Data = NULL; 275 ah->analogBank2Data = NULL;
287 } 276 }
288 if (ahp->ah_analogBank3Data != NULL) { 277 if (ah->analogBank3Data != NULL) {
289 kfree(ahp->ah_analogBank3Data); 278 kfree(ah->analogBank3Data);
290 ahp->ah_analogBank3Data = NULL; 279 ah->analogBank3Data = NULL;
291 } 280 }
292 if (ahp->ah_analogBank6Data != NULL) { 281 if (ah->analogBank6Data != NULL) {
293 kfree(ahp->ah_analogBank6Data); 282 kfree(ah->analogBank6Data);
294 ahp->ah_analogBank6Data = NULL; 283 ah->analogBank6Data = NULL;
295 } 284 }
296 if (ahp->ah_analogBank6TPCData != NULL) { 285 if (ah->analogBank6TPCData != NULL) {
297 kfree(ahp->ah_analogBank6TPCData); 286 kfree(ah->analogBank6TPCData);
298 ahp->ah_analogBank6TPCData = NULL; 287 ah->analogBank6TPCData = NULL;
299 } 288 }
300 if (ahp->ah_analogBank7Data != NULL) { 289 if (ah->analogBank7Data != NULL) {
301 kfree(ahp->ah_analogBank7Data); 290 kfree(ah->analogBank7Data);
302 ahp->ah_analogBank7Data = NULL; 291 ah->analogBank7Data = NULL;
303 } 292 }
304 if (ahp->ah_addac5416_21 != NULL) { 293 if (ah->addac5416_21 != NULL) {
305 kfree(ahp->ah_addac5416_21); 294 kfree(ah->addac5416_21);
306 ahp->ah_addac5416_21 = NULL; 295 ah->addac5416_21 = NULL;
307 } 296 }
308 if (ahp->ah_bank6Temp != NULL) { 297 if (ah->bank6Temp != NULL) {
309 kfree(ahp->ah_bank6Temp); 298 kfree(ah->bank6Temp);
310 ahp->ah_bank6Temp = NULL; 299 ah->bank6Temp = NULL;
311 } 300 }
312} 301}
313 302
314bool ath9k_hw_init_rf(struct ath_hal *ah, int *status) 303bool ath9k_hw_init_rf(struct ath_hw *ah, int *status)
315{ 304{
316 struct ath_hal_5416 *ahp = AH5416(ah);
317
318 if (!AR_SREV_9280_10_OR_LATER(ah)) { 305 if (!AR_SREV_9280_10_OR_LATER(ah)) {
319 306 ah->analogBank0Data =
320 ahp->ah_analogBank0Data =
321 kzalloc((sizeof(u32) * 307 kzalloc((sizeof(u32) *
322 ahp->ah_iniBank0.ia_rows), GFP_KERNEL); 308 ah->iniBank0.ia_rows), GFP_KERNEL);
323 ahp->ah_analogBank1Data = 309 ah->analogBank1Data =
324 kzalloc((sizeof(u32) * 310 kzalloc((sizeof(u32) *
325 ahp->ah_iniBank1.ia_rows), GFP_KERNEL); 311 ah->iniBank1.ia_rows), GFP_KERNEL);
326 ahp->ah_analogBank2Data = 312 ah->analogBank2Data =
327 kzalloc((sizeof(u32) * 313 kzalloc((sizeof(u32) *
328 ahp->ah_iniBank2.ia_rows), GFP_KERNEL); 314 ah->iniBank2.ia_rows), GFP_KERNEL);
329 ahp->ah_analogBank3Data = 315 ah->analogBank3Data =
330 kzalloc((sizeof(u32) * 316 kzalloc((sizeof(u32) *
331 ahp->ah_iniBank3.ia_rows), GFP_KERNEL); 317 ah->iniBank3.ia_rows), GFP_KERNEL);
332 ahp->ah_analogBank6Data = 318 ah->analogBank6Data =
333 kzalloc((sizeof(u32) * 319 kzalloc((sizeof(u32) *
334 ahp->ah_iniBank6.ia_rows), GFP_KERNEL); 320 ah->iniBank6.ia_rows), GFP_KERNEL);
335 ahp->ah_analogBank6TPCData = 321 ah->analogBank6TPCData =
336 kzalloc((sizeof(u32) * 322 kzalloc((sizeof(u32) *
337 ahp->ah_iniBank6TPC.ia_rows), GFP_KERNEL); 323 ah->iniBank6TPC.ia_rows), GFP_KERNEL);
338 ahp->ah_analogBank7Data = 324 ah->analogBank7Data =
339 kzalloc((sizeof(u32) * 325 kzalloc((sizeof(u32) *
340 ahp->ah_iniBank7.ia_rows), GFP_KERNEL); 326 ah->iniBank7.ia_rows), GFP_KERNEL);
341 327
342 if (ahp->ah_analogBank0Data == NULL 328 if (ah->analogBank0Data == NULL
343 || ahp->ah_analogBank1Data == NULL 329 || ah->analogBank1Data == NULL
344 || ahp->ah_analogBank2Data == NULL 330 || ah->analogBank2Data == NULL
345 || ahp->ah_analogBank3Data == NULL 331 || ah->analogBank3Data == NULL
346 || ahp->ah_analogBank6Data == NULL 332 || ah->analogBank6Data == NULL
347 || ahp->ah_analogBank6TPCData == NULL 333 || ah->analogBank6TPCData == NULL
348 || ahp->ah_analogBank7Data == NULL) { 334 || ah->analogBank7Data == NULL) {
349 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 335 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
350 "Cannot allocate RF banks\n"); 336 "Cannot allocate RF banks\n");
351 *status = -ENOMEM; 337 *status = -ENOMEM;
352 return false; 338 return false;
353 } 339 }
354 340
355 ahp->ah_addac5416_21 = 341 ah->addac5416_21 =
356 kzalloc((sizeof(u32) * 342 kzalloc((sizeof(u32) *
357 ahp->ah_iniAddac.ia_rows * 343 ah->iniAddac.ia_rows *
358 ahp->ah_iniAddac.ia_columns), GFP_KERNEL); 344 ah->iniAddac.ia_columns), GFP_KERNEL);
359 if (ahp->ah_addac5416_21 == NULL) { 345 if (ah->addac5416_21 == NULL) {
360 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 346 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
361 "Cannot allocate ah_addac5416_21\n"); 347 "Cannot allocate addac5416_21\n");
362 *status = -ENOMEM; 348 *status = -ENOMEM;
363 return false; 349 return false;
364 } 350 }
365 351
366 ahp->ah_bank6Temp = 352 ah->bank6Temp =
367 kzalloc((sizeof(u32) * 353 kzalloc((sizeof(u32) *
368 ahp->ah_iniBank6.ia_rows), GFP_KERNEL); 354 ah->iniBank6.ia_rows), GFP_KERNEL);
369 if (ahp->ah_bank6Temp == NULL) { 355 if (ah->bank6Temp == NULL) {
370 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 356 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
371 "Cannot allocate ah_bank6Temp\n"); 357 "Cannot allocate bank6Temp\n");
372 *status = -ENOMEM; 358 *status = -ENOMEM;
373 return false; 359 return false;
374 } 360 }
@@ -378,24 +364,23 @@ bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
378} 364}
379 365
380void 366void
381ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan) 367ath9k_hw_decrease_chain_power(struct ath_hw *ah, struct ath9k_channel *chan)
382{ 368{
383 int i, regWrites = 0; 369 int i, regWrites = 0;
384 struct ath_hal_5416 *ahp = AH5416(ah);
385 u32 bank6SelMask; 370 u32 bank6SelMask;
386 u32 *bank6Temp = ahp->ah_bank6Temp; 371 u32 *bank6Temp = ah->bank6Temp;
387 372
388 switch (ahp->ah_diversityControl) { 373 switch (ah->diversity_control) {
389 case ATH9K_ANT_FIXED_A: 374 case ATH9K_ANT_FIXED_A:
390 bank6SelMask = 375 bank6SelMask =
391 (ahp-> 376 (ah->
392 ah_antennaSwitchSwap & ANTSWAP_AB) ? REDUCE_CHAIN_0 : 377 antenna_switch_swap & ANTSWAP_AB) ? REDUCE_CHAIN_0 :
393 REDUCE_CHAIN_1; 378 REDUCE_CHAIN_1;
394 break; 379 break;
395 case ATH9K_ANT_FIXED_B: 380 case ATH9K_ANT_FIXED_B:
396 bank6SelMask = 381 bank6SelMask =
397 (ahp-> 382 (ah->
398 ah_antennaSwitchSwap & ANTSWAP_AB) ? REDUCE_CHAIN_1 : 383 antenna_switch_swap & ANTSWAP_AB) ? REDUCE_CHAIN_1 :
399 REDUCE_CHAIN_0; 384 REDUCE_CHAIN_0;
400 break; 385 break;
401 case ATH9K_ANT_VARIABLE: 386 case ATH9K_ANT_VARIABLE:
@@ -406,8 +391,8 @@ ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan)
406 break; 391 break;
407 } 392 }
408 393
409 for (i = 0; i < ahp->ah_iniBank6.ia_rows; i++) 394 for (i = 0; i < ah->iniBank6.ia_rows; i++)
410 bank6Temp[i] = ahp->ah_analogBank6Data[i]; 395 bank6Temp[i] = ah->analogBank6Data[i];
411 396
412 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask); 397 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
413 398
@@ -421,7 +406,7 @@ ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan)
421 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0); 406 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
422 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0); 407 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
423 408
424 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6, bank6Temp, regWrites); 409 REG_WRITE_RF_ARRAY(&ah->iniBank6, bank6Temp, regWrites);
425 410
426 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053); 411 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
427#ifdef ALTER_SWITCH 412#ifdef ALTER_SWITCH
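The phy.c hunks above illustrate the broader ath_hal to ath_hw conversion in this patch: the private struct ath_hal_5416 that used to be reached through the AH5416(ah) downcast is folded into struct ath_hw, so every ahp->ah_xxx access becomes a direct ah->xxx access. A compact sketch of the before/after access pattern is below; the type and field names are invented for the example, assuming the only change is where the state lives.

	/* Before: the public handle was embedded in a larger private structure,
	 * and a downcast macro reached the private fields. */
	struct old_hal { int opmode; };
	struct old_hal_5416 {
		struct old_hal ah;              /* public part, first member */
		int curchan_rad_index;          /* private state */
	};
	#define OLD_AH5416(_ah) ((struct old_hal_5416 *)(_ah))

	static void old_style(struct old_hal *ah)
	{
		OLD_AH5416(ah)->curchan_rad_index = -1;   /* was: AH5416(ah)->ah_curchanRadIndex */
	}

	/* After: one ath_hw-style structure holds everything directly. */
	struct new_hw {
		int opmode;
		int curchan_rad_index;
	};

	static void new_style(struct new_hw *ah)
	{
		ah->curchan_rad_index = -1;
	}

	int main(void)
	{
		struct old_hal_5416 o = { { 0 }, 0 };
		struct new_hw n = { 0, 0 };

		old_style(&o.ah);
		new_style(&n);
		return 0;
	}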
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 3a406a5c059..837a598a7ae 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -17,19 +17,19 @@
17#ifndef PHY_H 17#ifndef PHY_H
18#define PHY_H 18#define PHY_H
19 19
20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah, 20bool ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
21 struct ath9k_channel 21 struct ath9k_channel
22 *chan); 22 *chan);
23bool ath9k_hw_set_channel(struct ath_hal *ah, 23bool ath9k_hw_set_channel(struct ath_hw *ah,
24 struct ath9k_channel *chan); 24 struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, 25void ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex,
26 u32 freqIndex, int regWrites); 26 u32 freqIndex, int regWrites);
27bool ath9k_hw_set_rf_regs(struct ath_hal *ah, 27bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
28 struct ath9k_channel *chan, 28 struct ath9k_channel *chan,
29 u16 modesIndex); 29 u16 modesIndex);
30void ath9k_hw_decrease_chain_power(struct ath_hal *ah, 30void ath9k_hw_decrease_chain_power(struct ath_hw *ah,
31 struct ath9k_channel *chan); 31 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hal *ah, 32bool ath9k_hw_init_rf(struct ath_hw *ah,
33 int *status); 33 int *status);
34 34
35#define AR_PHY_BASE 0x9800 35#define AR_PHY_BASE 0x9800
@@ -533,7 +533,7 @@ bool ath9k_hw_init_rf(struct ath_hal *ah,
533#define ATH9K_KEY_XOR 0xaa 533#define ATH9K_KEY_XOR 0xaa
534 534
535#define ATH9K_IS_MIC_ENABLED(ah) \ 535#define ATH9K_IS_MIC_ENABLED(ah) \
536 (AH5416(ah)->ah_staId1Defaults & AR_STA_ID1_CRPT_MIC_ENABLE) 536 ((ah)->sta_id1_defaults & AR_STA_ID1_CRPT_MIC_ENABLE)
537 537
538#define ANTSWAP_AB 0x0001 538#define ANTSWAP_AB 0x0001
539#define REDUCE_CHAIN_0 0x00000050 539#define REDUCE_CHAIN_0 0x00000050
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 1b71b934bb5..a4e86319176 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -15,16 +15,15 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#include "core.h" 18#include "ath9k.h"
19 19
20static struct ath_rate_table ar5416_11na_ratetable = { 20static struct ath_rate_table ar5416_11na_ratetable = {
21 42, 21 42,
22 {0},
23 { 22 {
24 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 23 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
25 5400, 0x0b, 0x00, 12, 24 5400, 0x0b, 0x00, 12,
26 0, 2, 1, 0, 0, 0, 0, 0 }, 25 0, 2, 1, 0, 0, 0, 0, 0 },
27 { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 26 { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
28 7800, 0x0f, 0x00, 18, 27 7800, 0x0f, 0x00, 18,
29 0, 3, 1, 1, 1, 1, 1, 0 }, 28 0, 3, 1, 1, 1, 1, 1, 0 },
30 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 29 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
@@ -158,7 +157,6 @@ static struct ath_rate_table ar5416_11na_ratetable = {
158 157
159static struct ath_rate_table ar5416_11ng_ratetable = { 158static struct ath_rate_table ar5416_11ng_ratetable = {
160 46, 159 46,
161 {0},
162 { 160 {
163 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ 161 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
164 900, 0x1b, 0x00, 2, 162 900, 0x1b, 0x00, 2,
@@ -306,7 +304,6 @@ static struct ath_rate_table ar5416_11ng_ratetable = {
306 304
307static struct ath_rate_table ar5416_11a_ratetable = { 305static struct ath_rate_table ar5416_11a_ratetable = {
308 8, 306 8,
309 {0},
310 { 307 {
311 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 308 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
312 5400, 0x0b, 0x00, (0x80|12), 309 5400, 0x0b, 0x00, (0x80|12),
@@ -340,7 +337,6 @@ static struct ath_rate_table ar5416_11a_ratetable = {
340 337
341static struct ath_rate_table ar5416_11g_ratetable = { 338static struct ath_rate_table ar5416_11g_ratetable = {
342 12, 339 12,
343 {0},
344 { 340 {
345 { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ 341 { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
346 900, 0x1b, 0x00, 2, 342 900, 0x1b, 0x00, 2,
@@ -386,7 +382,6 @@ static struct ath_rate_table ar5416_11g_ratetable = {
386 382
387static struct ath_rate_table ar5416_11b_ratetable = { 383static struct ath_rate_table ar5416_11b_ratetable = {
388 4, 384 4,
389 {0},
390 { 385 {
391 { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ 386 { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
392 900, 0x1b, 0x00, (0x80|2), 387 900, 0x1b, 0x00, (0x80|2),
@@ -636,8 +631,7 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
636static u8 ath_rc_ratefind_ht(struct ath_softc *sc, 631static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
637 struct ath_rate_priv *ath_rc_priv, 632 struct ath_rate_priv *ath_rc_priv,
638 struct ath_rate_table *rate_table, 633 struct ath_rate_table *rate_table,
639 int probe_allowed, int *is_probing, 634 int *is_probing)
640 int is_retry)
641{ 635{
642 u32 dt, best_thruput, this_thruput, now_msec; 636 u32 dt, best_thruput, this_thruput, now_msec;
643 u8 rate, next_rate, best_rate, maxindex, minindex; 637 u8 rate, next_rate, best_rate, maxindex, minindex;
@@ -719,13 +713,6 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
719 } 713 }
720 714
721 rate = best_rate; 715 rate = best_rate;
722
723 /* if we are retrying for more than half the number
724 * of max retries, use the min rate for the next retry
725 */
726 if (is_retry)
727 rate = ath_rc_priv->valid_rate_index[minindex];
728
729 ath_rc_priv->rssi_last_lookup = rssi_last; 716 ath_rc_priv->rssi_last_lookup = rssi_last;
730 717
731 /* 718 /*
@@ -733,13 +720,12 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
733 * non-monoticity of 11g's rate table 720 * non-monoticity of 11g's rate table
734 */ 721 */
735 722
736 if (rate >= ath_rc_priv->rate_max_phy && probe_allowed) { 723 if (rate >= ath_rc_priv->rate_max_phy) {
737 rate = ath_rc_priv->rate_max_phy; 724 rate = ath_rc_priv->rate_max_phy;
738 725
739 /* Probe the next allowed phy state */ 726 /* Probe the next allowed phy state */
740 /* FIXME:XXXX Check to make sure ratMax is checked properly */
741 if (ath_rc_get_nextvalid_txrate(rate_table, 727 if (ath_rc_get_nextvalid_txrate(rate_table,
742 ath_rc_priv, rate, &next_rate) && 728 ath_rc_priv, rate, &next_rate) &&
743 (now_msec - ath_rc_priv->probe_time > 729 (now_msec - ath_rc_priv->probe_time >
744 rate_table->probe_interval) && 730 rate_table->probe_interval) &&
745 (ath_rc_priv->hw_maxretry_pktcnt >= 1)) { 731 (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
@@ -761,14 +747,17 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
761 return rate; 747 return rate;
762} 748}
763 749
764static void ath_rc_rate_set_series(struct ath_rate_table *rate_table , 750static void ath_rc_rate_set_series(struct ath_rate_table *rate_table,
765 struct ieee80211_tx_rate *rate, 751 struct ieee80211_tx_rate *rate,
752 struct ieee80211_tx_rate_control *txrc,
766 u8 tries, u8 rix, int rtsctsenable) 753 u8 tries, u8 rix, int rtsctsenable)
767{ 754{
768 rate->count = tries; 755 rate->count = tries;
769 rate->idx = rix; 756 rate->idx = rix;
770 757
771 if (rtsctsenable) 758 if (txrc->short_preamble)
759 rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
760 if (txrc->rts || rtsctsenable)
772 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; 761 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
773 if (WLAN_RC_PHY_40(rate_table->info[rix].phy)) 762 if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
774 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; 763 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
@@ -778,6 +767,43 @@ static void ath_rc_rate_set_series(struct ath_rate_table *rate_table ,
778 rate->flags |= IEEE80211_TX_RC_MCS; 767 rate->flags |= IEEE80211_TX_RC_MCS;
779} 768}
780 769
770static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
771 struct ath_rate_table *rate_table,
772 struct ieee80211_tx_info *tx_info)
773{
774 struct ieee80211_tx_rate *rates = tx_info->control.rates;
775 int i = 0, rix = 0, cix, enable_g_protection = 0;
776
777 /* get the cix for the lowest valid rix */
778 for (i = 3; i >= 0; i--) {
779 if (rates[i].count && (rates[i].idx >= 0)) {
780 rix = rates[i].idx;
781 break;
782 }
783 }
784 cix = rate_table->info[rix].ctrl_rate;
785
786 /* All protection frames are transmitted at 2Mb/s for 802.11g,
787 * otherwise we transmit them at 1Mb/s */
788 if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
789 !conf_is_ht(&sc->hw->conf))
790 enable_g_protection = 1;
791
792 /*
793 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
794 * just CTS. Note that this is only done for OFDM/HT unicast frames.
795 */
796 if ((sc->sc_flags & SC_OP_PROTECT_ENABLE) &&
797 !(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) &&
798 (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
799 WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
800 rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
801 cix = rate_table->info[enable_g_protection].ctrl_rate;
802 }
803
804 tx_info->control.rts_cts_rate_idx = cix;
805}
806
781static u8 ath_rc_rate_getidx(struct ath_softc *sc, 807static u8 ath_rc_rate_getidx(struct ath_softc *sc,
782 struct ath_rate_priv *ath_rc_priv, 808 struct ath_rate_priv *ath_rc_priv,
783 struct ath_rate_table *rate_table, 809 struct ath_rate_table *rate_table,
@@ -809,54 +835,56 @@ static u8 ath_rc_rate_getidx(struct ath_softc *sc,
809 835
810static void ath_rc_ratefind(struct ath_softc *sc, 836static void ath_rc_ratefind(struct ath_softc *sc,
811 struct ath_rate_priv *ath_rc_priv, 837 struct ath_rate_priv *ath_rc_priv,
812 int num_tries, int num_rates, 838 struct ieee80211_tx_rate_control *txrc)
813 struct ieee80211_tx_info *tx_info, int *is_probe,
814 int is_retry)
815{ 839{
816 u8 try_per_rate = 0, i = 0, rix, nrix;
817 struct ath_rate_table *rate_table; 840 struct ath_rate_table *rate_table;
841 struct sk_buff *skb = txrc->skb;
842 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
818 struct ieee80211_tx_rate *rates = tx_info->control.rates; 843 struct ieee80211_tx_rate *rates = tx_info->control.rates;
844 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
845 __le16 fc = hdr->frame_control;
846 u8 try_per_rate = 0, i = 0, rix, nrix;
847 int is_probe = 0;
819 848
820 rate_table = sc->cur_rate_table; 849 rate_table = sc->cur_rate_table;
821 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table, 1, 850 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table, &is_probe);
822 is_probe, is_retry);
823 nrix = rix; 851 nrix = rix;
824 852
825 if (*is_probe) { 853 if (is_probe) {
826 /* set one try for probe rates. For the 854 /* set one try for probe rates. For the
827 * probes don't enable rts */ 855 * probes don't enable rts */
828 ath_rc_rate_set_series(rate_table, 856 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
829 &rates[i++], 1, nrix, 0); 857 1, nrix, 0);
830 858
831 try_per_rate = (num_tries/num_rates); 859 try_per_rate = (ATH_11N_TXMAXTRY/4);
832 /* Get the next tried/allowed rate. No RTS for the next series 860 /* Get the next tried/allowed rate. No RTS for the next series
833 * after the probe rate 861 * after the probe rate
834 */ 862 */
835 nrix = ath_rc_rate_getidx(sc, 863 nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
836 ath_rc_priv, rate_table, nrix, 1, 0); 864 rate_table, nrix, 1, 0);
837 ath_rc_rate_set_series(rate_table, 865 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
838 &rates[i++], try_per_rate, nrix, 0); 866 try_per_rate, nrix, 0);
839 } else { 867 } else {
840 try_per_rate = (num_tries/num_rates); 868 try_per_rate = (ATH_11N_TXMAXTRY/4);
841 /* Set the choosen rate. No RTS for first series entry. */ 869 /* Set the choosen rate. No RTS for first series entry. */
842 ath_rc_rate_set_series(rate_table, 870 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
843 &rates[i++], try_per_rate, nrix, 0); 871 try_per_rate, nrix, 0);
844 } 872 }
845 873
846 /* Fill in the other rates for multirate retry */ 874 /* Fill in the other rates for multirate retry */
847 for ( ; i < num_rates; i++) { 875 for ( ; i < 4; i++) {
848 u8 try_num; 876 u8 try_num;
849 u8 min_rate; 877 u8 min_rate;
850 878
851 try_num = ((i + 1) == num_rates) ? 879 try_num = ((i + 1) == 4) ?
852 num_tries - (try_per_rate * i) : try_per_rate ; 880 ATH_11N_TXMAXTRY - (try_per_rate * i) : try_per_rate ;
853 min_rate = (((i + 1) == num_rates) && 0); 881 min_rate = (((i + 1) == 4) && 0);
854 882
855 nrix = ath_rc_rate_getidx(sc, ath_rc_priv, 883 nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
856 rate_table, nrix, 1, min_rate); 884 rate_table, nrix, 1, min_rate);
857 /* All other rates in the series have RTS enabled */ 885 /* All other rates in the series have RTS enabled */
858 ath_rc_rate_set_series(rate_table, 886 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
859 &rates[i], try_num, nrix, 1); 887 try_num, nrix, 1);
860 } 888 }
861 889
862 /* 890 /*
@@ -875,7 +903,7 @@ static void ath_rc_ratefind(struct ath_softc *sc,
875 * above conditions. 903 * above conditions.
876 */ 904 */
877 if ((sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ) && 905 if ((sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ) &&
878 (sc->hw->conf.ht.enabled)) { 906 (conf_is_ht(&sc->hw->conf))) {
879 u8 dot11rate = rate_table->info[rix].dot11rate; 907 u8 dot11rate = rate_table->info[rix].dot11rate;
880 u8 phy = rate_table->info[rix].phy; 908 u8 phy = rate_table->info[rix].phy;
881 if (i == 4 && 909 if (i == 4 &&
@@ -885,6 +913,24 @@ static void ath_rc_ratefind(struct ath_softc *sc,
885 rates[3].flags = rates[2].flags; 913 rates[3].flags = rates[2].flags;
886 } 914 }
887 } 915 }
916
917 /*
918 * Force hardware to use computed duration for next
919 * fragment by disabling multi-rate retry, which
920 * updates duration based on the multi-rate duration table.
921 *
922 * FIXME: Fix duration
923 */
924 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) &&
925 (ieee80211_has_morefrags(fc) ||
926 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG))) {
927 rates[1].count = rates[2].count = rates[3].count = 0;
928 rates[1].idx = rates[2].idx = rates[3].idx = 0;
929 rates[0].count = ATH_TXMAXTRY;
930 }
931
932 /* Setup RTS/CTS */
933 ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
888} 934}
889 935
890static bool ath_rc_update_per(struct ath_softc *sc, 936static bool ath_rc_update_per(struct ath_softc *sc,
@@ -1221,6 +1267,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1221 ath_rc_priv->per_down_time = now_msec; 1267 ath_rc_priv->per_down_time = now_msec;
1222 } 1268 }
1223 1269
1270 ath_debug_stat_retries(sc, tx_rate, xretries, retries);
1271
1224#undef CHK_RSSI 1272#undef CHK_RSSI
1225} 1273}
1226 1274
@@ -1346,13 +1394,13 @@ static void ath_rc_init(struct ath_softc *sc,
1346 u8 i, j, k, hi = 0, hthi = 0; 1394 u8 i, j, k, hi = 0, hthi = 0;
1347 1395
1348 /* FIXME: Adhoc */ 1396 /* FIXME: Adhoc */
1349 if ((sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) || 1397 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) ||
1350 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC)) { 1398 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) {
1351 bool is_cw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; 1399 bool is_cw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1352 rate_table = ath_choose_rate_table(sc, sband->band, 1400 rate_table = ath_choose_rate_table(sc, sband->band,
1353 sta->ht_cap.ht_supported, 1401 sta->ht_cap.ht_supported,
1354 is_cw_40); 1402 is_cw_40);
1355 } else if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) { 1403 } else if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
1356 /* cur_rate_table would be set on init through config() */ 1404 /* cur_rate_table would be set on init through config() */
1357 rate_table = sc->cur_rate_table; 1405 rate_table = sc->cur_rate_table;
1358 } 1406 }
@@ -1363,9 +1411,13 @@ static void ath_rc_init(struct ath_softc *sc,
1363 } 1411 }
1364 1412
1365 if (sta->ht_cap.ht_supported) { 1413 if (sta->ht_cap.ht_supported) {
1366 ath_rc_priv->ht_cap = (WLAN_RC_HT_FLAG | WLAN_RC_DS_FLAG); 1414 ath_rc_priv->ht_cap = WLAN_RC_HT_FLAG;
1415 if (sc->sc_ah->caps.tx_chainmask != 1)
1416 ath_rc_priv->ht_cap |= WLAN_RC_DS_FLAG;
1367 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) 1417 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
1368 ath_rc_priv->ht_cap |= WLAN_RC_40_FLAG; 1418 ath_rc_priv->ht_cap |= WLAN_RC_40_FLAG;
1419 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
1420 ath_rc_priv->ht_cap |= WLAN_RC_SGI_FLAG;
1369 } 1421 }
1370 1422
1371 /* Initial rate table size. Will change depending 1423 /* Initial rate table size. Will change depending
@@ -1395,16 +1447,16 @@ static void ath_rc_init(struct ath_softc *sc,
1395 if (!rateset->rs_nrates) { 1447 if (!rateset->rs_nrates) {
1396 /* No working rate, just initialize valid rates */ 1448 /* No working rate, just initialize valid rates */
1397 hi = ath_rc_init_validrates(ath_rc_priv, rate_table, 1449 hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
1398 ath_rc_priv->ht_cap); 1450 ath_rc_priv->ht_cap);
1399 } else { 1451 } else {
1400 /* Use intersection of working rates and valid rates */ 1452 /* Use intersection of working rates and valid rates */
1401 hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table, 1453 hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table,
1402 rateset, ath_rc_priv->ht_cap); 1454 rateset, ath_rc_priv->ht_cap);
1403 if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) { 1455 if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) {
1404 hthi = ath_rc_setvalid_htrates(ath_rc_priv, 1456 hthi = ath_rc_setvalid_htrates(ath_rc_priv,
1405 rate_table, 1457 rate_table,
1406 ht_mcs, 1458 ht_mcs,
1407 ath_rc_priv->ht_cap); 1459 ath_rc_priv->ht_cap);
1408 } 1460 }
1409 hi = A_MAX(hi, hthi); 1461 hi = A_MAX(hi, hthi);
1410 } 1462 }
@@ -1467,7 +1519,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1467 */ 1519 */
1468 if (tx_info_priv->tx.ts_flags & 1520 if (tx_info_priv->tx.ts_flags &
1469 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) && 1521 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
1470 ((sc->sc_ah->ah_txTrigLevel) >= ath_rc_priv->tx_triglevel_max)) { 1522 ((sc->sc_ah->tx_trig_level) >= ath_rc_priv->tx_triglevel_max)) {
1471 tx_status = 1; 1523 tx_status = 1;
1472 is_underrun = 1; 1524 is_underrun = 1;
1473 } 1525 }
@@ -1480,6 +1532,22 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1480 (is_underrun) ? ATH_11N_TXMAXTRY : 1532 (is_underrun) ? ATH_11N_TXMAXTRY :
1481 tx_info_priv->tx.ts_longretry); 1533 tx_info_priv->tx.ts_longretry);
1482 1534
1535 /* Check if aggregation has to be enabled for this tid */
1536 if (conf_is_ht(&sc->hw->conf)) {
1537 if (ieee80211_is_data_qos(fc)) {
1538 u8 *qc, tid;
1539 struct ath_node *an;
1540
1541 qc = ieee80211_get_qos_ctl(hdr);
1542 tid = qc[0] & 0xf;
1543 an = (struct ath_node *)sta->drv_priv;
1544
1545 if(ath_tx_aggr_check(sc, an, tid))
1546 ieee80211_start_tx_ba_session(sc->hw, hdr->addr1, tid);
1547 }
1548 }
1549
1550 ath_debug_stat_rc(sc, skb);
1483exit: 1551exit:
1484 kfree(tx_info_priv); 1552 kfree(tx_info_priv);
1485} 1553}
@@ -1490,11 +1558,9 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
1490 struct ieee80211_supported_band *sband = txrc->sband; 1558 struct ieee80211_supported_band *sband = txrc->sband;
1491 struct sk_buff *skb = txrc->skb; 1559 struct sk_buff *skb = txrc->skb;
1492 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1560 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1561 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1493 struct ath_softc *sc = priv; 1562 struct ath_softc *sc = priv;
1494 struct ieee80211_hw *hw = sc->hw;
1495 struct ath_rate_priv *ath_rc_priv = priv_sta; 1563 struct ath_rate_priv *ath_rc_priv = priv_sta;
1496 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1497 int is_probe = 0;
1498 __le16 fc = hdr->frame_control; 1564 __le16 fc = hdr->frame_control;
1499 1565
1500 /* lowest rate for management and multicast/broadcast frames */ 1566 /* lowest rate for management and multicast/broadcast frames */
@@ -1507,23 +1573,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
1507 } 1573 }
1508 1574
1509 /* Find tx rate for unicast frames */ 1575 /* Find tx rate for unicast frames */
1510 ath_rc_ratefind(sc, ath_rc_priv, ATH_11N_TXMAXTRY, 4, 1576 ath_rc_ratefind(sc, ath_rc_priv, txrc);
1511 tx_info, &is_probe, false);
1512
1513 /* Check if aggregation has to be enabled for this tid */
1514 if (hw->conf.ht.enabled) {
1515 if (ieee80211_is_data_qos(fc)) {
1516 u8 *qc, tid;
1517 struct ath_node *an;
1518
1519 qc = ieee80211_get_qos_ctl(hdr);
1520 tid = qc[0] & 0xf;
1521 an = (struct ath_node *)sta->drv_priv;
1522
1523 if(ath_tx_aggr_check(sc, an, tid))
1524 ieee80211_start_tx_ba_session(hw, hdr->addr1, tid);
1525 }
1526 }
1527} 1577}
1528 1578
1529static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, 1579static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
@@ -1578,7 +1628,7 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
1578 } 1628 }
1579 1629
1580 rate_priv->rssi_down_time = jiffies_to_msecs(jiffies); 1630 rate_priv->rssi_down_time = jiffies_to_msecs(jiffies);
1581 rate_priv->tx_triglevel_max = sc->sc_ah->ah_caps.tx_triglevel_max; 1631 rate_priv->tx_triglevel_max = sc->sc_ah->caps.tx_triglevel_max;
1582 1632
1583 return rate_priv; 1633 return rate_priv;
1584} 1634}
@@ -1607,16 +1657,8 @@ static void ath_setup_rate_table(struct ath_softc *sc,
1607{ 1657{
1608 int i; 1658 int i;
1609 1659
1610 for (i = 0; i < 256; i++)
1611 rate_table->rateCodeToIndex[i] = (u8)-1;
1612
1613 for (i = 0; i < rate_table->rate_cnt; i++) { 1660 for (i = 0; i < rate_table->rate_cnt; i++) {
1614 u8 code = rate_table->info[i].ratecode;
1615 u8 cix = rate_table->info[i].ctrl_rate; 1661 u8 cix = rate_table->info[i].ctrl_rate;
1616 u8 sh = rate_table->info[i].short_preamble;
1617
1618 rate_table->rateCodeToIndex[code] = i;
1619 rate_table->rateCodeToIndex[code | sh] = i;
1620 1662
1621 rate_table->info[i].lpAckDuration = 1663 rate_table->info[i].lpAckDuration =
1622 ath9k_hw_computetxtime(sc->sc_ah, rate_table, 1664 ath9k_hw_computetxtime(sc->sc_ah, rate_table,
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index 97c60d12e8a..d688ec51a14 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -19,13 +19,12 @@
19#ifndef RC_H 19#ifndef RC_H
20#define RC_H 20#define RC_H
21 21
22#include "ath9k.h"
23
24struct ath_softc; 22struct ath_softc;
25 23
26#define ATH_RATE_MAX 30 24#define ATH_RATE_MAX 30
27#define RATE_TABLE_SIZE 64 25#define RATE_TABLE_SIZE 64
28#define MAX_TX_RATE_PHY 48 26#define MAX_TX_RATE_PHY 48
27#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)
29 28
30/* VALID_ALL - valid for 20/40/Legacy, 29/* VALID_ALL - valid for 20/40/Legacy,
31 * VALID - Legacy only, 30 * VALID - Legacy only,
@@ -39,6 +38,20 @@ struct ath_softc;
39#define VALID_2040 (VALID_20|VALID_40) 38#define VALID_2040 (VALID_20|VALID_40)
40#define VALID_ALL (VALID_2040|VALID) 39#define VALID_ALL (VALID_2040|VALID)
41 40
41enum {
42 WLAN_RC_PHY_OFDM,
43 WLAN_RC_PHY_CCK,
44 WLAN_RC_PHY_HT_20_SS,
45 WLAN_RC_PHY_HT_20_DS,
46 WLAN_RC_PHY_HT_40_SS,
47 WLAN_RC_PHY_HT_40_DS,
48 WLAN_RC_PHY_HT_20_SS_HGI,
49 WLAN_RC_PHY_HT_20_DS_HGI,
50 WLAN_RC_PHY_HT_40_SS_HGI,
51 WLAN_RC_PHY_HT_40_DS_HGI,
52 WLAN_RC_PHY_MAX
53};
54
42#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \ 55#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
43 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 56 || (_phy == WLAN_RC_PHY_HT_40_DS) \
44 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 57 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
@@ -90,7 +103,6 @@ struct ath_softc;
90 */ 103 */
91struct ath_rate_table { 104struct ath_rate_table {
92 int rate_cnt; 105 int rate_cnt;
93 u8 rateCodeToIndex[256];
94 struct { 106 struct {
95 int valid; 107 int valid;
96 int valid_single_stream; 108 int valid_single_stream;
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 462e08c3d09..08f676af894 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18 18
19/* 19/*
20 * Setup and link descriptors. 20 * Setup and link descriptors.
@@ -26,7 +26,7 @@
26 */ 26 */
27static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 27static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
28{ 28{
29 struct ath_hal *ah = sc->sc_ah; 29 struct ath_hw *ah = sc->sc_ah;
30 struct ath_desc *ds; 30 struct ath_desc *ds;
31 struct sk_buff *skb; 31 struct sk_buff *skb;
32 32
@@ -97,11 +97,11 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
97 * Unfortunately this means we may get 8 KB here from the 97 * Unfortunately this means we may get 8 KB here from the
98 * kernel... and that is actually what is observed on some 98 * kernel... and that is actually what is observed on some
99 * systems :( */ 99 * systems :( */
100 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1); 100 skb = dev_alloc_skb(len + sc->cachelsz - 1);
101 if (skb != NULL) { 101 if (skb != NULL) {
102 off = ((unsigned long) skb->data) % sc->sc_cachelsz; 102 off = ((unsigned long) skb->data) % sc->cachelsz;
103 if (off != 0) 103 if (off != 0)
104 skb_reserve(skb, sc->sc_cachelsz - off); 104 skb_reserve(skb, sc->cachelsz - off);
105 } else { 105 } else {
106 DPRINTF(sc, ATH_DBG_FATAL, 106 DPRINTF(sc, ATH_DBG_FATAL,
107 "skbuff alloc of size %u failed\n", len); 107 "skbuff alloc of size %u failed\n", len);
@@ -135,7 +135,7 @@ static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
135 * discard the frame. Enable this if you want to see 135 * discard the frame. Enable this if you want to see
136 * error frames in Monitor mode. 136 * error frames in Monitor mode.
137 */ 137 */
138 if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_MONITOR) 138 if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
139 goto rx_next; 139 goto rx_next;
140 } else if (ds->ds_rxstat.rs_status != 0) { 140 } else if (ds->ds_rxstat.rs_status != 0) {
141 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) 141 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
@@ -161,7 +161,7 @@ static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
161 * decryption and MIC failures. For monitor mode, 161 * decryption and MIC failures. For monitor mode,
162 * we also ignore the CRC error. 162 * we also ignore the CRC error.
163 */ 163 */
164 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR) { 164 if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
165 if (ds->ds_rxstat.rs_status & 165 if (ds->ds_rxstat.rs_status &
166 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 166 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
167 ATH9K_RXERR_CRC)) 167 ATH9K_RXERR_CRC))
@@ -210,7 +210,7 @@ static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
210 rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp); 210 rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
211 rx_status->band = sc->hw->conf.channel->band; 211 rx_status->band = sc->hw->conf.channel->band;
212 rx_status->freq = sc->hw->conf.channel->center_freq; 212 rx_status->freq = sc->hw->conf.channel->center_freq;
213 rx_status->noise = sc->sc_ani.sc_noise_floor; 213 rx_status->noise = sc->ani.noise_floor;
214 rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi; 214 rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
215 rx_status->antenna = ds->ds_rxstat.rs_antenna; 215 rx_status->antenna = ds->ds_rxstat.rs_antenna;
216 216
@@ -233,7 +233,7 @@ rx_next:
233 233
234static void ath_opmode_init(struct ath_softc *sc) 234static void ath_opmode_init(struct ath_softc *sc)
235{ 235{
236 struct ath_hal *ah = sc->sc_ah; 236 struct ath_hw *ah = sc->sc_ah;
237 u32 rfilt, mfilt[2]; 237 u32 rfilt, mfilt[2];
238 238
239 /* configure rx filter */ 239 /* configure rx filter */
@@ -241,14 +241,14 @@ static void ath_opmode_init(struct ath_softc *sc)
241 ath9k_hw_setrxfilter(ah, rfilt); 241 ath9k_hw_setrxfilter(ah, rfilt);
242 242
243 /* configure bssid mask */ 243 /* configure bssid mask */
244 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 244 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
245 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask); 245 ath9k_hw_setbssidmask(sc);
246 246
247 /* configure operational mode */ 247 /* configure operational mode */
248 ath9k_hw_setopmode(ah); 248 ath9k_hw_setopmode(ah);
249 249
250 /* Handle any link-level address change. */ 250 /* Handle any link-level address change. */
251 ath9k_hw_setmac(ah, sc->sc_myaddr); 251 ath9k_hw_setmac(ah, sc->sc_ah->macaddr);
252 252
253 /* calculate and install multicast filter */ 253 /* calculate and install multicast filter */
254 mfilt[0] = mfilt[1] = ~0; 254 mfilt[0] = mfilt[1] = ~0;
@@ -267,11 +267,11 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
267 spin_lock_init(&sc->rx.rxbuflock); 267 spin_lock_init(&sc->rx.rxbuflock);
268 268
269 sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 269 sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
270 min(sc->sc_cachelsz, 270 min(sc->cachelsz,
271 (u16)64)); 271 (u16)64));
272 272
273 DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 273 DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
274 sc->sc_cachelsz, sc->rx.bufsize); 274 sc->cachelsz, sc->rx.bufsize);
275 275
276 /* Initialize rx descriptors */ 276 /* Initialize rx descriptors */
277 277
@@ -291,15 +291,15 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
291 } 291 }
292 292
293 bf->bf_mpdu = skb; 293 bf->bf_mpdu = skb;
294 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, 294 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
295 sc->rx.bufsize, 295 sc->rx.bufsize,
296 PCI_DMA_FROMDEVICE); 296 DMA_FROM_DEVICE);
297 if (unlikely(pci_dma_mapping_error(sc->pdev, 297 if (unlikely(dma_mapping_error(sc->dev,
298 bf->bf_buf_addr))) { 298 bf->bf_buf_addr))) {
299 dev_kfree_skb_any(skb); 299 dev_kfree_skb_any(skb);
300 bf->bf_mpdu = NULL; 300 bf->bf_mpdu = NULL;
301 DPRINTF(sc, ATH_DBG_CONFIG, 301 DPRINTF(sc, ATH_DBG_CONFIG,
302 "pci_dma_mapping_error() on RX init\n"); 302 "dma_mapping_error() on RX init\n");
303 error = -ENOMEM; 303 error = -ENOMEM;
304 break; 304 break;
305 } 305 }
@@ -360,25 +360,28 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
360 | ATH9K_RX_FILTER_MCAST; 360 | ATH9K_RX_FILTER_MCAST;
361 361
362 /* If not a STA, enable processing of Probe Requests */ 362 /* If not a STA, enable processing of Probe Requests */
363 if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_STATION) 363 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
364 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 364 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
365 365
366 /* Can't set HOSTAP into promiscous mode */ 366 /* Can't set HOSTAP into promiscous mode */
367 if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) && 367 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
368 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 368 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
369 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) { 369 (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) {
370 rfilt |= ATH9K_RX_FILTER_PROM; 370 rfilt |= ATH9K_RX_FILTER_PROM;
371 /* ??? To prevent from sending ACK */ 371 /* ??? To prevent from sending ACK */
372 rfilt &= ~ATH9K_RX_FILTER_UCAST; 372 rfilt &= ~ATH9K_RX_FILTER_UCAST;
373 } 373 }
374 374
375 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION || 375 if (sc->rx.rxfilter & FIF_CONTROL)
376 sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC) 376 rfilt |= ATH9K_RX_FILTER_CONTROL;
377
378 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION ||
379 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
377 rfilt |= ATH9K_RX_FILTER_BEACON; 380 rfilt |= ATH9K_RX_FILTER_BEACON;
378 381
379 /* If in HOSTAP mode, want to enable reception of PSPOLL frames 382 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
380 & beacon frames */ 383 & beacon frames */
381 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) 384 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP)
382 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); 385 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
383 386
384 return rfilt; 387 return rfilt;
@@ -388,7 +391,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
388 391
389int ath_startrecv(struct ath_softc *sc) 392int ath_startrecv(struct ath_softc *sc)
390{ 393{
391 struct ath_hal *ah = sc->sc_ah; 394 struct ath_hw *ah = sc->sc_ah;
392 struct ath_buf *bf, *tbf; 395 struct ath_buf *bf, *tbf;
393 396
394 spin_lock_bh(&sc->rx.rxbuflock); 397 spin_lock_bh(&sc->rx.rxbuflock);
@@ -418,7 +421,7 @@ start_recv:
418 421
419bool ath_stoprecv(struct ath_softc *sc) 422bool ath_stoprecv(struct ath_softc *sc)
420{ 423{
421 struct ath_hal *ah = sc->sc_ah; 424 struct ath_hw *ah = sc->sc_ah;
422 bool stopped; 425 bool stopped;
423 426
424 ath9k_hw_stoppcurecv(ah); 427 ath9k_hw_stoppcurecv(ah);
@@ -449,7 +452,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
449 struct ath_desc *ds; 452 struct ath_desc *ds;
450 struct sk_buff *skb = NULL, *requeue_skb; 453 struct sk_buff *skb = NULL, *requeue_skb;
451 struct ieee80211_rx_status rx_status; 454 struct ieee80211_rx_status rx_status;
452 struct ath_hal *ah = sc->sc_ah; 455 struct ath_hw *ah = sc->sc_ah;
453 struct ieee80211_hdr *hdr; 456 struct ieee80211_hdr *hdr;
454 int hdrlen, padsize, retval; 457 int hdrlen, padsize, retval;
455 bool decrypt_error = false; 458 bool decrypt_error = false;
@@ -524,9 +527,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
524 * 1. accessing the frame 527 * 1. accessing the frame
525 * 2. requeueing the same buffer to h/w 528 * 2. requeueing the same buffer to h/w
526 */ 529 */
527 pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr, 530 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
528 sc->rx.bufsize, 531 sc->rx.bufsize,
529 PCI_DMA_FROMDEVICE); 532 DMA_FROM_DEVICE);
530 533
531 /* 534 /*
532 * If we're asked to flush receive queue, directly 535 * If we're asked to flush receive queue, directly
@@ -557,9 +560,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
557 goto requeue; 560 goto requeue;
558 561
559 /* Unmap the frame */ 562 /* Unmap the frame */
560 pci_unmap_single(sc->pdev, bf->bf_buf_addr, 563 dma_unmap_single(sc->dev, bf->bf_buf_addr,
561 sc->rx.bufsize, 564 sc->rx.bufsize,
562 PCI_DMA_FROMDEVICE); 565 DMA_FROM_DEVICE);
563 566
564 skb_put(skb, ds->ds_rxstat.rs_datalen); 567 skb_put(skb, ds->ds_rxstat.rs_datalen);
565 skb->protocol = cpu_to_be16(ETH_P_CONTROL); 568 skb->protocol = cpu_to_be16(ETH_P_CONTROL);
@@ -590,24 +593,30 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
590 && !decrypt_error && skb->len >= hdrlen + 4) { 593 && !decrypt_error && skb->len >= hdrlen + 4) {
591 keyix = skb->data[hdrlen + 3] >> 6; 594 keyix = skb->data[hdrlen + 3] >> 6;
592 595
593 if (test_bit(keyix, sc->sc_keymap)) 596 if (test_bit(keyix, sc->keymap))
594 rx_status.flag |= RX_FLAG_DECRYPTED; 597 rx_status.flag |= RX_FLAG_DECRYPTED;
595 } 598 }
599 if (ah->sw_mgmt_crypto &&
600 (rx_status.flag & RX_FLAG_DECRYPTED) &&
601 ieee80211_is_mgmt(hdr->frame_control)) {
602 /* Use software decrypt for management frames. */
603 rx_status.flag &= ~RX_FLAG_DECRYPTED;
604 }
596 605
597 /* Send the frame to mac80211 */ 606 /* Send the frame to mac80211 */
598 __ieee80211_rx(sc->hw, skb, &rx_status); 607 __ieee80211_rx(sc->hw, skb, &rx_status);
599 608
600 /* We will now give hardware our shiny new allocated skb */ 609 /* We will now give hardware our shiny new allocated skb */
601 bf->bf_mpdu = requeue_skb; 610 bf->bf_mpdu = requeue_skb;
602 bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data, 611 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
603 sc->rx.bufsize, 612 sc->rx.bufsize,
604 PCI_DMA_FROMDEVICE); 613 DMA_FROM_DEVICE);
605 if (unlikely(pci_dma_mapping_error(sc->pdev, 614 if (unlikely(dma_mapping_error(sc->dev,
606 bf->bf_buf_addr))) { 615 bf->bf_buf_addr))) {
607 dev_kfree_skb_any(requeue_skb); 616 dev_kfree_skb_any(requeue_skb);
608 bf->bf_mpdu = NULL; 617 bf->bf_mpdu = NULL;
609 DPRINTF(sc, ATH_DBG_CONFIG, 618 DPRINTF(sc, ATH_DBG_CONFIG,
610 "pci_dma_mapping_error() on RX\n"); 619 "dma_mapping_error() on RX\n");
611 break; 620 break;
612 } 621 }
613 bf->bf_dmacontext = bf->bf_buf_addr; 622 bf->bf_dmacontext = bf->bf_buf_addr;
@@ -622,6 +631,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
622 } else { 631 } else {
623 sc->rx.rxotherant = 0; 632 sc->rx.rxotherant = 0;
624 } 633 }
634
635 if (ieee80211_is_beacon(hdr->frame_control) &&
636 (sc->sc_flags & SC_OP_WAIT_FOR_BEACON)) {
637 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
638 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
639 }
625requeue: 640requeue:
626 list_move_tail(&bf->list, &sc->rx.rxbuf); 641 list_move_tail(&bf->list, &sc->rx.rxbuf);
627 ath_rx_buf_link(sc, bf); 642 ath_rx_buf_link(sc, bf);
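The recv.c hunks above migrate the RX path from the PCI-specific DMA wrappers (pci_map_single(), pci_dma_sync_single_for_cpu(), pci_unmap_single(), pci_dma_mapping_error()) to the bus-agnostic DMA API driven by a struct device pointer. A minimal sketch of the same map/sync/unmap pattern follows; the device pointer, skb and buffer size are generic placeholders rather than ath9k fields.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_rx_buf_map(struct device *dev, struct sk_buff *skb,
			      size_t bufsize, dma_addr_t *paddr)
{
	/* Hand the buffer to the device; the hardware DMAs RX data into it. */
	*paddr = dma_map_single(dev, skb->data, bufsize, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *paddr)))
		return -ENOMEM;
	return 0;
}

static void example_rx_buf_complete(struct device *dev, dma_addr_t paddr,
				    size_t bufsize)
{
	/* Make the DMA-written data visible to the CPU before touching it... */
	dma_sync_single_for_cpu(dev, paddr, bufsize, DMA_FROM_DEVICE);
	/* ...and unmap once the frame is consumed or handed up the stack. */
	dma_unmap_single(dev, paddr, bufsize, DMA_FROM_DEVICE);
}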
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index 9fedb4911bc..17ed190349a 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -160,6 +160,7 @@
160 160
161#define AR_SREV_VERSION_9100 0x014 161#define AR_SREV_VERSION_9100 0x014
162 162
163#define AR_SREV_9100(ah) ((ah->hw_version.macVersion) == AR_SREV_VERSION_9100)
163#define AR_SREV_5416_V20_OR_LATER(_ah) \ 164#define AR_SREV_5416_V20_OR_LATER(_ah) \
164 (AR_SREV_9100((_ah)) || AR_SREV_5416_20_OR_LATER(_ah)) 165 (AR_SREV_9100((_ah)) || AR_SREV_5416_20_OR_LATER(_ah))
165#define AR_SREV_5416_V22_OR_LATER(_ah) \ 166#define AR_SREV_5416_V22_OR_LATER(_ah) \
@@ -746,44 +747,50 @@
746#define AR_SREV_REVISION_9285_12 2 747#define AR_SREV_REVISION_9285_12 2
747 748
748#define AR_SREV_9100_OR_LATER(_ah) \ 749#define AR_SREV_9100_OR_LATER(_ah) \
749 (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE)) 750 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_5416_PCIE))
750#define AR_SREV_5416_20_OR_LATER(_ah) \ 751#define AR_SREV_5416_20_OR_LATER(_ah) \
751 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \ 752 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9160) || \
752 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_20)) 753 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_20))
753#define AR_SREV_5416_22_OR_LATER(_ah) \ 754#define AR_SREV_5416_22_OR_LATER(_ah) \
754 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \ 755 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9160) || \
755 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_22)) 756 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22))
756#define AR_SREV_9160(_ah) \ 757#define AR_SREV_9160(_ah) \
757 (((_ah)->ah_macVersion == AR_SREV_VERSION_9160)) 758 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9160))
758#define AR_SREV_9160_10_OR_LATER(_ah) \ 759#define AR_SREV_9160_10_OR_LATER(_ah) \
759 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160)) 760 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9160))
760#define AR_SREV_9160_11(_ah) \ 761#define AR_SREV_9160_11(_ah) \
761 (AR_SREV_9160(_ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9160_11)) 762 (AR_SREV_9160(_ah) && \
763 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9160_11))
762#define AR_SREV_9280(_ah) \ 764#define AR_SREV_9280(_ah) \
763 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280)) 765 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280))
764#define AR_SREV_9280_10_OR_LATER(_ah) \ 766#define AR_SREV_9280_10_OR_LATER(_ah) \
765 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9280)) 767 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9280))
766#define AR_SREV_9280_20(_ah) \ 768#define AR_SREV_9280_20(_ah) \
767 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \ 769 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280) && \
768 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20)) 770 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9280_20))
769#define AR_SREV_9280_20_OR_LATER(_ah) \ 771#define AR_SREV_9280_20_OR_LATER(_ah) \
770 (((_ah)->ah_macVersion > AR_SREV_VERSION_9280) || \ 772 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9280) || \
771 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \ 773 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280) && \
772 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20))) 774 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9280_20)))
773 775
774#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285)) 776#define AR_SREV_9285(_ah) \
777 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9285))
775#define AR_SREV_9285_10_OR_LATER(_ah) \ 778#define AR_SREV_9285_10_OR_LATER(_ah) \
776 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285)) 779 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9285))
777#define AR_SREV_9285_11(_ah) \ 780#define AR_SREV_9285_11(_ah) \
778 (AR_SREV_9280(ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9285_11)) 781 (AR_SREV_9280(ah) && \
782 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9285_11))
779#define AR_SREV_9285_11_OR_LATER(_ah) \ 783#define AR_SREV_9285_11_OR_LATER(_ah) \
780 (((_ah)->ah_macVersion > AR_SREV_VERSION_9285) || \ 784 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9285) || \
781 (AR_SREV_9285(ah) && ((_ah)->ah_macRev >= AR_SREV_REVISION_9285_11))) 785 (AR_SREV_9285(ah) && ((_ah)->hw_version.macRev >= \
786 AR_SREV_REVISION_9285_11)))
782#define AR_SREV_9285_12(_ah) \ 787#define AR_SREV_9285_12(_ah) \
783 (AR_SREV_9280(ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9285_12)) 788 (AR_SREV_9280(ah) && \
789 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9285_12))
784#define AR_SREV_9285_12_OR_LATER(_ah) \ 790#define AR_SREV_9285_12_OR_LATER(_ah) \
785 (((_ah)->ah_macVersion > AR_SREV_VERSION_9285) || \ 791 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9285) || \
786 (AR_SREV_9285(ah) && ((_ah)->ah_macRev >= AR_SREV_REVISION_9285_12))) 792 (AR_SREV_9285(ah) && ((_ah)->hw_version.macRev >= \
793 AR_SREV_REVISION_9285_12)))
787 794
788#define AR_RADIO_SREV_MAJOR 0xf0 795#define AR_RADIO_SREV_MAJOR 0xf0
789#define AR_RAD5133_SREV_MAJOR 0xc0 796#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -875,12 +882,15 @@ enum {
875 882
876#define AR_NUM_GPIO 14 883#define AR_NUM_GPIO 14
877#define AR928X_NUM_GPIO 10 884#define AR928X_NUM_GPIO 10
885#define AR9285_NUM_GPIO 12
878 886
879#define AR_GPIO_IN_OUT 0x4048 887#define AR_GPIO_IN_OUT 0x4048
880#define AR_GPIO_IN_VAL 0x0FFFC000 888#define AR_GPIO_IN_VAL 0x0FFFC000
881#define AR_GPIO_IN_VAL_S 14 889#define AR_GPIO_IN_VAL_S 14
882#define AR928X_GPIO_IN_VAL 0x000FFC00 890#define AR928X_GPIO_IN_VAL 0x000FFC00
883#define AR928X_GPIO_IN_VAL_S 10 891#define AR928X_GPIO_IN_VAL_S 10
892#define AR9285_GPIO_IN_VAL 0x00FFF000
893#define AR9285_GPIO_IN_VAL_S 12
884 894
885#define AR_GPIO_OE_OUT 0x404c 895#define AR_GPIO_OE_OUT 0x404c
886#define AR_GPIO_OE_OUT_DRV 0x3 896#define AR_GPIO_OE_OUT_DRV 0x3
@@ -894,14 +904,24 @@ enum {
894#define AR_GPIO_INTR_POL_VAL_S 0 904#define AR_GPIO_INTR_POL_VAL_S 0
895 905
896#define AR_GPIO_INPUT_EN_VAL 0x4054 906#define AR_GPIO_INPUT_EN_VAL 0x4054
907#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
908#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
909#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
910#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_S 3
911#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_DEF 0x00000010
912#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4
897#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080 913#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
898#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7 914#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
915#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000
916#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12
899#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000 917#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
900#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15 918#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
901#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 919#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
902#define AR_GPIO_JTAG_DISABLE 0x00020000 920#define AR_GPIO_JTAG_DISABLE 0x00020000
903 921
904#define AR_GPIO_INPUT_MUX1 0x4058 922#define AR_GPIO_INPUT_MUX1 0x4058
923#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
924#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
905 925
906#define AR_GPIO_INPUT_MUX2 0x405c 926#define AR_GPIO_INPUT_MUX2 0x405c
907#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f 927#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
@@ -940,7 +960,7 @@ enum {
940 960
941#define AR_RTC_BASE 0x00020000 961#define AR_RTC_BASE 0x00020000
942#define AR_RTC_RC \ 962#define AR_RTC_RC \
943 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000 963 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000)
944#define AR_RTC_RC_M 0x00000003 964#define AR_RTC_RC_M 0x00000003
945#define AR_RTC_RC_MAC_WARM 0x00000001 965#define AR_RTC_RC_MAC_WARM 0x00000001
946#define AR_RTC_RC_MAC_COLD 0x00000002 966#define AR_RTC_RC_MAC_COLD 0x00000002
@@ -948,7 +968,7 @@ enum {
948#define AR_RTC_RC_WARM_RESET 0x00000008 968#define AR_RTC_RC_WARM_RESET 0x00000008
949 969
950#define AR_RTC_PLL_CONTROL \ 970#define AR_RTC_PLL_CONTROL \
951 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014 971 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
952 972
953#define AR_RTC_PLL_DIV 0x0000001f 973#define AR_RTC_PLL_DIV 0x0000001f
954#define AR_RTC_PLL_DIV_S 0 974#define AR_RTC_PLL_DIV_S 0
@@ -1021,6 +1041,10 @@ enum {
1021#define AR_AN_RF5G1_CH1_DB5 0x00380000 1041#define AR_AN_RF5G1_CH1_DB5 0x00380000
1022#define AR_AN_RF5G1_CH1_DB5_S 19 1042#define AR_AN_RF5G1_CH1_DB5_S 19
1023 1043
1044#define AR_AN_TOP1 0x7890
1045#define AR_AN_TOP1_DACIPMODE 0x00040000
1046#define AR_AN_TOP1_DACIPMODE_S 18
1047
1024#define AR_AN_TOP2 0x7894 1048#define AR_AN_TOP2 0x7894
1025#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000 1049#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000
1026#define AR_AN_TOP2_XPABIAS_LVL_S 30 1050#define AR_AN_TOP2_XPABIAS_LVL_S 30
@@ -1181,18 +1205,7 @@ enum {
1181#define AR_CFP_VAL 0x0000FFFF 1205#define AR_CFP_VAL 0x0000FFFF
1182 1206
1183#define AR_RX_FILTER 0x803C 1207#define AR_RX_FILTER 0x803C
1184#define AR_RX_FILTER_ALL 0x00000000
1185#define AR_RX_UCAST 0x00000001
1186#define AR_RX_MCAST 0x00000002
1187#define AR_RX_BCAST 0x00000004
1188#define AR_RX_CONTROL 0x00000008
1189#define AR_RX_BEACON 0x00000010
1190#define AR_RX_PROM 0x00000020
1191#define AR_RX_PROBE_REQ 0x00000080
1192#define AR_RX_MY_BEACON 0x00000200
1193#define AR_RX_COMPR_BAR 0x00000400 1208#define AR_RX_COMPR_BAR 0x00000400
1194#define AR_RX_COMPR_BA 0x00000800
1195#define AR_RX_UNCOM_BA_BAR 0x00001000
1196 1209
1197#define AR_MCAST_FIL0 0x8040 1210#define AR_MCAST_FIL0 0x8040
1198#define AR_MCAST_FIL1 0x8044 1211#define AR_MCAST_FIL1 0x8044
@@ -1236,6 +1249,8 @@ enum {
1236 1249
1237#define AR_AES_MUTE_MASK1 0x8060 1250#define AR_AES_MUTE_MASK1 0x8060
1238#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF 1251#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF
1252#define AR_AES_MUTE_MASK1_FC_MGMT 0xFFFF0000
1253#define AR_AES_MUTE_MASK1_FC_MGMT_S 16
1239 1254
1240#define AR_GATED_CLKS 0x8064 1255#define AR_GATED_CLKS 0x8064
1241#define AR_GATED_CLKS_TX 0x00000002 1256#define AR_GATED_CLKS_TX 0x00000002
@@ -1460,6 +1475,10 @@ enum {
1460#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700 1475#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
1461#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380 1476#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
1462 1477
1478#define AR_PCU_MISC_MODE2 0x8344
1479#define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002
1480#define AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT 0x00000004
1481
1463#define AR_KEYTABLE_0 0x8800 1482#define AR_KEYTABLE_0 0x8800
1464#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32)) 1483#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
1465#define AR_KEY_CACHE_SIZE 128 1484#define AR_KEY_CACHE_SIZE 128
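Every AR_SREV_* check in this header now reads the MAC version and revision from a nested hw_version structure on struct ath_hw instead of the old flat ah_macVersion/ah_macRev fields. A hedged sketch of the shape this implies; the structures below are illustrative stand-ins, not the full ath9k definitions, and the AR_SREV_* constants are the ones defined in this header.

#include <linux/types.h>

/* Illustrative only: the real struct ath_hw carries far more state. */
struct example_hw_version {
	u32 macVersion;		/* e.g. AR_SREV_VERSION_9280 */
	u16 macRev;		/* e.g. AR_SREV_REVISION_9280_20 */
};

struct example_ath_hw {
	struct example_hw_version hw_version;
};

/* Same pattern as the reworked macros: compare against the nested fields. */
#define EXAMPLE_SREV_9280_20_OR_LATER(_ah)				\
	(((_ah)->hw_version.macVersion > AR_SREV_VERSION_9280) ||	\
	 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280) &&	\
	  ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9280_20)))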
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
index 64043e99fac..8c2b56ac55f 100644
--- a/drivers/net/wireless/ath9k/regd.c
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -16,179 +16,335 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include "core.h" 19#include "ath9k.h"
20#include "hw.h"
21#include "regd.h"
22#include "regd_common.h" 20#include "regd_common.h"
23 21
24static int ath9k_regd_chansort(const void *a, const void *b) 22/*
25{ 23 * This is a set of common rules used by our world regulatory domains.
26 const struct ath9k_channel *ca = a; 24 * We have 12 world regulatory domains. To save space we consolidate
27 const struct ath9k_channel *cb = b; 25 * the regulatory domains in 5 structures by frequency and change
26 * the flags on our reg_notifier() on a case by case basis.
27 */
28 28
 29	return (ca->channel == cb->channel) ?	29/* Only these channels allow active scan on all world regulatory domains */
30 (ca->channelFlags & CHAN_FLAGS) - 30#define ATH9K_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
31 (cb->channelFlags & CHAN_FLAGS) : ca->channel - cb->channel; 31
 32/* We enable active scan on these on a case by case basis by regulatory domain */
33#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
34 NL80211_RRF_PASSIVE_SCAN)
35#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
36 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
37
38/* We allow IBSS on these on a case by case basis by regulatory domain */
39#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 40, 0, 30,\
40 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
41#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 40, 0, 30,\
42 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
43#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 40, 0, 30,\
44 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
45
46#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
47 ATH9K_2GHZ_CH12_13, \
48 ATH9K_2GHZ_CH14
49
50#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
51 ATH9K_5GHZ_5470_5850
52/* This one skips what we call "mid band" */
53#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
54 ATH9K_5GHZ_5725_5850
55
56/* Can be used for:
57 * 0x60, 0x61, 0x62 */
58static const struct ieee80211_regdomain ath9k_world_regdom_60_61_62 = {
59 .n_reg_rules = 5,
60 .alpha2 = "99",
61 .reg_rules = {
62 ATH9K_2GHZ_ALL,
63 ATH9K_5GHZ_ALL,
64 }
65};
66
67/* Can be used by 0x63 and 0x65 */
68static const struct ieee80211_regdomain ath9k_world_regdom_63_65 = {
69 .n_reg_rules = 4,
70 .alpha2 = "99",
71 .reg_rules = {
72 ATH9K_2GHZ_CH01_11,
73 ATH9K_2GHZ_CH12_13,
74 ATH9K_5GHZ_NO_MIDBAND,
75 }
76};
77
78/* Can be used by 0x64 only */
79static const struct ieee80211_regdomain ath9k_world_regdom_64 = {
80 .n_reg_rules = 3,
81 .alpha2 = "99",
82 .reg_rules = {
83 ATH9K_2GHZ_CH01_11,
84 ATH9K_5GHZ_NO_MIDBAND,
85 }
86};
87
88/* Can be used by 0x66 and 0x69 */
89static const struct ieee80211_regdomain ath9k_world_regdom_66_69 = {
90 .n_reg_rules = 3,
91 .alpha2 = "99",
92 .reg_rules = {
93 ATH9K_2GHZ_CH01_11,
94 ATH9K_5GHZ_ALL,
95 }
96};
97
98/* Can be used by 0x67, 0x6A and 0x68 */
99static const struct ieee80211_regdomain ath9k_world_regdom_67_68_6A = {
100 .n_reg_rules = 4,
101 .alpha2 = "99",
102 .reg_rules = {
103 ATH9K_2GHZ_CH01_11,
104 ATH9K_2GHZ_CH12_13,
105 ATH9K_5GHZ_ALL,
106 }
107};
108
109static u16 ath9k_regd_get_eepromRD(struct ath_hw *ah)
110{
111 return ah->regulatory.current_rd & ~WORLDWIDE_ROAMING_FLAG;
32} 112}
33 113
34static void 114u16 ath9k_regd_get_rd(struct ath_hw *ah)
35ath9k_regd_sort(void *a, u32 n, u32 size, ath_hal_cmp_t *cmp)
36{ 115{
37 u8 *aa = a; 116 return ath9k_regd_get_eepromRD(ah);
38 u8 *ai, *t;
39
40 for (ai = aa + size; --n >= 1; ai += size)
41 for (t = ai; t > aa; t -= size) {
42 u8 *u = t - size;
43 if (cmp(u, t) <= 0)
44 break;
45 swap_array(u, t, size);
46 }
47} 117}
48 118
49static u16 ath9k_regd_get_eepromRD(struct ath_hal *ah) 119bool ath9k_is_world_regd(struct ath_hw *ah)
50{ 120{
51 return ah->ah_currentRD & ~WORLDWIDE_ROAMING_FLAG; 121 return isWwrSKU(ah);
52} 122}
53 123
54static bool ath9k_regd_is_chan_bm_zero(u64 *bitmask) 124const struct ieee80211_regdomain *ath9k_default_world_regdomain(void)
55{ 125{
56 int i; 126 /* this is the most restrictive */
57 127 return &ath9k_world_regdom_64;
58 for (i = 0; i < BMLEN; i++) {
59 if (bitmask[i] != 0)
60 return false;
61 }
62 return true;
63} 128}
64 129
65static bool ath9k_regd_is_eeprom_valid(struct ath_hal *ah) 130const struct ieee80211_regdomain *ath9k_world_regdomain(struct ath_hw *ah)
66{ 131{
67 u16 rd = ath9k_regd_get_eepromRD(ah); 132 switch (ah->regulatory.regpair->regDmnEnum) {
68 int i; 133 case 0x60:
69 134 case 0x61:
70 if (rd & COUNTRY_ERD_FLAG) { 135 case 0x62:
71 u16 cc = rd & ~COUNTRY_ERD_FLAG; 136 return &ath9k_world_regdom_60_61_62;
72 for (i = 0; i < ARRAY_SIZE(allCountries); i++) 137 case 0x63:
73 if (allCountries[i].countryCode == cc) 138 case 0x65:
74 return true; 139 return &ath9k_world_regdom_63_65;
75 } else { 140 case 0x64:
76 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) 141 return &ath9k_world_regdom_64;
77 if (regDomainPairs[i].regDmnEnum == rd) 142 case 0x66:
78 return true; 143 case 0x69:
144 return &ath9k_world_regdom_66_69;
145 case 0x67:
146 case 0x68:
147 case 0x6A:
148 return &ath9k_world_regdom_67_68_6A;
149 default:
150 WARN_ON(1);
151 return ath9k_default_world_regdomain();
79 } 152 }
80 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
81 "invalid regulatory domain/country code 0x%x\n", rd);
82 return false;
83} 153}
84 154
85static bool ath9k_regd_is_fcc_midband_supported(struct ath_hal *ah) 155/* Frequency is one where radar detection is required */
156static bool ath9k_is_radar_freq(u16 center_freq)
86{ 157{
87 u32 regcap; 158 return (center_freq >= 5260 && center_freq <= 5700);
159}
88 160
89 regcap = ah->ah_caps.reg_cap; 161/*
162 * Enable adhoc on 5 GHz if allowed by 11d.
163 * Remove passive scan if channel is allowed by 11d,
164 * except when on radar frequencies.
165 */
166static void ath9k_reg_apply_5ghz_beaconing_flags(struct wiphy *wiphy,
167 enum reg_set_by setby)
168{
169 struct ieee80211_supported_band *sband;
170 const struct ieee80211_reg_rule *reg_rule;
171 struct ieee80211_channel *ch;
172 unsigned int i;
173 u32 bandwidth = 0;
174 int r;
175
176 if (setby != REGDOM_SET_BY_COUNTRY_IE)
177 return;
178 if (!wiphy->bands[IEEE80211_BAND_5GHZ])
179 return;
90 180
91 if (regcap & AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND) 181 sband = wiphy->bands[IEEE80211_BAND_5GHZ];
92 return true; 182 for (i = 0; i < sband->n_channels; i++) {
93 else 183 ch = &sband->channels[i];
94 return false; 184 r = freq_reg_info(wiphy, ch->center_freq,
185 &bandwidth, &reg_rule);
186 if (r)
187 continue;
188 /* If 11d had a rule for this channel ensure we enable adhoc
189 * if it allows us to use it. Note that we would have disabled
190 * it by applying our static world regdomain by default during
191 * probe */
192 if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
193 ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
194 if (!ath9k_is_radar_freq(ch->center_freq))
195 continue;
196 if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
197 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
198 }
95} 199}
96 200
 97static bool ath9k_regd_is_ccode_valid(struct ath_hal *ah,	201/* Allows active scan on Ch 12 and 13 */
98 u16 cc) 202static void ath9k_reg_apply_active_scan_flags(struct wiphy *wiphy,
203 enum reg_set_by setby)
99{ 204{
100 u16 rd; 205 struct ieee80211_supported_band *sband;
101 int i; 206 struct ieee80211_channel *ch;
102 207 const struct ieee80211_reg_rule *reg_rule;
103 if (cc == CTRY_DEFAULT) 208 u32 bandwidth = 0;
104 return true; 209 int r;
105 if (cc == CTRY_DEBUG) 210
106 return true; 211 /* Force passive scan on Channels 12-13 */
212 sband = wiphy->bands[IEEE80211_BAND_2GHZ];
213
214 /* If no country IE has been received always enable active scan
215 * on these channels */
216 if (setby != REGDOM_SET_BY_COUNTRY_IE) {
217 ch = &sband->channels[11]; /* CH 12 */
218 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
219 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
220 ch = &sband->channels[12]; /* CH 13 */
221 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
222 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
223 return;
224 }
107 225
 108	rd = ath9k_regd_get_eepromRD(ah);	226	/* If a country IE has been received, check its rule for this
109 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "EEPROM regdomain 0x%x\n", rd); 227 * channel first before enabling active scan. The passive scan
228 * would have been enforced by the initial probe processing on
229 * our custom regulatory domain. */
110 230
111 if (rd & COUNTRY_ERD_FLAG) { 231 ch = &sband->channels[11]; /* CH 12 */
112 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 232 r = freq_reg_info(wiphy, ch->center_freq, &bandwidth, &reg_rule);
113 "EEPROM setting is country code %u\n", 233 if (!r) {
114 rd & ~COUNTRY_ERD_FLAG); 234 if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
115 return cc == (rd & ~COUNTRY_ERD_FLAG); 235 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
236 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
116 } 237 }
117 238
118 for (i = 0; i < ARRAY_SIZE(allCountries); i++) { 239 ch = &sband->channels[12]; /* CH 13 */
119 if (cc == allCountries[i].countryCode) { 240 r = freq_reg_info(wiphy, ch->center_freq, &bandwidth, &reg_rule);
120#ifdef AH_SUPPORT_11D 241 if (!r) {
121 if ((rd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) 242 if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
122 return true; 243 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
123#endif 244 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
124 if (allCountries[i].regDmnEnum == rd ||
125 rd == DEBUG_REG_DMN || rd == NO_ENUMRD)
126 return true;
127 }
128 } 245 }
129 return false;
130} 246}
131 247
132static void 248/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
133ath9k_regd_get_wmodes_nreg(struct ath_hal *ah, 249void ath9k_reg_apply_radar_flags(struct wiphy *wiphy)
134 struct country_code_to_enum_rd *country,
135 struct regDomain *rd5GHz,
136 unsigned long *modes_allowed)
137{ 250{
138 bitmap_copy(modes_allowed, ah->ah_caps.wireless_modes, ATH9K_MODE_MAX); 251 struct ieee80211_supported_band *sband;
252 struct ieee80211_channel *ch;
253 unsigned int i;
139 254
140 if (test_bit(ATH9K_MODE_11G, ah->ah_caps.wireless_modes) && 255 if (!wiphy->bands[IEEE80211_BAND_5GHZ])
141 (!country->allow11g)) 256 return;
142 clear_bit(ATH9K_MODE_11G, modes_allowed);
143 257
144 if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes) && 258 sband = wiphy->bands[IEEE80211_BAND_5GHZ];
145 (ath9k_regd_is_chan_bm_zero(rd5GHz->chan11a)))
146 clear_bit(ATH9K_MODE_11A, modes_allowed);
147 259
148 if (test_bit(ATH9K_MODE_11NG_HT20, ah->ah_caps.wireless_modes) 260 for (i = 0; i < sband->n_channels; i++) {
149 && (!country->allow11ng20)) 261 ch = &sband->channels[i];
150 clear_bit(ATH9K_MODE_11NG_HT20, modes_allowed); 262 if (!ath9k_is_radar_freq(ch->center_freq))
263 continue;
264 /* We always enable radar detection/DFS on this
265 * frequency range. Additionally we also apply on
266 * this frequency range:
 267	 * - If STA mode does not yet support DFS, disable
268 * active scanning
269 * - If adhoc mode does not support DFS yet then
 270	 *   disable adhoc on this frequency.
271 * - If AP mode does not yet support radar detection/DFS
272 * do not allow AP mode
273 */
274 if (!(ch->flags & IEEE80211_CHAN_DISABLED))
275 ch->flags |= IEEE80211_CHAN_RADAR |
276 IEEE80211_CHAN_NO_IBSS |
277 IEEE80211_CHAN_PASSIVE_SCAN;
278 }
279}
151 280
152 if (test_bit(ATH9K_MODE_11NA_HT20, ah->ah_caps.wireless_modes) 281void ath9k_reg_apply_world_flags(struct wiphy *wiphy, enum reg_set_by setby)
153 && (!country->allow11na20)) 282{
154 clear_bit(ATH9K_MODE_11NA_HT20, modes_allowed); 283 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
284 struct ath_softc *sc = hw->priv;
285 struct ath_hw *ah = sc->sc_ah;
286
287 switch (ah->regulatory.regpair->regDmnEnum) {
288 case 0x60:
289 case 0x63:
290 case 0x66:
291 case 0x67:
292 ath9k_reg_apply_5ghz_beaconing_flags(wiphy, setby);
293 break;
294 case 0x68:
295 ath9k_reg_apply_5ghz_beaconing_flags(wiphy, setby);
296 ath9k_reg_apply_active_scan_flags(wiphy, setby);
297 break;
298 }
299 return;
300}
155 301
156 if (test_bit(ATH9K_MODE_11NG_HT40PLUS, ah->ah_caps.wireless_modes) && 302int ath9k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
157 (!country->allow11ng40)) 303{
158 clear_bit(ATH9K_MODE_11NG_HT40PLUS, modes_allowed); 304 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
305 struct ath_softc *sc = hw->priv;
159 306
160 if (test_bit(ATH9K_MODE_11NG_HT40MINUS, ah->ah_caps.wireless_modes) && 307 /* We always apply this */
161 (!country->allow11ng40)) 308 ath9k_reg_apply_radar_flags(wiphy);
162 clear_bit(ATH9K_MODE_11NG_HT40MINUS, modes_allowed);
163 309
164 if (test_bit(ATH9K_MODE_11NA_HT40PLUS, ah->ah_caps.wireless_modes) && 310 switch (request->initiator) {
165 (!country->allow11na40)) 311 case REGDOM_SET_BY_DRIVER:
166 clear_bit(ATH9K_MODE_11NA_HT40PLUS, modes_allowed); 312 case REGDOM_SET_BY_INIT:
313 case REGDOM_SET_BY_CORE:
314 case REGDOM_SET_BY_USER:
315 break;
316 case REGDOM_SET_BY_COUNTRY_IE:
317 if (ath9k_is_world_regd(sc->sc_ah))
318 ath9k_reg_apply_world_flags(wiphy, request->initiator);
319 break;
320 }
167 321
168 if (test_bit(ATH9K_MODE_11NA_HT40MINUS, ah->ah_caps.wireless_modes) && 322 return 0;
169 (!country->allow11na40))
170 clear_bit(ATH9K_MODE_11NA_HT40MINUS, modes_allowed);
171} 323}
172 324
173bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah) 325bool ath9k_regd_is_eeprom_valid(struct ath_hw *ah)
174{ 326{
175 u16 rd; 327 u16 rd = ath9k_regd_get_eepromRD(ah);
176 328 int i;
177 rd = ath9k_regd_get_eepromRD(ah);
178 329
179 switch (rd) { 330 if (rd & COUNTRY_ERD_FLAG) {
180 case FCC4_FCCA: 331 /* EEPROM value is a country code */
181 case (CTRY_UNITED_STATES_FCC49 | COUNTRY_ERD_FLAG): 332 u16 cc = rd & ~COUNTRY_ERD_FLAG;
182 return true; 333 for (i = 0; i < ARRAY_SIZE(allCountries); i++)
183 case DEBUG_REG_DMN: 334 if (allCountries[i].countryCode == cc)
184 case NO_ENUMRD: 335 return true;
185 if (ah->ah_countryCode == CTRY_UNITED_STATES_FCC49) 336 } else {
186 return true; 337 /* EEPROM value is a regpair value */
187 break; 338 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
339 if (regDomainPairs[i].regDmnEnum == rd)
340 return true;
188 } 341 }
342 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
343 "invalid regulatory domain/country code 0x%x\n", rd);
189 return false; 344 return false;
190} 345}
191 346
347/* EEPROM country code to regpair mapping */
192static struct country_code_to_enum_rd* 348static struct country_code_to_enum_rd*
193ath9k_regd_find_country(u16 countryCode) 349ath9k_regd_find_country(u16 countryCode)
194{ 350{
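The regd.c rewrite above folds the driver's twelve world regulatory SKUs into five shared ieee80211_regdomain tables built from a few common REG_RULE() entries, and ath9k_world_regdomain() then picks one of them by the EEPROM regpair code (0x60-0x6A). A hedged sketch of one such consolidated table; the rule values are copied from the hunk, while the variable name is illustrative and not an ath9k symbol.

#include <linux/types.h>
#include <net/cfg80211.h>

/* Mirrors ath9k_world_regdom_66_69 above: Ch 1-11 active, 5 GHz restricted. */
static const struct ieee80211_regdomain example_world_regd = {
	.n_reg_rules = 3,
	.alpha2 = "99",		/* "99" marks a driver-built world domain */
	.reg_rules = {
		/* Ch 1-11: active scan allowed on every world SKU */
		REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
		/* 5 GHz: passive scan and no IBSS until 11d relaxes it */
		REG_RULE(5150-10, 5350+10, 40, 0, 30,
			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
		REG_RULE(5470-10, 5850+10, 40, 0, 30,
			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
	}
};

During probe a driver of this vintage would hand its chosen table to cfg80211 (via wiphy_apply_custom_regulatory(), where available) and then relax the NO_IBSS/PASSIVE_SCAN flags from its reg_notifier() as country IE information arrives, which is what the helpers above do.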
@@ -201,11 +357,24 @@ ath9k_regd_find_country(u16 countryCode)
201 return NULL; 357 return NULL;
202} 358}
203 359
204static u16 ath9k_regd_get_default_country(struct ath_hal *ah) 360/* EEPROM rd code to regpair mapping */
361static struct country_code_to_enum_rd*
362ath9k_regd_find_country_by_rd(int regdmn)
205{ 363{
206 u16 rd;
207 int i; 364 int i;
208 365
366 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
367 if (allCountries[i].regDmnEnum == regdmn)
368 return &allCountries[i];
369 }
370 return NULL;
371}
372
373/* Returns the map of the EEPROM set RD to a country code */
374static u16 ath9k_regd_get_default_country(struct ath_hw *ah)
375{
376 u16 rd;
377
209 rd = ath9k_regd_get_eepromRD(ah); 378 rd = ath9k_regd_get_eepromRD(ah);
210 if (rd & COUNTRY_ERD_FLAG) { 379 if (rd & COUNTRY_ERD_FLAG) {
211 struct country_code_to_enum_rd *country = NULL; 380 struct country_code_to_enum_rd *country = NULL;
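ath9k_regd_get_default_country() above keys off COUNTRY_ERD_FLAG: the EEPROM regulatory word either carries a country code directly (bit 15 set) or a regdomain/regpair enum. A small hedged sketch of that decoding; the helper name is made up for illustration, and the flag value mirrors COUNTRY_ERD_FLAG as defined in regd.h further below.

#include <linux/types.h>

#define EXAMPLE_COUNTRY_ERD_FLAG	0x8000	/* mirrors COUNTRY_ERD_FLAG in regd.h */

/* Returns true and fills *cc when the EEPROM word names a country code,
 * false when it names a regdomain pair such as 0x60..0x6A. */
static bool example_decode_eeprom_rd(u16 rd, u16 *cc)
{
	if (rd & EXAMPLE_COUNTRY_ERD_FLAG) {
		*cc = rd & ~EXAMPLE_COUNTRY_ERD_FLAG;
		return true;
	}
	return false;
}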
@@ -216,798 +385,104 @@ static u16 ath9k_regd_get_default_country(struct ath_hal *ah)
216 return cc; 385 return cc;
217 } 386 }
218 387
219 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
220 if (regDomainPairs[i].regDmnEnum == rd) {
221 if (regDomainPairs[i].singleCC != 0)
222 return regDomainPairs[i].singleCC;
223 else
224 i = ARRAY_SIZE(regDomainPairs);
225 }
226 return CTRY_DEFAULT; 388 return CTRY_DEFAULT;
227} 389}
228 390
229static bool ath9k_regd_is_valid_reg_domain(int regDmn, 391static struct reg_dmn_pair_mapping*
230 struct regDomain *rd) 392ath9k_get_regpair(int regdmn)
231{
232 int i;
233
234 for (i = 0; i < ARRAY_SIZE(regDomains); i++) {
235 if (regDomains[i].regDmnEnum == regDmn) {
236 if (rd != NULL) {
237 memcpy(rd, &regDomains[i],
238 sizeof(struct regDomain));
239 }
240 return true;
241 }
242 }
243 return false;
244}
245
246static bool ath9k_regd_is_valid_reg_domainPair(int regDmnPair)
247{ 393{
248 int i; 394 int i;
249 395
250 if (regDmnPair == NO_ENUMRD) 396 if (regdmn == NO_ENUMRD)
251 return false; 397 return NULL;
252 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) { 398 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
253 if (regDomainPairs[i].regDmnEnum == regDmnPair) 399 if (regDomainPairs[i].regDmnEnum == regdmn)
254 return true; 400 return &regDomainPairs[i];
255 }
256 return false;
257}
258
259static bool
260ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn,
261 u16 channelFlag, struct regDomain *rd)
262{
263 int i, found;
264 u64 flags = NO_REQ;
265 struct reg_dmn_pair_mapping *regPair = NULL;
266 int regOrg;
267
268 regOrg = regDmn;
269 if (regDmn == CTRY_DEFAULT) {
270 u16 rdnum;
271 rdnum = ath9k_regd_get_eepromRD(ah);
272
273 if (!(rdnum & COUNTRY_ERD_FLAG)) {
274 if (ath9k_regd_is_valid_reg_domain(rdnum, NULL) ||
275 ath9k_regd_is_valid_reg_domainPair(rdnum)) {
276 regDmn = rdnum;
277 }
278 }
279 }
280
281 if ((regDmn & MULTI_DOMAIN_MASK) == 0) {
282 for (i = 0, found = 0;
283 (i < ARRAY_SIZE(regDomainPairs)) && (!found); i++) {
284 if (regDomainPairs[i].regDmnEnum == regDmn) {
285 regPair = &regDomainPairs[i];
286 found = 1;
287 }
288 }
289 if (!found) {
290 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
291 "Failed to find reg domain pair %u\n", regDmn);
292 return false;
293 }
294 if (!(channelFlag & CHANNEL_2GHZ)) {
295 regDmn = regPair->regDmn5GHz;
296 flags = regPair->flags5GHz;
297 }
298 if (channelFlag & CHANNEL_2GHZ) {
299 regDmn = regPair->regDmn2GHz;
300 flags = regPair->flags2GHz;
301 }
302 }
303
304 found = ath9k_regd_is_valid_reg_domain(regDmn, rd);
305 if (!found) {
306 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
307 "Failed to find unitary reg domain %u\n", regDmn);
308 return false;
309 } else {
310 rd->pscan &= regPair->pscanMask;
311 if (((regOrg & MULTI_DOMAIN_MASK) == 0) &&
312 (flags != NO_REQ)) {
313 rd->flags = flags;
314 }
315
316 rd->flags &= (channelFlag & CHANNEL_2GHZ) ?
317 REG_DOMAIN_2GHZ_MASK : REG_DOMAIN_5GHZ_MASK;
318 return true;
319 }
320}
321
322static bool ath9k_regd_is_bit_set(int bit, u64 *bitmask)
323{
324 int byteOffset, bitnum;
325 u64 val;
326
327 byteOffset = bit / 64;
328 bitnum = bit - byteOffset * 64;
329 val = ((u64) 1) << bitnum;
330 if (bitmask[byteOffset] & val)
331 return true;
332 else
333 return false;
334}
335
336static void
337ath9k_regd_add_reg_classid(u8 *regclassids, u32 maxregids,
338 u32 *nregids, u8 regclassid)
339{
340 int i;
341
342 if (regclassid == 0)
343 return;
344
345 for (i = 0; i < maxregids; i++) {
346 if (regclassids[i] == regclassid)
347 return;
348 if (regclassids[i] == 0)
349 break;
350 }
351
352 if (i == maxregids)
353 return;
354 else {
355 regclassids[i] = regclassid;
356 *nregids += 1;
357 }
358
359 return;
360}
361
362static bool
363ath9k_regd_get_eeprom_reg_ext_bits(struct ath_hal *ah,
364 enum reg_ext_bitmap bit)
365{
366 return (ah->ah_currentRDExt & (1 << bit)) ? true : false;
367}
368
369#ifdef ATH_NF_PER_CHAN
370
371static void ath9k_regd_init_rf_buffer(struct ath9k_channel *ichans,
372 int nchans)
373{
374 int i, j, next;
375
376 for (next = 0; next < nchans; next++) {
377 for (i = 0; i < NUM_NF_READINGS; i++) {
378 ichans[next].nfCalHist[i].currIndex = 0;
379 ichans[next].nfCalHist[i].privNF =
380 AR_PHY_CCA_MAX_GOOD_VALUE;
381 ichans[next].nfCalHist[i].invalidNFcount =
382 AR_PHY_CCA_FILTERWINDOW_LENGTH;
383 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
384 ichans[next].nfCalHist[i].nfCalBuffer[j] =
385 AR_PHY_CCA_MAX_GOOD_VALUE;
386 }
387 }
388 }
389}
390#endif
391
392static int ath9k_regd_is_chan_present(struct ath_hal *ah,
393 u16 c)
394{
395 int i;
396
397 for (i = 0; i < 150; i++) {
398 if (!ah->ah_channels[i].channel)
399 return -1;
400 else if (ah->ah_channels[i].channel == c)
401 return i;
402 }
403
404 return -1;
405}
406
407static bool
408ath9k_regd_add_channel(struct ath_hal *ah,
409 u16 c,
410 u16 c_lo,
411 u16 c_hi,
412 u16 maxChan,
413 u8 ctl,
414 int pos,
415 struct regDomain rd5GHz,
416 struct RegDmnFreqBand *fband,
417 struct regDomain *rd,
418 const struct cmode *cm,
419 struct ath9k_channel *ichans,
420 bool enableExtendedChannels)
421{
422 struct ath9k_channel *chan;
423 int ret;
424 u32 channelFlags = 0;
425 u8 privFlags = 0;
426
427 if (!(c_lo <= c && c <= c_hi)) {
428 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
429 "c %u out of range [%u..%u]\n",
430 c, c_lo, c_hi);
431 return false;
432 }
433 if ((fband->channelBW == CHANNEL_HALF_BW) &&
434 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) {
435 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
436 "Skipping %u half rate channel\n", c);
437 return false;
438 }
439
440 if ((fband->channelBW == CHANNEL_QUARTER_BW) &&
441 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) {
442 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
443 "Skipping %u quarter rate channel\n", c);
444 return false;
445 }
446
447 if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) {
448 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
449 "c %u > maxChan %u\n", c, maxChan);
450 return false;
451 }
452
453 if ((fband->usePassScan & IS_ECM_CHAN) && !enableExtendedChannels) {
454 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
455 "Skipping ecm channel\n");
456 return false;
457 } 401 }
458 402 return NULL;
459 if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == NL80211_IFTYPE_AP)) {
460 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
461 "Skipping HOSTAP channel\n");
462 return false;
463 }
464
465 if (IS_HT40_MODE(cm->mode) &&
466 !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_FCC_DFS_HT40)) &&
467 (fband->useDfs) &&
468 (rd->conformanceTestLimit != MKK)) {
469 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
470 "Skipping HT40 channel (en_fcc_dfs_ht40 = 0)\n");
471 return false;
472 }
473
474 if (IS_HT40_MODE(cm->mode) &&
475 !(ath9k_regd_get_eeprom_reg_ext_bits(ah,
476 REG_EXT_JAPAN_NONDFS_HT40)) &&
477 !(fband->useDfs) && (rd->conformanceTestLimit == MKK)) {
478 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
479 "Skipping HT40 channel (en_jap_ht40 = 0)\n");
480 return false;
481 }
482
483 if (IS_HT40_MODE(cm->mode) &&
484 !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_JAPAN_DFS_HT40)) &&
485 (fband->useDfs) &&
486 (rd->conformanceTestLimit == MKK)) {
487 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
488 "Skipping HT40 channel (en_jap_dfs_ht40 = 0)\n");
489 return false;
490 }
491
492 /* Calculate channel flags */
493
494 channelFlags = cm->flags;
495
496 switch (fband->channelBW) {
497 case CHANNEL_HALF_BW:
498 channelFlags |= CHANNEL_HALF;
499 break;
500 case CHANNEL_QUARTER_BW:
501 channelFlags |= CHANNEL_QUARTER;
502 break;
503 }
504
505 if (fband->usePassScan & rd->pscan)
506 channelFlags |= CHANNEL_PASSIVE;
507 else
508 channelFlags &= ~CHANNEL_PASSIVE;
509 if (fband->useDfs & rd->dfsMask)
510 privFlags = CHANNEL_DFS;
511 else
512 privFlags = 0;
513 if (rd->flags & LIMIT_FRAME_4MS)
514 privFlags |= CHANNEL_4MS_LIMIT;
515 if (privFlags & CHANNEL_DFS)
516 privFlags |= CHANNEL_DISALLOW_ADHOC;
517 if (rd->flags & ADHOC_PER_11D)
518 privFlags |= CHANNEL_PER_11D_ADHOC;
519
520 if (channelFlags & CHANNEL_PASSIVE) {
521 if ((c < 2412) || (c > 2462)) {
522 if (rd5GHz.regDmnEnum == MKK1 ||
523 rd5GHz.regDmnEnum == MKK2) {
524 u32 regcap = ah->ah_caps.reg_cap;
525 if (!(regcap &
526 (AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
527 AR_EEPROM_EEREGCAP_EN_KK_U2 |
528 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) &&
529 isUNII1OddChan(c)) {
530 channelFlags &= ~CHANNEL_PASSIVE;
531 } else {
532 privFlags |= CHANNEL_DISALLOW_ADHOC;
533 }
534 } else {
535 privFlags |= CHANNEL_DISALLOW_ADHOC;
536 }
537 }
538 }
539
540 if ((cm->mode == ATH9K_MODE_11A) ||
541 (cm->mode == ATH9K_MODE_11NA_HT20) ||
542 (cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
543 (cm->mode == ATH9K_MODE_11NA_HT40MINUS)) {
544 if (rd->flags & (ADHOC_NO_11A | DISALLOW_ADHOC_11A))
545 privFlags |= CHANNEL_DISALLOW_ADHOC;
546 }
547
548 /* Fill in channel details */
549
550 ret = ath9k_regd_is_chan_present(ah, c);
551 if (ret == -1) {
552 chan = &ah->ah_channels[pos];
553 chan->channel = c;
554 chan->maxRegTxPower = fband->powerDfs;
555 chan->antennaMax = fband->antennaMax;
556 chan->regDmnFlags = rd->flags;
557 chan->maxTxPower = AR5416_MAX_RATE_POWER;
558 chan->minTxPower = AR5416_MAX_RATE_POWER;
559 chan->channelFlags = channelFlags;
560 chan->privFlags = privFlags;
561 } else {
562 chan = &ah->ah_channels[ret];
563 chan->channelFlags |= channelFlags;
564 chan->privFlags |= privFlags;
565 }
566
567 /* Set CTLs */
568
569 if ((cm->flags & CHANNEL_ALL) == CHANNEL_A)
570 chan->conformanceTestLimit[0] = ctl;
571 else if ((cm->flags & CHANNEL_ALL) == CHANNEL_B)
572 chan->conformanceTestLimit[1] = ctl;
573 else if ((cm->flags & CHANNEL_ALL) == CHANNEL_G)
574 chan->conformanceTestLimit[2] = ctl;
575
576 return (ret == -1) ? true : false;
577}
578
579static bool ath9k_regd_japan_check(struct ath_hal *ah,
580 int b,
581 struct regDomain *rd5GHz)
582{
583 bool skipband = false;
584 int i;
585 u32 regcap;
586
587 for (i = 0; i < ARRAY_SIZE(j_bandcheck); i++) {
588 if (j_bandcheck[i].freqbandbit == b) {
589 regcap = ah->ah_caps.reg_cap;
590 if ((j_bandcheck[i].eepromflagtocheck & regcap) == 0) {
591 skipband = true;
592 } else if ((regcap & AR_EEPROM_EEREGCAP_EN_KK_U2) ||
593 (regcap & AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) {
594 rd5GHz->dfsMask |= DFS_MKK4;
595 rd5GHz->pscan |= PSCAN_MKK3;
596 }
597 break;
598 }
599 }
600
601 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
602 "Skipping %d freq band\n", j_bandcheck[i].freqbandbit);
603
604 return skipband;
605} 403}
606 404
607bool 405int ath9k_regd_init(struct ath_hw *ah)
608ath9k_regd_init_channels(struct ath_hal *ah,
609 u32 maxchans,
610 u32 *nchans, u8 *regclassids,
611 u32 maxregids, u32 *nregids, u16 cc,
612 bool enableOutdoor,
613 bool enableExtendedChannels)
614{ 406{
615 u16 maxChan = 7000;
616 struct country_code_to_enum_rd *country = NULL; 407 struct country_code_to_enum_rd *country = NULL;
617 struct regDomain rd5GHz, rd2GHz;
618 const struct cmode *cm;
619 struct ath9k_channel *ichans = &ah->ah_channels[0];
620 int next = 0, b;
621 u8 ctl;
622 int regdmn; 408 int regdmn;
623 u16 chanSep;
624 unsigned long *modes_avail;
625 DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX);
626
627 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "cc %u %s %s\n", cc,
628 enableOutdoor ? "Enable outdoor" : "",
629 enableExtendedChannels ? "Enable ecm" : "");
630
631 if (!ath9k_regd_is_ccode_valid(ah, cc)) {
632 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
633 "Invalid country code %d\n", cc);
634 return false;
635 }
636 409
637 if (!ath9k_regd_is_eeprom_valid(ah)) { 410 if (!ath9k_regd_is_eeprom_valid(ah)) {
638 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 411 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
639 "Invalid EEPROM contents\n"); 412 "Invalid EEPROM contents\n");
640 return false; 413 return -EINVAL;
641 } 414 }
642 415
643 ah->ah_countryCode = ath9k_regd_get_default_country(ah); 416 ah->regulatory.country_code = ath9k_regd_get_default_country(ah);
644 417
645 if (ah->ah_countryCode == CTRY_DEFAULT) { 418 if (ah->regulatory.country_code == CTRY_DEFAULT &&
646 ah->ah_countryCode = cc & COUNTRY_CODE_MASK; 419 ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)
647 if ((ah->ah_countryCode == CTRY_DEFAULT) && 420 ah->regulatory.country_code = CTRY_UNITED_STATES;
648 (ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)) {
649 ah->ah_countryCode = CTRY_UNITED_STATES;
650 }
651 }
652 421
653#ifdef AH_SUPPORT_11D 422 if (ah->regulatory.country_code == CTRY_DEFAULT) {
654 if (ah->ah_countryCode == CTRY_DEFAULT) {
655 regdmn = ath9k_regd_get_eepromRD(ah); 423 regdmn = ath9k_regd_get_eepromRD(ah);
656 country = NULL; 424 country = NULL;
657 } else { 425 } else {
658#endif 426 country = ath9k_regd_find_country(ah->regulatory.country_code);
659 country = ath9k_regd_find_country(ah->ah_countryCode);
660 if (country == NULL) { 427 if (country == NULL) {
661 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 428 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
662 "Country is NULL!!!!, cc= %d\n", 429 "Country is NULL!!!!, cc= %d\n",
663 ah->ah_countryCode); 430 ah->regulatory.country_code);
664 return false; 431 return -EINVAL;
665 } else { 432 } else
666 regdmn = country->regDmnEnum; 433 regdmn = country->regDmnEnum;
667#ifdef AH_SUPPORT_11D
668 if (((ath9k_regd_get_eepromRD(ah) &
669 WORLD_SKU_MASK) == WORLD_SKU_PREFIX) &&
670 (cc == CTRY_UNITED_STATES)) {
671 if (!isWwrSKU_NoMidband(ah)
672 && ath9k_regd_is_fcc_midband_supported(ah))
673 regdmn = FCC3_FCCA;
674 else
675 regdmn = FCC1_FCCA;
676 }
677#endif
678 }
679#ifdef AH_SUPPORT_11D
680 }
681#endif
682 if (!ath9k_regd_get_wmode_regdomain(ah,
683 regdmn,
684 ~CHANNEL_2GHZ,
685 &rd5GHz)) {
686 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
687 "Couldn't find unitary "
688 "5GHz reg domain for country %u\n",
689 ah->ah_countryCode);
690 return false;
691 }
692 if (!ath9k_regd_get_wmode_regdomain(ah,
693 regdmn,
694 CHANNEL_2GHZ,
695 &rd2GHz)) {
696 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
697 "Couldn't find unitary 2GHz "
698 "reg domain for country %u\n",
699 ah->ah_countryCode);
700 return false;
701 }
702
703 if (!isWwrSKU(ah) && ((rd5GHz.regDmnEnum == FCC1) ||
704 (rd5GHz.regDmnEnum == FCC2))) {
705 if (ath9k_regd_is_fcc_midband_supported(ah)) {
706 if (!ath9k_regd_get_wmode_regdomain(ah,
707 FCC3_FCCA,
708 ~CHANNEL_2GHZ,
709 &rd5GHz)) {
710 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
711 "Couldn't find unitary 5GHz "
712 "reg domain for country %u\n",
713 ah->ah_countryCode);
714 return false;
715 }
716 }
717 } 434 }
718 435
719 if (country == NULL) { 436 ah->regulatory.current_rd_inuse = regdmn;
720 modes_avail = ah->ah_caps.wireless_modes; 437 ah->regulatory.regpair = ath9k_get_regpair(regdmn);
721 } else {
722 ath9k_regd_get_wmodes_nreg(ah, country, &rd5GHz, modes_allowed);
723 modes_avail = modes_allowed;
724 438
725 if (!enableOutdoor) 439 if (!ah->regulatory.regpair) {
726 maxChan = country->outdoorChanStart; 440 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
441 "No regulatory domain pair found, cannot continue\n");
442 return -EINVAL;
727 } 443 }
728 444
729 next = 0; 445 if (!country)
730 446 country = ath9k_regd_find_country_by_rd(regdmn);
731 if (maxchans > ARRAY_SIZE(ah->ah_channels))
732 maxchans = ARRAY_SIZE(ah->ah_channels);
733
734 for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) {
735 u16 c, c_hi, c_lo;
736 u64 *channelBM = NULL;
737 struct regDomain *rd = NULL;
738 struct RegDmnFreqBand *fband = NULL, *freqs;
739 int8_t low_adj = 0, hi_adj = 0;
740
741 if (!test_bit(cm->mode, modes_avail)) {
742 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
743 "!avail mode %d flags 0x%x\n",
744 cm->mode, cm->flags);
745 continue;
746 }
747 if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) {
748 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
749 "channels 0x%x not supported "
750 "by hardware\n", cm->flags);
751 continue;
752 }
753
754 switch (cm->mode) {
755 case ATH9K_MODE_11A:
756 case ATH9K_MODE_11NA_HT20:
757 case ATH9K_MODE_11NA_HT40PLUS:
758 case ATH9K_MODE_11NA_HT40MINUS:
759 rd = &rd5GHz;
760 channelBM = rd->chan11a;
761 freqs = &regDmn5GhzFreq[0];
762 ctl = rd->conformanceTestLimit;
763 break;
764 case ATH9K_MODE_11B:
765 rd = &rd2GHz;
766 channelBM = rd->chan11b;
767 freqs = &regDmn2GhzFreq[0];
768 ctl = rd->conformanceTestLimit | CTL_11B;
769 break;
770 case ATH9K_MODE_11G:
771 case ATH9K_MODE_11NG_HT20:
772 case ATH9K_MODE_11NG_HT40PLUS:
773 case ATH9K_MODE_11NG_HT40MINUS:
774 rd = &rd2GHz;
775 channelBM = rd->chan11g;
776 freqs = &regDmn2Ghz11gFreq[0];
777 ctl = rd->conformanceTestLimit | CTL_11G;
778 break;
779 default:
780 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
781 "Unknown HAL mode 0x%x\n", cm->mode);
782 continue;
783 }
784
785 if (ath9k_regd_is_chan_bm_zero(channelBM))
786 continue;
787 447
788 if ((cm->mode == ATH9K_MODE_11NA_HT40PLUS) || 448 if (country) {
789 (cm->mode == ATH9K_MODE_11NG_HT40PLUS)) { 449 ah->regulatory.alpha2[0] = country->isoName[0];
790 hi_adj = -20; 450 ah->regulatory.alpha2[1] = country->isoName[1];
791 }
792
793 if ((cm->mode == ATH9K_MODE_11NA_HT40MINUS) ||
794 (cm->mode == ATH9K_MODE_11NG_HT40MINUS)) {
795 low_adj = 20;
796 }
797
798 /* XXX: Add a helper here instead */
799 for (b = 0; b < 64 * BMLEN; b++) {
800 if (ath9k_regd_is_bit_set(b, channelBM)) {
801 fband = &freqs[b];
802 if (rd5GHz.regDmnEnum == MKK1
803 || rd5GHz.regDmnEnum == MKK2) {
804 if (ath9k_regd_japan_check(ah,
805 b,
806 &rd5GHz))
807 continue;
808 }
809
810 ath9k_regd_add_reg_classid(regclassids,
811 maxregids,
812 nregids,
813 fband->
814 regClassId);
815
816 if (IS_HT40_MODE(cm->mode) && (rd == &rd5GHz)) {
817 chanSep = 40;
818 if (fband->lowChannel == 5280)
819 low_adj += 20;
820
821 if (fband->lowChannel == 5170)
822 continue;
823 } else
824 chanSep = fband->channelSep;
825
826 for (c = fband->lowChannel + low_adj;
827 ((c <= (fband->highChannel + hi_adj)) &&
828 (c >= (fband->lowChannel + low_adj)));
829 c += chanSep) {
830 if (next >= maxchans) {
831 DPRINTF(ah->ah_sc,
832 ATH_DBG_REGULATORY,
833 "too many channels "
834 "for channel table\n");
835 goto done;
836 }
837 if (ath9k_regd_add_channel(ah,
838 c, c_lo, c_hi,
839 maxChan, ctl,
840 next,
841 rd5GHz,
842 fband, rd, cm,
843 ichans,
844 enableExtendedChannels))
845 next++;
846 }
847 if (IS_HT40_MODE(cm->mode) &&
848 (fband->lowChannel == 5280)) {
849 low_adj -= 20;
850 }
851 }
852 }
853 }
854done:
855 if (next != 0) {
856 int i;
857
858 if (next > ARRAY_SIZE(ah->ah_channels)) {
859 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
860 "too many channels %u; truncating to %u\n",
861 next, (int) ARRAY_SIZE(ah->ah_channels));
862 next = ARRAY_SIZE(ah->ah_channels);
863 }
864#ifdef ATH_NF_PER_CHAN
865 ath9k_regd_init_rf_buffer(ichans, next);
866#endif
867 ath9k_regd_sort(ichans, next,
868 sizeof(struct ath9k_channel),
869 ath9k_regd_chansort);
870
871 ah->ah_nchan = next;
872
873 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "Channel list:\n");
874 for (i = 0; i < next; i++) {
875 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
876 "chan: %d flags: 0x%x\n",
877 ah->ah_channels[i].channel,
878 ah->ah_channels[i].channelFlags);
879 }
880 }
881 *nchans = next;
882
883 ah->ah_countryCode = ah->ah_countryCode;
884
885 ah->ah_currentRDInUse = regdmn;
886 ah->ah_currentRD5G = rd5GHz.regDmnEnum;
887 ah->ah_currentRD2G = rd2GHz.regDmnEnum;
888 if (country == NULL) {
889 ah->ah_iso[0] = 0;
890 ah->ah_iso[1] = 0;
891 } else { 451 } else {
892 ah->ah_iso[0] = country->isoName[0]; 452 ah->regulatory.alpha2[0] = '0';
893 ah->ah_iso[1] = country->isoName[1]; 453 ah->regulatory.alpha2[1] = '0';
894 } 454 }
895 455
896 return next != 0;
897}
898
899struct ath9k_channel*
900ath9k_regd_check_channel(struct ath_hal *ah,
901 const struct ath9k_channel *c)
902{
903 struct ath9k_channel *base, *cc;
904
905 int flags = c->channelFlags & CHAN_FLAGS;
906 int n, lim;
907
908 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 456 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
909 "channel %u/0x%x (0x%x) requested\n", 457 "Country alpha2 being used: %c%c\n"
910 c->channel, c->channelFlags, flags); 458 "Regulatory.Regpair detected: 0x%0x\n",
911 459 ah->regulatory.alpha2[0], ah->regulatory.alpha2[1],
912 cc = ah->ah_curchan; 460 ah->regulatory.regpair->regDmnEnum);
913 if (cc != NULL && cc->channel == c->channel &&
914 (cc->channelFlags & CHAN_FLAGS) == flags) {
915 if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
916 (cc->privFlags & CHANNEL_DFS))
917 return NULL;
918 else
919 return cc;
920 }
921 461
922 base = ah->ah_channels; 462 return 0;
923 n = ah->ah_nchan;
924
925 for (lim = n; lim != 0; lim >>= 1) {
926 int d;
927 cc = &base[lim >> 1];
928 d = c->channel - cc->channel;
929 if (d == 0) {
930 if ((cc->channelFlags & CHAN_FLAGS) == flags) {
931 if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
932 (cc->privFlags & CHANNEL_DFS))
933 return NULL;
934 else
935 return cc;
936 }
937 d = flags - (cc->channelFlags & CHAN_FLAGS);
938 }
939 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
940 "channel %u/0x%x d %d\n",
941 cc->channel, cc->channelFlags, d);
942 if (d > 0) {
943 base = cc + 1;
944 lim--;
945 }
946 }
947 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "no match for %u/0x%x\n",
948 c->channel, c->channelFlags);
949 return NULL;
950}
951
952u32
953ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
954 struct ath9k_channel *chan)
955{
956 struct ath9k_channel *ichan = NULL;
957
958 ichan = ath9k_regd_check_channel(ah, chan);
959 if (!ichan)
960 return 0;
961
962 return ichan->antennaMax;
963} 463}
964 464
965u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan) 465u32 ath9k_regd_get_ctl(struct ath_hw *ah, struct ath9k_channel *chan)
966{ 466{
967 u32 ctl = NO_CTL; 467 u32 ctl = NO_CTL;
968 struct ath9k_channel *ichan;
969 468
970 if (ah->ah_countryCode == CTRY_DEFAULT && isWwrSKU(ah)) { 469 if (!ah->regulatory.regpair ||
470 (ah->regulatory.country_code == CTRY_DEFAULT && isWwrSKU(ah))) {
971 if (IS_CHAN_B(chan)) 471 if (IS_CHAN_B(chan))
972 ctl = SD_NO_CTL | CTL_11B; 472 ctl = SD_NO_CTL | CTL_11B;
973 else if (IS_CHAN_G(chan)) 473 else if (IS_CHAN_G(chan))
974 ctl = SD_NO_CTL | CTL_11G; 474 ctl = SD_NO_CTL | CTL_11G;
975 else 475 else
976 ctl = SD_NO_CTL | CTL_11A; 476 ctl = SD_NO_CTL | CTL_11A;
977 } else { 477 return ctl;
978 ichan = ath9k_regd_check_channel(ah, chan);
979 if (ichan != NULL) {
980 /* FIXME */
981 if (IS_CHAN_A(ichan))
982 ctl = ichan->conformanceTestLimit[0];
983 else if (IS_CHAN_B(ichan))
984 ctl = ichan->conformanceTestLimit[1];
985 else if (IS_CHAN_G(ichan))
986 ctl = ichan->conformanceTestLimit[2];
987
988 if (IS_CHAN_G(chan) && (ctl & 0xf) == CTL_11B)
989 ctl = (ctl & ~0xf) | CTL_11G;
990 }
991 } 478 }
992 return ctl;
993}
994 479
995void ath9k_regd_get_current_country(struct ath_hal *ah, 480 if (IS_CHAN_B(chan))
996 struct ath9k_country_entry *ctry) 481 ctl = ah->regulatory.regpair->reg_2ghz_ctl | CTL_11B;
997{ 482 else if (IS_CHAN_G(chan))
998 u16 rd = ath9k_regd_get_eepromRD(ah); 483 ctl = ah->regulatory.regpair->reg_5ghz_ctl | CTL_11G;
484 else
485 ctl = ah->regulatory.regpair->reg_5ghz_ctl | CTL_11A;
999 486
1000 ctry->isMultidomain = false; 487 return ctl;
1001 if (rd == CTRY_DEFAULT)
1002 ctry->isMultidomain = true;
1003 else if (!(rd & COUNTRY_ERD_FLAG))
1004 ctry->isMultidomain = isWwrSKU(ah);
1005
1006 ctry->countryCode = ah->ah_countryCode;
1007 ctry->regDmnEnum = ah->ah_currentRD;
1008 ctry->regDmn5G = ah->ah_currentRD5G;
1009 ctry->regDmn2G = ah->ah_currentRD2G;
1010 ctry->iso[0] = ah->ah_iso[0];
1011 ctry->iso[1] = ah->ah_iso[1];
1012 ctry->iso[2] = ah->ah_iso[2];
1013} 488}
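With this rewrite ath9k_regd_init() returns 0 or a negative errno instead of a bool, and ath9k_regd_get_ctl() reads the conformance test limits straight from the cached regpair. A hedged sketch of how an attach path might consume the new return convention; the caller below is hypothetical, only the ath9k_regd_init() prototype is taken from regd.h.

/* Hypothetical caller; the real hookup lives in the ath9k attach path. */
struct ath_hw;
int ath9k_regd_init(struct ath_hw *ah);

static int example_attach_regulatory(struct ath_hw *ah)
{
	int r;

	/* 0 on success, -EINVAL when the EEPROM regdomain/regpair is invalid;
	 * the old ath9k_regd_init_channels() returned a bare bool instead. */
	r = ath9k_regd_init(ah);
	if (r)
		return r;

	/* From here on the cached state (ah->regulatory.regpair, alpha2,
	 * country_code) drives ath9k_regd_get_ctl() and the cfg80211 hints. */
	return 0;
}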
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
index 512d990aa7e..39420de818f 100644
--- a/drivers/net/wireless/ath9k/regd.h
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -17,128 +17,14 @@
17#ifndef REGD_H 17#ifndef REGD_H
18#define REGD_H 18#define REGD_H
19 19
20#include "ath9k.h"
21
22#define BMLEN 2
23#define BMZERO {(u64) 0, (u64) 0}
24
25#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh, _fi, _fj, _fk, _fl) \
26 {((((_fa >= 0) && (_fa < 64)) ? \
27 (((u64) 1) << _fa) : (u64) 0) | \
28 (((_fb >= 0) && (_fb < 64)) ? \
29 (((u64) 1) << _fb) : (u64) 0) | \
30 (((_fc >= 0) && (_fc < 64)) ? \
31 (((u64) 1) << _fc) : (u64) 0) | \
32 (((_fd >= 0) && (_fd < 64)) ? \
33 (((u64) 1) << _fd) : (u64) 0) | \
34 (((_fe >= 0) && (_fe < 64)) ? \
35 (((u64) 1) << _fe) : (u64) 0) | \
36 (((_ff >= 0) && (_ff < 64)) ? \
37 (((u64) 1) << _ff) : (u64) 0) | \
38 (((_fg >= 0) && (_fg < 64)) ? \
39 (((u64) 1) << _fg) : (u64) 0) | \
40 (((_fh >= 0) && (_fh < 64)) ? \
41 (((u64) 1) << _fh) : (u64) 0) | \
42 (((_fi >= 0) && (_fi < 64)) ? \
43 (((u64) 1) << _fi) : (u64) 0) | \
44 (((_fj >= 0) && (_fj < 64)) ? \
45 (((u64) 1) << _fj) : (u64) 0) | \
46 (((_fk >= 0) && (_fk < 64)) ? \
47 (((u64) 1) << _fk) : (u64) 0) | \
48 (((_fl >= 0) && (_fl < 64)) ? \
49 (((u64) 1) << _fl) : (u64) 0) | \
50 ((((_fa > 63) && (_fa < 128)) ? \
51 (((u64) 1) << (_fa - 64)) : (u64) 0) | \
52 (((_fb > 63) && (_fb < 128)) ? \
53 (((u64) 1) << (_fb - 64)) : (u64) 0) | \
54 (((_fc > 63) && (_fc < 128)) ? \
55 (((u64) 1) << (_fc - 64)) : (u64) 0) | \
56 (((_fd > 63) && (_fd < 128)) ? \
57 (((u64) 1) << (_fd - 64)) : (u64) 0) | \
58 (((_fe > 63) && (_fe < 128)) ? \
59 (((u64) 1) << (_fe - 64)) : (u64) 0) | \
60 (((_ff > 63) && (_ff < 128)) ? \
61 (((u64) 1) << (_ff - 64)) : (u64) 0) | \
62 (((_fg > 63) && (_fg < 128)) ? \
63 (((u64) 1) << (_fg - 64)) : (u64) 0) | \
64 (((_fh > 63) && (_fh < 128)) ? \
65 (((u64) 1) << (_fh - 64)) : (u64) 0) | \
66 (((_fi > 63) && (_fi < 128)) ? \
67 (((u64) 1) << (_fi - 64)) : (u64) 0) | \
68 (((_fj > 63) && (_fj < 128)) ? \
69 (((u64) 1) << (_fj - 64)) : (u64) 0) | \
70 (((_fk > 63) && (_fk < 128)) ? \
71 (((u64) 1) << (_fk - 64)) : (u64) 0) | \
72 (((_fl > 63) && (_fl < 128)) ? \
73 (((u64) 1) << (_fl - 64)) : (u64) 0)))}
74
75#define DEF_REGDMN FCC1_FCCA
76#define DEF_DMN_5 FCC1
77#define DEF_DMN_2 FCCA
78#define COUNTRY_ERD_FLAG 0x8000 20#define COUNTRY_ERD_FLAG 0x8000
79#define WORLDWIDE_ROAMING_FLAG 0x4000 21#define WORLDWIDE_ROAMING_FLAG 0x4000
80#define SUPER_DOMAIN_MASK 0x0fff
81#define COUNTRY_CODE_MASK 0x3fff
82#define CF_INTERFERENCE (CHANNEL_CW_INT | CHANNEL_RADAR_INT)
83#define CHANNEL_14 (2484)
84#define IS_11G_CH14(_ch,_cf) \
85 (((_ch) == CHANNEL_14) && ((_cf) == CHANNEL_G))
86
87#define NO_PSCAN 0x0ULL
88#define PSCAN_FCC 0x0000000000000001ULL
89#define PSCAN_FCC_T 0x0000000000000002ULL
90#define PSCAN_ETSI 0x0000000000000004ULL
91#define PSCAN_MKK1 0x0000000000000008ULL
92#define PSCAN_MKK2 0x0000000000000010ULL
93#define PSCAN_MKKA 0x0000000000000020ULL
94#define PSCAN_MKKA_G 0x0000000000000040ULL
95#define PSCAN_ETSIA 0x0000000000000080ULL
96#define PSCAN_ETSIB 0x0000000000000100ULL
97#define PSCAN_ETSIC 0x0000000000000200ULL
98#define PSCAN_WWR 0x0000000000000400ULL
99#define PSCAN_MKKA1 0x0000000000000800ULL
100#define PSCAN_MKKA1_G 0x0000000000001000ULL
101#define PSCAN_MKKA2 0x0000000000002000ULL
102#define PSCAN_MKKA2_G 0x0000000000004000ULL
103#define PSCAN_MKK3 0x0000000000008000ULL
104#define PSCAN_DEFER 0x7FFFFFFFFFFFFFFFULL
105#define IS_ECM_CHAN 0x8000000000000000ULL
106 22
107#define isWwrSKU(_ah) \ 23#define isWwrSKU(_ah) \
108 (((ath9k_regd_get_eepromRD((_ah)) & WORLD_SKU_MASK) == \ 24 (((ath9k_regd_get_eepromRD((_ah)) & WORLD_SKU_MASK) == \
109 WORLD_SKU_PREFIX) || \ 25 WORLD_SKU_PREFIX) || \
110 (ath9k_regd_get_eepromRD(_ah) == WORLD)) 26 (ath9k_regd_get_eepromRD(_ah) == WORLD))
111 27
112#define isWwrSKU_NoMidband(_ah) \
113 ((ath9k_regd_get_eepromRD((_ah)) == WOR3_WORLD) || \
114 (ath9k_regd_get_eepromRD(_ah) == WOR4_WORLD) || \
115 (ath9k_regd_get_eepromRD(_ah) == WOR5_ETSIC))
116
117#define isUNII1OddChan(ch) \
118 ((ch == 5170) || (ch == 5190) || (ch == 5210) || (ch == 5230))
119
120#define IS_HT40_MODE(_mode) \
121 (((_mode == ATH9K_MODE_11NA_HT40PLUS || \
122 _mode == ATH9K_MODE_11NG_HT40PLUS || \
123 _mode == ATH9K_MODE_11NA_HT40MINUS || \
124 _mode == ATH9K_MODE_11NG_HT40MINUS) ? true : false))
125
126#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER)
127
128#define swap_array(_a, _b, _size) { \
129 u8 *s = _b; \
130 int i = _size; \
131 do { \
132 u8 tmp = *_a; \
133 *_a++ = *s; \
134 *s++ = tmp; \
135 } while (--i); \
136 _a -= _size; \
137}
138
139
140#define HALF_MAXCHANBW 10
141
142#define MULTI_DOMAIN_MASK 0xFF00 28#define MULTI_DOMAIN_MASK 0xFF00
143 29
144#define WORLD_SKU_MASK 0x00F0 30#define WORLD_SKU_MASK 0x00F0
@@ -147,81 +33,28 @@
147#define CHANNEL_HALF_BW 10 33#define CHANNEL_HALF_BW 10
148#define CHANNEL_QUARTER_BW 5 34#define CHANNEL_QUARTER_BW 5
149 35
150typedef int ath_hal_cmp_t(const void *, const void *);
151
152struct reg_dmn_pair_mapping { 36struct reg_dmn_pair_mapping {
153 u16 regDmnEnum; 37 u16 regDmnEnum;
154 u16 regDmn5GHz; 38 u16 reg_5ghz_ctl;
155 u16 regDmn2GHz; 39 u16 reg_2ghz_ctl;
156 u32 flags5GHz;
157 u32 flags2GHz;
158 u64 pscanMask;
159 u16 singleCC;
160};
161
162struct ccmap {
163 char isoName[3];
164 u16 countryCode;
165}; 40};
166 41
167struct country_code_to_enum_rd { 42struct country_code_to_enum_rd {
168 u16 countryCode; 43 u16 countryCode;
169 u16 regDmnEnum; 44 u16 regDmnEnum;
170 const char *isoName; 45 const char *isoName;
171 const char *name;
172 bool allow11g;
173 bool allow11aTurbo;
174 bool allow11gTurbo;
175 bool allow11ng20;
176 bool allow11ng40;
177 bool allow11na20;
178 bool allow11na40;
179 u16 outdoorChanStart;
180};
181
182struct RegDmnFreqBand {
183 u16 lowChannel;
184 u16 highChannel;
185 u8 powerDfs;
186 u8 antennaMax;
187 u8 channelBW;
188 u8 channelSep;
189 u64 useDfs;
190 u64 usePassScan;
191 u8 regClassId;
192};
193
194struct regDomain {
195 u16 regDmnEnum;
196 u8 conformanceTestLimit;
197 u64 dfsMask;
198 u64 pscan;
199 u32 flags;
200 u64 chan11a[BMLEN];
201 u64 chan11a_turbo[BMLEN];
202 u64 chan11a_dyn_turbo[BMLEN];
203 u64 chan11b[BMLEN];
204 u64 chan11g[BMLEN];
205 u64 chan11g_turbo[BMLEN];
206};
207
208struct cmode {
209 u32 mode;
210 u32 flags;
211};
212
213#define YES true
214#define NO false
215
216struct japan_bandcheck {
217 u16 freqbandbit;
218 u32 eepromflagtocheck;
219}; 46};
220 47
221struct common_mode_power { 48struct ath9k_regulatory {
222 u16 lchan; 49 char alpha2[2];
223 u16 hchan; 50 u16 country_code;
224 u8 pwrlvl; 51 u16 max_power_level;
52 u32 tp_scale;
53 u16 current_rd;
54 u16 current_rd_ext;
55 u16 current_rd_inuse;
56 int16_t power_limit;
57 struct reg_dmn_pair_mapping *regpair;
225}; 58};
226 59
227enum CountryCode { 60enum CountryCode {
@@ -406,7 +239,17 @@ enum CountryCode {
406 CTRY_BELGIUM2 = 5002 239 CTRY_BELGIUM2 = 5002
407}; 240};
408 241
409void ath9k_regd_get_current_country(struct ath_hal *ah, 242u16 ath9k_regd_get_rd(struct ath_hw *ah);
243bool ath9k_is_world_regd(struct ath_hw *ah);
244const struct ieee80211_regdomain *ath9k_world_regdomain(struct ath_hw *ah);
245const struct ieee80211_regdomain *ath9k_default_world_regdomain(void);
246void ath9k_reg_apply_world_flags(struct wiphy *wiphy, enum reg_set_by setby);
247void ath9k_reg_apply_radar_flags(struct wiphy *wiphy);
248int ath9k_regd_init(struct ath_hw *ah);
249bool ath9k_regd_is_eeprom_valid(struct ath_hw *ah);
250u32 ath9k_regd_get_ctl(struct ath_hw *ah, struct ath9k_channel *chan);
251int ath9k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
252void ath9k_regd_get_current_country(struct ath_hw *ah,
410 struct ath9k_country_entry *ctry); 253 struct ath9k_country_entry *ctry);
411 254
412#endif 255#endif
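
The prototypes above hint at the new division of labour: the driver only resolves the EEPROM regulatory-domain code to a reg_dmn_pair_mapping and hands the result to cfg80211, which applies the actual channel rules via CRDA. A minimal sketch of such a lookup, assuming the trimmed regDomainPairs[] table from regd_common.h further down in this diff (the helper name is invented for illustration and is not necessarily the in-tree function):

	/* Illustrative only: resolve an EEPROM regdomain code to its
	 * regpair entry.  Assumes the regDomainPairs[] table from
	 * regd_common.h (shown below in this diff). */
	static struct reg_dmn_pair_mapping *ath9k_regd_find_pair(u16 rd)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
			if (regDomainPairs[i].regDmnEnum == rd)
				return &regDomainPairs[i];
		return NULL;
	}

	/* e.g. reg->regpair = ath9k_regd_find_pair(rd); with rd taken
	 * from ath9k_regd_get_eepromRD() after masking off flag bits
	 * such as COUNTRY_ERD_FLAG. */
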
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
index 6df1b3b77c2..b41d0002f3f 100644
--- a/drivers/net/wireless/ath9k/regd_common.h
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -150,1766 +150,324 @@ enum EnumRd {
150 MKK9_MKKC = 0xFE, 150 MKK9_MKKC = 0xFE,
151 MKK9_MKKA2 = 0xFF, 151 MKK9_MKKA2 = 0xFF,
152 152
153 APL1 = 0x0150,
154 APL2 = 0x0250,
155 APL3 = 0x0350,
156 APL4 = 0x0450,
157 APL5 = 0x0550,
158 APL6 = 0x0650,
159 APL7 = 0x0750,
160 APL8 = 0x0850,
161 APL9 = 0x0950,
162 APL10 = 0x1050,
163
164 ETSI1 = 0x0130,
165 ETSI2 = 0x0230,
166 ETSI3 = 0x0330,
167 ETSI4 = 0x0430,
168 ETSI5 = 0x0530,
169 ETSI6 = 0x0630,
170 ETSIA = 0x0A30,
171 ETSIB = 0x0B30,
172 ETSIC = 0x0C30,
173
174 FCC1 = 0x0110,
175 FCC2 = 0x0120,
176 FCC3 = 0x0160,
177 FCC4 = 0x0165,
178 FCC5 = 0x0510,
179 FCC6 = 0x0610,
180 FCCA = 0x0A10,
181
182 APLD = 0x0D50,
183
184 MKK1 = 0x0140,
185 MKK2 = 0x0240,
186 MKK3 = 0x0340,
187 MKK4 = 0x0440,
188 MKK5 = 0x0540,
189 MKK6 = 0x0640,
190 MKK7 = 0x0740,
191 MKK8 = 0x0840,
192 MKK9 = 0x0940,
193 MKK10 = 0x0B40,
194 MKK11 = 0x1140,
195 MKK12 = 0x1240,
196 MKK13 = 0x0C40,
197 MKK14 = 0x1440,
198 MKK15 = 0x1540,
199 MKKA = 0x0A40,
200 MKKC = 0x0A50,
201
202 NULL1 = 0x0198,
203 WORLD = 0x0199, 153 WORLD = 0x0199,
204 DEBUG_REG_DMN = 0x01ff, 154 DEBUG_REG_DMN = 0x01ff,
205}; 155};
206 156
207enum { 157enum ctl_group {
208 FCC = 0x10, 158 CTL_FCC = 0x10,
209 MKK = 0x40, 159 CTL_MKK = 0x40,
210 ETSI = 0x30, 160 CTL_ETSI = 0x30,
211};
212
213enum {
214 NO_REQ = 0x00000000,
215 DISALLOW_ADHOC_11A = 0x00000001,
216 DISALLOW_ADHOC_11A_TURB = 0x00000002,
217 NEED_NFC = 0x00000004,
218
219 ADHOC_PER_11D = 0x00000008,
220 ADHOC_NO_11A = 0x00000010,
221
222 PUBLIC_SAFETY_DOMAIN = 0x00000020,
223 LIMIT_FRAME_4MS = 0x00000040,
224
225 NO_HOSTAP = 0x00000080,
226
227 REQ_MASK = 0x000000FF,
228}; 161};
229 162
230#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \ 163/* Regpair to CTL band mapping */
231 (~(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB)))
232#define REG_DOMAIN_5GHZ_MASK REQ_MASK
233
234static struct reg_dmn_pair_mapping regDomainPairs[] = { 164static struct reg_dmn_pair_mapping regDomainPairs[] = {
235 {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN, NO_REQ, NO_REQ, 165 /* regpair, 5 GHz CTL, 2 GHz CTL */
236 PSCAN_DEFER, 0}, 166 {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN},
237 {NULL1_WORLD, NULL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 167 {NULL1_WORLD, NO_CTL, CTL_ETSI},
238 {NULL1_ETSIB, NULL1, ETSIB, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 168 {NULL1_ETSIB, NO_CTL, CTL_ETSI},
239 {NULL1_ETSIC, NULL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 169 {NULL1_ETSIC, NO_CTL, CTL_ETSI},
240 170
241 {FCC2_FCCA, FCC2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 171 {FCC2_FCCA, CTL_FCC, CTL_FCC},
242 {FCC2_WORLD, FCC2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 172 {FCC2_WORLD, CTL_FCC, CTL_ETSI},
243 {FCC2_ETSIC, FCC2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 173 {FCC2_ETSIC, CTL_FCC, CTL_ETSI},
244 {FCC3_FCCA, FCC3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 174 {FCC3_FCCA, CTL_FCC, CTL_FCC},
245 {FCC3_WORLD, FCC3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 175 {FCC3_WORLD, CTL_FCC, CTL_ETSI},
246 {FCC4_FCCA, FCC4, FCCA, 176 {FCC4_FCCA, CTL_FCC, CTL_FCC},
247 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 177 {FCC5_FCCA, CTL_FCC, CTL_FCC},
248 0}, 178 {FCC6_FCCA, CTL_FCC, CTL_FCC},
249 {FCC5_FCCA, FCC5, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 179 {FCC6_WORLD, CTL_FCC, CTL_ETSI},
250 {FCC6_FCCA, FCC6, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 180
251 {FCC6_WORLD, FCC6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 181 {ETSI1_WORLD, CTL_ETSI, CTL_ETSI},
252 182 {ETSI2_WORLD, CTL_ETSI, CTL_ETSI},
253 {ETSI1_WORLD, ETSI1, WORLD, 183 {ETSI3_WORLD, CTL_ETSI, CTL_ETSI},
254 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 184 {ETSI4_WORLD, CTL_ETSI, CTL_ETSI},
255 0}, 185 {ETSI5_WORLD, CTL_ETSI, CTL_ETSI},
256 {ETSI2_WORLD, ETSI2, WORLD, 186 {ETSI6_WORLD, CTL_ETSI, CTL_ETSI},
257 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 187
258 0}, 188 /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */
259 {ETSI3_WORLD, ETSI3, WORLD, 189 {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI},
260 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 190 {FRANCE_RES, CTL_ETSI, CTL_ETSI},
261 0}, 191
262 {ETSI4_WORLD, ETSI4, WORLD, 192 {FCC1_WORLD, CTL_FCC, CTL_ETSI},
263 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 193 {FCC1_FCCA, CTL_FCC, CTL_FCC},
264 0}, 194 {APL1_WORLD, CTL_FCC, CTL_ETSI},
265 {ETSI5_WORLD, ETSI5, WORLD, 195 {APL2_WORLD, CTL_FCC, CTL_ETSI},
266 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 196 {APL3_WORLD, CTL_FCC, CTL_ETSI},
267 0}, 197 {APL4_WORLD, CTL_FCC, CTL_ETSI},
268 {ETSI6_WORLD, ETSI6, WORLD, 198 {APL5_WORLD, CTL_FCC, CTL_ETSI},
269 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 199 {APL6_WORLD, CTL_ETSI, CTL_ETSI},
270 0}, 200 {APL8_WORLD, CTL_ETSI, CTL_ETSI},
271 201 {APL9_WORLD, CTL_ETSI, CTL_ETSI},
272 {ETSI3_ETSIA, ETSI3, WORLD, 202
273 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 203 {APL3_FCCA, CTL_FCC, CTL_FCC},
274 0}, 204 {APL1_ETSIC, CTL_FCC, CTL_ETSI},
275 {FRANCE_RES, ETSI3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 205 {APL2_ETSIC, CTL_FCC, CTL_ETSI},
276 206 {APL2_APLD, CTL_FCC, NO_CTL},
277 {FCC1_WORLD, FCC1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 207
278 {FCC1_FCCA, FCC1, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 208 {MKK1_MKKA, CTL_MKK, CTL_MKK},
279 {APL1_WORLD, APL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 209 {MKK1_MKKB, CTL_MKK, CTL_MKK},
280 {APL2_WORLD, APL2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 210 {MKK1_FCCA, CTL_MKK, CTL_FCC},
281 {APL3_WORLD, APL3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 211 {MKK1_MKKA1, CTL_MKK, CTL_MKK},
282 {APL4_WORLD, APL4, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 212 {MKK1_MKKA2, CTL_MKK, CTL_MKK},
283 {APL5_WORLD, APL5, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 213 {MKK1_MKKC, CTL_MKK, CTL_MKK},
284 {APL6_WORLD, APL6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 214
285 {APL8_WORLD, APL8, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 215 {MKK2_MKKA, CTL_MKK, CTL_MKK},
286 {APL9_WORLD, APL9, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 216 {MKK3_MKKA, CTL_MKK, CTL_MKK},
287 217 {MKK3_MKKB, CTL_MKK, CTL_MKK},
288 {APL3_FCCA, APL3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 218 {MKK3_MKKA1, CTL_MKK, CTL_MKK},
289 {APL1_ETSIC, APL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 219 {MKK3_MKKA2, CTL_MKK, CTL_MKK},
290 {APL2_ETSIC, APL2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, 220 {MKK3_MKKC, CTL_MKK, CTL_MKK},
291 {APL2_APLD, APL2, APLD, NO_REQ, NO_REQ, PSCAN_DEFER,}, 221 {MKK3_FCCA, CTL_MKK, CTL_FCC},
292 222
293 {MKK1_MKKA, MKK1, MKKA, 223 {MKK4_MKKA, CTL_MKK, CTL_MKK},
294 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 224 {MKK4_MKKB, CTL_MKK, CTL_MKK},
295 PSCAN_MKK1 | PSCAN_MKKA, CTRY_JAPAN}, 225 {MKK4_MKKA1, CTL_MKK, CTL_MKK},
296 {MKK1_MKKB, MKK1, MKKA, 226 {MKK4_MKKA2, CTL_MKK, CTL_MKK},
297 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | 227 {MKK4_MKKC, CTL_MKK, CTL_MKK},
298 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, 228 {MKK4_FCCA, CTL_MKK, CTL_FCC},
299 CTRY_JAPAN1}, 229
300 {MKK1_FCCA, MKK1, FCCA, 230 {MKK5_MKKB, CTL_MKK, CTL_MKK},
301 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 231 {MKK5_MKKA2, CTL_MKK, CTL_MKK},
302 PSCAN_MKK1, CTRY_JAPAN2}, 232 {MKK5_MKKC, CTL_MKK, CTL_MKK},
303 {MKK1_MKKA1, MKK1, MKKA, 233
304 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 234 {MKK6_MKKB, CTL_MKK, CTL_MKK},
305 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN4}, 235 {MKK6_MKKA1, CTL_MKK, CTL_MKK},
306 {MKK1_MKKA2, MKK1, MKKA, 236 {MKK6_MKKA2, CTL_MKK, CTL_MKK},
307 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 237 {MKK6_MKKC, CTL_MKK, CTL_MKK},
308 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN5}, 238 {MKK6_FCCA, CTL_MKK, CTL_FCC},
309 {MKK1_MKKC, MKK1, MKKC, 239
310 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 240 {MKK7_MKKB, CTL_MKK, CTL_MKK},
311 PSCAN_MKK1, CTRY_JAPAN6}, 241 {MKK7_MKKA1, CTL_MKK, CTL_MKK},
312 242 {MKK7_MKKA2, CTL_MKK, CTL_MKK},
313 {MKK2_MKKA, MKK2, MKKA, 243 {MKK7_MKKC, CTL_MKK, CTL_MKK},
314 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | 244 {MKK7_FCCA, CTL_MKK, CTL_FCC},
315 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKKA | PSCAN_MKKA_G, 245
316 CTRY_JAPAN3}, 246 {MKK8_MKKB, CTL_MKK, CTL_MKK},
317 247 {MKK8_MKKA2, CTL_MKK, CTL_MKK},
318 {MKK3_MKKA, MKK3, MKKA, 248 {MKK8_MKKC, CTL_MKK, CTL_MKK},
319 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 249
320 PSCAN_MKKA, CTRY_JAPAN25}, 250 {MKK9_MKKA, CTL_MKK, CTL_MKK},
321 {MKK3_MKKB, MKK3, MKKA, 251 {MKK9_FCCA, CTL_MKK, CTL_FCC},
322 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | 252 {MKK9_MKKA1, CTL_MKK, CTL_MKK},
323 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKKA | PSCAN_MKKA_G, 253 {MKK9_MKKA2, CTL_MKK, CTL_MKK},
324 CTRY_JAPAN7}, 254 {MKK9_MKKC, CTL_MKK, CTL_MKK},
325 {MKK3_MKKA1, MKK3, MKKA, 255
326 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 256 {MKK10_MKKA, CTL_MKK, CTL_MKK},
327 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN26}, 257 {MKK10_FCCA, CTL_MKK, CTL_FCC},
328 {MKK3_MKKA2, MKK3, MKKA, 258 {MKK10_MKKA1, CTL_MKK, CTL_MKK},
329 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 259 {MKK10_MKKA2, CTL_MKK, CTL_MKK},
330 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN8}, 260 {MKK10_MKKC, CTL_MKK, CTL_MKK},
331 {MKK3_MKKC, MKK3, MKKC, 261
332 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 262 {MKK11_MKKA, CTL_MKK, CTL_MKK},
333 NO_PSCAN, CTRY_JAPAN9}, 263 {MKK11_FCCA, CTL_MKK, CTL_FCC},
334 {MKK3_FCCA, MKK3, FCCA, 264 {MKK11_MKKA1, CTL_MKK, CTL_MKK},
335 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 265 {MKK11_MKKA2, CTL_MKK, CTL_MKK},
336 NO_PSCAN, CTRY_JAPAN27}, 266 {MKK11_MKKC, CTL_MKK, CTL_MKK},
337 267
338 {MKK4_MKKA, MKK4, MKKA, 268 {MKK12_MKKA, CTL_MKK, CTL_MKK},
339 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 269 {MKK12_FCCA, CTL_MKK, CTL_FCC},
340 PSCAN_MKK3, CTRY_JAPAN36}, 270 {MKK12_MKKA1, CTL_MKK, CTL_MKK},
341 {MKK4_MKKB, MKK4, MKKA, 271 {MKK12_MKKA2, CTL_MKK, CTL_MKK},
342 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | 272 {MKK12_MKKC, CTL_MKK, CTL_MKK},
343 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, 273
344 CTRY_JAPAN10}, 274 {MKK13_MKKB, CTL_MKK, CTL_MKK},
345 {MKK4_MKKA1, MKK4, MKKA, 275 {MKK14_MKKA1, CTL_MKK, CTL_MKK},
346 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 276 {MKK15_MKKA1, CTL_MKK, CTL_MKK},
347 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28}, 277
348 {MKK4_MKKA2, MKK4, MKKA, 278 {WOR0_WORLD, NO_CTL, NO_CTL},
349 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 279 {WOR1_WORLD, NO_CTL, NO_CTL},
350 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11}, 280 {WOR2_WORLD, NO_CTL, NO_CTL},
351 {MKK4_MKKC, MKK4, MKKC, 281 {WOR3_WORLD, NO_CTL, NO_CTL},
352 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 282 {WOR4_WORLD, NO_CTL, NO_CTL},
353 PSCAN_MKK3, CTRY_JAPAN12}, 283 {WOR5_ETSIC, NO_CTL, NO_CTL},
354 {MKK4_FCCA, MKK4, FCCA, 284 {WOR01_WORLD, NO_CTL, NO_CTL},
355 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, 285 {WOR02_WORLD, NO_CTL, NO_CTL},
356 PSCAN_MKK3, CTRY_JAPAN29}, 286 {EU1_WORLD, NO_CTL, NO_CTL},
357 287 {WOR9_WORLD, NO_CTL, NO_CTL},
358 {MKK5_MKKB, MKK5, MKKA, 288 {WORA_WORLD, NO_CTL, NO_CTL},
359 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | 289 {WORB_WORLD, NO_CTL, NO_CTL},
360 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
361 CTRY_JAPAN13},
362 {MKK5_MKKA2, MKK5, MKKA,
363 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
364 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN14},
365 {MKK5_MKKC, MKK5, MKKC,
366 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
367 PSCAN_MKK3, CTRY_JAPAN15},
368
369 {MKK6_MKKB, MKK6, MKKA,
370 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
371 PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN16},
372 {MKK6_MKKA1, MKK6, MKKA,
373 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
374 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN30},
375 {MKK6_MKKA2, MKK6, MKKA,
376 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
377 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN17},
378 {MKK6_MKKC, MKK6, MKKC,
379 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
380 PSCAN_MKK1, CTRY_JAPAN18},
381 {MKK6_FCCA, MKK6, FCCA,
382 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
383 NO_PSCAN, CTRY_JAPAN31},
384
385 {MKK7_MKKB, MKK7, MKKA,
386 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
387 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
388 CTRY_JAPAN19},
389 {MKK7_MKKA1, MKK7, MKKA,
390 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
391 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN32},
392 {MKK7_MKKA2, MKK7, MKKA,
393 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
394 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
395 CTRY_JAPAN20},
396 {MKK7_MKKC, MKK7, MKKC,
397 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
398 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN21},
399 {MKK7_FCCA, MKK7, FCCA,
400 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
401 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN33},
402
403 {MKK8_MKKB, MKK8, MKKA,
404 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
405 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
406 CTRY_JAPAN22},
407 {MKK8_MKKA2, MKK8, MKKA,
408 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
409 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
410 CTRY_JAPAN23},
411 {MKK8_MKKC, MKK8, MKKC,
412 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
413 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN24},
414
415 {MKK9_MKKA, MKK9, MKKA,
416 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
417 LIMIT_FRAME_4MS, NEED_NFC,
418 PSCAN_MKK2 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
419 CTRY_JAPAN34},
420 {MKK9_FCCA, MKK9, FCCA,
421 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
422 NO_PSCAN, CTRY_JAPAN37},
423 {MKK9_MKKA1, MKK9, MKKA,
424 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
425 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38},
426 {MKK9_MKKA2, MKK9, MKKA,
427 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
428 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN40},
429 {MKK9_MKKC, MKK9, MKKC,
430 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
431 NO_PSCAN, CTRY_JAPAN39},
432
433 {MKK10_MKKA, MKK10, MKKA,
434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
435 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKK3, CTRY_JAPAN35},
436 {MKK10_FCCA, MKK10, FCCA,
437 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
438 NO_PSCAN, CTRY_JAPAN41},
439 {MKK10_MKKA1, MKK10, MKKA,
440 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
441 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN42},
442 {MKK10_MKKA2, MKK10, MKKA,
443 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
444 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN44},
445 {MKK10_MKKC, MKK10, MKKC,
446 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
447 NO_PSCAN, CTRY_JAPAN43},
448
449 {MKK11_MKKA, MKK11, MKKA,
450 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
451 PSCAN_MKK3, CTRY_JAPAN45},
452 {MKK11_FCCA, MKK11, FCCA,
453 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
454 PSCAN_MKK3, CTRY_JAPAN46},
455 {MKK11_MKKA1, MKK11, MKKA,
456 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
457 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN47},
458 {MKK11_MKKA2, MKK11, MKKA,
459 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
460 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN49},
461 {MKK11_MKKC, MKK11, MKKC,
462 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
463 PSCAN_MKK3, CTRY_JAPAN48},
464
465 {MKK12_MKKA, MKK12, MKKA,
466 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
467 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN50},
468 {MKK12_FCCA, MKK12, FCCA,
469 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
470 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN51},
471 {MKK12_MKKA1, MKK12, MKKA,
472 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
473 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G,
474 CTRY_JAPAN52},
475 {MKK12_MKKA2, MKK12, MKKA,
476 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
477 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
478 CTRY_JAPAN54},
479 {MKK12_MKKC, MKK12, MKKC,
480 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
481 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN53},
482
483 {MKK13_MKKB, MKK13, MKKA,
484 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
485 LIMIT_FRAME_4MS, NEED_NFC,
486 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
487 CTRY_JAPAN57},
488
489 {MKK14_MKKA1, MKK14, MKKA,
490 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
491 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN58},
492 {MKK15_MKKA1, MKK15, MKKA,
493 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
494 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN59},
495
496 {WOR0_WORLD, WOR0_WORLD, WOR0_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
497 0},
498 {WOR1_WORLD, WOR1_WORLD, WOR1_WORLD,
499 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
500 0},
501 {WOR2_WORLD, WOR2_WORLD, WOR2_WORLD, DISALLOW_ADHOC_11A_TURB,
502 NO_REQ, PSCAN_DEFER, 0},
503 {WOR3_WORLD, WOR3_WORLD, WOR3_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
504 0},
505 {WOR4_WORLD, WOR4_WORLD, WOR4_WORLD,
506 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
507 0},
508 {WOR5_ETSIC, WOR5_ETSIC, WOR5_ETSIC,
509 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
510 0},
511 {WOR01_WORLD, WOR01_WORLD, WOR01_WORLD, NO_REQ, NO_REQ,
512 PSCAN_DEFER, 0},
513 {WOR02_WORLD, WOR02_WORLD, WOR02_WORLD, NO_REQ, NO_REQ,
514 PSCAN_DEFER, 0},
515 {EU1_WORLD, EU1_WORLD, EU1_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
516 {WOR9_WORLD, WOR9_WORLD, WOR9_WORLD,
517 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
518 0},
519 {WORA_WORLD, WORA_WORLD, WORA_WORLD,
520 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
521 0},
522 {WORB_WORLD, WORB_WORLD, WORB_WORLD,
523 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
524 0},
525}; 290};
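
Given a matched regpair, picking the conformance-test-limit (CTL) group for a channel reduces to choosing the 2 GHz or 5 GHz member; a hedged sketch only, since the real selection is done inside ath9k_regd_get_ctl(), whose body is not part of this hunk:

	/* Illustrative only: per-band CTL selection from a regpair. */
	static u32 regpair_band_ctl(struct reg_dmn_pair_mapping *regpair,
				    bool is_2ghz)
	{
		if (!regpair)
			return NO_CTL;
		return is_2ghz ? regpair->reg_2ghz_ctl : regpair->reg_5ghz_ctl;
	}
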
526 291
527#define NO_INTERSECT_REQ 0xFFFFFFFF
528#define NO_UNION_REQ 0
529
530static struct country_code_to_enum_rd allCountries[] = { 292static struct country_code_to_enum_rd allCountries[] = {
531 {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES, 293 {CTRY_DEBUG, NO_ENUMRD, "DB"},
532 YES, YES, 7000}, 294 {CTRY_DEFAULT, FCC1_FCCA, "CO"},
533 {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES, 295 {CTRY_ALBANIA, NULL1_WORLD, "AL"},
534 YES, YES, YES, YES, 7000}, 296 {CTRY_ALGERIA, NULL1_WORLD, "DZ"},
535 {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, YES, NO, 297 {CTRY_ARGENTINA, APL3_WORLD, "AR"},
536 NO, NO, 7000}, 298 {CTRY_ARMENIA, ETSI4_WORLD, "AM"},
537 {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, YES, NO, 299 {CTRY_AUSTRALIA, FCC2_WORLD, "AU"},
538 NO, NO, 7000}, 300 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
539 {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, YES, 301 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
540 NO, YES, NO, 7000}, 302 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
541 {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, YES, 303 {CTRY_BAHRAIN, APL6_WORLD, "BH"},
542 YES, NO, NO, 7000}, 304 {CTRY_BELARUS, ETSI1_WORLD, "BY"},
543 {CTRY_AUSTRALIA, FCC2_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES, 305 {CTRY_BELGIUM, ETSI1_WORLD, "BE"},
544 YES, YES, YES, 7000}, 306 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
545 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU", "AUSTRALIA2", YES, YES, YES, 307 {CTRY_BELIZE, APL1_ETSIC, "BZ"},
546 YES, YES, YES, YES, 7000}, 308 {CTRY_BOLIVIA, APL1_ETSIC, "BO"},
547 {CTRY_AUSTRIA, ETSI1_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES, 309 {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"},
548 YES, YES, YES, 7000}, 310 {CTRY_BRAZIL, FCC3_WORLD, "BR"},
549 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES, 311 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"},
550 YES, YES, YES, YES, 7000}, 312 {CTRY_BULGARIA, ETSI6_WORLD, "BG"},
551 {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, YES, YES, 313 {CTRY_CANADA, FCC2_FCCA, "CA"},
552 YES, NO, 7000}, 314 {CTRY_CANADA2, FCC6_FCCA, "CA"},
553 {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES, 315 {CTRY_CHILE, APL6_WORLD, "CL"},
554 YES, YES, YES, 7000}, 316 {CTRY_CHINA, APL1_WORLD, "CN"},
555 {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES, 317 {CTRY_COLOMBIA, FCC1_FCCA, "CO"},
556 YES, YES, YES, 7000}, 318 {CTRY_COSTA_RICA, FCC1_WORLD, "CR"},
557 {CTRY_BELGIUM2, ETSI4_WORLD, "BL", "BELGIUM", YES, NO, YES, YES, 319 {CTRY_CROATIA, ETSI3_WORLD, "HR"},
558 YES, YES, YES, 7000}, 320 {CTRY_CYPRUS, ETSI1_WORLD, "CY"},
559 {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES, 321 {CTRY_CZECH, ETSI3_WORLD, "CZ"},
560 YES, YES, 7000}, 322 {CTRY_DENMARK, ETSI1_WORLD, "DK"},
561 {CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLVIA", YES, YES, YES, YES, YES, 323 {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO"},
562 YES, YES, 7000}, 324 {CTRY_ECUADOR, FCC1_WORLD, "EC"},
563 {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", "BOSNIA_HERZGOWINA", YES, NO, 325 {CTRY_EGYPT, ETSI3_WORLD, "EG"},
564 YES, YES, YES, YES, NO, 7000}, 326 {CTRY_EL_SALVADOR, FCC1_WORLD, "SV"},
565 {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", YES, NO, NO, YES, NO, 327 {CTRY_ESTONIA, ETSI1_WORLD, "EE"},
566 YES, NO, 7000}, 328 {CTRY_FINLAND, ETSI1_WORLD, "FI"},
567 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN", "BRUNEI DARUSSALAM", 329 {CTRY_FRANCE, ETSI1_WORLD, "FR"},
568 YES, YES, YES, YES, YES, YES, YES, 7000}, 330 {CTRY_GEORGIA, ETSI4_WORLD, "GE"},
569 {CTRY_BULGARIA, ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES, 331 {CTRY_GERMANY, ETSI1_WORLD, "DE"},
570 YES, YES, YES, 7000}, 332 {CTRY_GREECE, ETSI1_WORLD, "GR"},
571 {CTRY_CANADA, FCC2_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES, 333 {CTRY_GUATEMALA, FCC1_FCCA, "GT"},
572 YES, YES, 7000}, 334 {CTRY_HONDURAS, NULL1_WORLD, "HN"},
573 {CTRY_CANADA2, FCC6_FCCA, "CA", "CANADA2", YES, YES, YES, YES, YES, 335 {CTRY_HONG_KONG, FCC2_WORLD, "HK"},
574 YES, YES, 7000}, 336 {CTRY_HUNGARY, ETSI1_WORLD, "HU"},
575 {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES, 337 {CTRY_ICELAND, ETSI1_WORLD, "IS"},
576 YES, YES, 7000}, 338 {CTRY_INDIA, APL6_WORLD, "IN"},
577 {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES, 339 {CTRY_INDONESIA, APL1_WORLD, "ID"},
578 YES, YES, 7000}, 340 {CTRY_IRAN, APL1_WORLD, "IR"},
579 {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, YES, 341 {CTRY_IRELAND, ETSI1_WORLD, "IE"},
580 YES, YES, NO, 7000}, 342 {CTRY_ISRAEL, NULL1_WORLD, "IL"},
581 {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES, 343 {CTRY_ITALY, ETSI1_WORLD, "IT"},
582 YES, YES, YES, NO, 7000}, 344 {CTRY_JAMAICA, ETSI1_WORLD, "JM"},
583 {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, YES, 345
584 YES, YES, NO, 7000}, 346 {CTRY_JAPAN, MKK1_MKKA, "JP"},
585 {CTRY_CYPRUS, ETSI1_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES, 347 {CTRY_JAPAN1, MKK1_MKKB, "JP"},
586 YES, YES, 7000}, 348 {CTRY_JAPAN2, MKK1_FCCA, "JP"},
587 {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES, 349 {CTRY_JAPAN3, MKK2_MKKA, "JP"},
588 YES, YES, YES, YES, 7000}, 350 {CTRY_JAPAN4, MKK1_MKKA1, "JP"},
589 {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES, 351 {CTRY_JAPAN5, MKK1_MKKA2, "JP"},
590 YES, YES, YES, 7000}, 352 {CTRY_JAPAN6, MKK1_MKKC, "JP"},
591 {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC", 353 {CTRY_JAPAN7, MKK3_MKKB, "JP"},
592 YES, YES, YES, YES, YES, YES, YES, 7000}, 354 {CTRY_JAPAN8, MKK3_MKKA2, "JP"},
593 {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, YES, YES, 355 {CTRY_JAPAN9, MKK3_MKKC, "JP"},
594 YES, NO, 7000}, 356 {CTRY_JAPAN10, MKK4_MKKB, "JP"},
595 {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, YES, YES, 357 {CTRY_JAPAN11, MKK4_MKKA2, "JP"},
596 YES, NO, 7000}, 358 {CTRY_JAPAN12, MKK4_MKKC, "JP"},
597 {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES, 359 {CTRY_JAPAN13, MKK5_MKKB, "JP"},
598 YES, YES, YES, NO, 7000}, 360 {CTRY_JAPAN14, MKK5_MKKA2, "JP"},
599 {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES, 361 {CTRY_JAPAN15, MKK5_MKKC, "JP"},
600 YES, YES, YES, 7000}, 362 {CTRY_JAPAN16, MKK6_MKKB, "JP"},
601 {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES, 363 {CTRY_JAPAN17, MKK6_MKKA2, "JP"},
602 YES, YES, YES, 7000}, 364 {CTRY_JAPAN18, MKK6_MKKC, "JP"},
603 {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES, 365 {CTRY_JAPAN19, MKK7_MKKB, "JP"},
604 YES, YES, 7000}, 366 {CTRY_JAPAN20, MKK7_MKKA2, "JP"},
605 {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES, 367 {CTRY_JAPAN21, MKK7_MKKC, "JP"},
606 YES, YES, YES, 7000}, 368 {CTRY_JAPAN22, MKK8_MKKB, "JP"},
607 {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES, 369 {CTRY_JAPAN23, MKK8_MKKA2, "JP"},
608 YES, YES, YES, 7000}, 370 {CTRY_JAPAN24, MKK8_MKKC, "JP"},
609 {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES, 371 {CTRY_JAPAN25, MKK3_MKKA, "JP"},
610 YES, YES, 7000}, 372 {CTRY_JAPAN26, MKK3_MKKA1, "JP"},
611 {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES, 373 {CTRY_JAPAN27, MKK3_FCCA, "JP"},
612 YES, YES, YES, 7000}, 374 {CTRY_JAPAN28, MKK4_MKKA1, "JP"},
613 {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, YES, 375 {CTRY_JAPAN29, MKK4_FCCA, "JP"},
614 YES, NO, NO, 7000}, 376 {CTRY_JAPAN30, MKK6_MKKA1, "JP"},
615 {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES, 377 {CTRY_JAPAN31, MKK6_FCCA, "JP"},
616 YES, YES, YES, 7000}, 378 {CTRY_JAPAN32, MKK7_MKKA1, "JP"},
617 {CTRY_HUNGARY, ETSI1_WORLD, "HU", "HUNGARY", YES, NO, YES, YES, 379 {CTRY_JAPAN33, MKK7_FCCA, "JP"},
618 YES, YES, YES, 7000}, 380 {CTRY_JAPAN34, MKK9_MKKA, "JP"},
619 {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES, 381 {CTRY_JAPAN35, MKK10_MKKA, "JP"},
620 YES, YES, YES, 7000}, 382 {CTRY_JAPAN36, MKK4_MKKA, "JP"},
621 {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, YES, YES, 383 {CTRY_JAPAN37, MKK9_FCCA, "JP"},
622 YES, NO, 7000}, 384 {CTRY_JAPAN38, MKK9_MKKA1, "JP"},
623 {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, YES, 385 {CTRY_JAPAN39, MKK9_MKKC, "JP"},
624 YES, YES, NO, 7000}, 386 {CTRY_JAPAN40, MKK9_MKKA2, "JP"},
625 {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, YES, 387 {CTRY_JAPAN41, MKK10_FCCA, "JP"},
626 YES, 7000}, 388 {CTRY_JAPAN42, MKK10_MKKA1, "JP"},
627 {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES, 389 {CTRY_JAPAN43, MKK10_MKKC, "JP"},
628 YES, YES, YES, 7000}, 390 {CTRY_JAPAN44, MKK10_MKKA2, "JP"},
629 {CTRY_ISRAEL, NULL1_WORLD, "IL", "ISRAEL", YES, NO, YES, YES, YES, 391 {CTRY_JAPAN45, MKK11_MKKA, "JP"},
630 NO, NO, 7000}, 392 {CTRY_JAPAN46, MKK11_FCCA, "JP"},
631 {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES, 393 {CTRY_JAPAN47, MKK11_MKKA1, "JP"},
632 YES, YES, 7000}, 394 {CTRY_JAPAN48, MKK11_MKKC, "JP"},
633 {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES, 395 {CTRY_JAPAN49, MKK11_MKKA2, "JP"},
634 YES, YES, YES, 7000}, 396 {CTRY_JAPAN50, MKK12_MKKA, "JP"},
635 397 {CTRY_JAPAN51, MKK12_FCCA, "JP"},
636 {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, YES, YES, YES, 398 {CTRY_JAPAN52, MKK12_MKKA1, "JP"},
637 YES, 7000}, 399 {CTRY_JAPAN53, MKK12_MKKC, "JP"},
638 {CTRY_JAPAN1, MKK1_MKKB, "JP", "JAPAN1", YES, NO, NO, YES, YES, 400 {CTRY_JAPAN54, MKK12_MKKA2, "JP"},
639 YES, YES, 7000}, 401 {CTRY_JAPAN57, MKK13_MKKB, "JP"},
640 {CTRY_JAPAN2, MKK1_FCCA, "JP", "JAPAN2", YES, NO, NO, YES, YES, 402 {CTRY_JAPAN58, MKK14_MKKA1, "JP"},
641 YES, YES, 7000}, 403 {CTRY_JAPAN59, MKK15_MKKA1, "JP"},
642 {CTRY_JAPAN3, MKK2_MKKA, "JP", "JAPAN3", YES, NO, NO, YES, YES, 404
643 YES, YES, 7000}, 405 {CTRY_JORDAN, ETSI2_WORLD, "JO"},
644 {CTRY_JAPAN4, MKK1_MKKA1, "JP", "JAPAN4", YES, NO, NO, YES, YES, 406 {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ"},
645 YES, YES, 7000}, 407 {CTRY_KOREA_NORTH, APL9_WORLD, "KP"},
646 {CTRY_JAPAN5, MKK1_MKKA2, "JP", "JAPAN5", YES, NO, NO, YES, YES, 408 {CTRY_KOREA_ROC, APL9_WORLD, "KR"},
647 YES, YES, 7000}, 409 {CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
648 {CTRY_JAPAN6, MKK1_MKKC, "JP", "JAPAN6", YES, NO, NO, YES, YES, 410 {CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
649 YES, YES, 7000}, 411 {CTRY_KUWAIT, NULL1_WORLD, "KW"},
650 412 {CTRY_LATVIA, ETSI1_WORLD, "LV"},
651 {CTRY_JAPAN7, MKK3_MKKB, "JP", "JAPAN7", YES, NO, NO, YES, YES, 413 {CTRY_LEBANON, NULL1_WORLD, "LB"},
652 YES, YES, 7000}, 414 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"},
653 {CTRY_JAPAN8, MKK3_MKKA2, "JP", "JAPAN8", YES, NO, NO, YES, YES, 415 {CTRY_LITHUANIA, ETSI1_WORLD, "LT"},
654 YES, YES, 7000}, 416 {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU"},
655 {CTRY_JAPAN9, MKK3_MKKC, "JP", "JAPAN9", YES, NO, NO, YES, YES, 417 {CTRY_MACAU, FCC2_WORLD, "MO"},
656 YES, YES, 7000}, 418 {CTRY_MACEDONIA, NULL1_WORLD, "MK"},
657 419 {CTRY_MALAYSIA, APL8_WORLD, "MY"},
658 {CTRY_JAPAN10, MKK4_MKKB, "JP", "JAPAN10", YES, NO, NO, YES, YES, 420 {CTRY_MALTA, ETSI1_WORLD, "MT"},
659 YES, YES, 7000}, 421 {CTRY_MEXICO, FCC1_FCCA, "MX"},
660 {CTRY_JAPAN11, MKK4_MKKA2, "JP", "JAPAN11", YES, NO, NO, YES, YES, 422 {CTRY_MONACO, ETSI4_WORLD, "MC"},
661 YES, YES, 7000}, 423 {CTRY_MOROCCO, NULL1_WORLD, "MA"},
662 {CTRY_JAPAN12, MKK4_MKKC, "JP", "JAPAN12", YES, NO, NO, YES, YES, 424 {CTRY_NEPAL, APL1_WORLD, "NP"},
663 YES, YES, 7000}, 425 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"},
664 426 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"},
665 {CTRY_JAPAN13, MKK5_MKKB, "JP", "JAPAN13", YES, NO, NO, YES, YES, 427 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"},
666 YES, YES, 7000}, 428 {CTRY_NORWAY, ETSI1_WORLD, "NO"},
667 {CTRY_JAPAN14, MKK5_MKKA2, "JP", "JAPAN14", YES, NO, NO, YES, YES, 429 {CTRY_OMAN, APL6_WORLD, "OM"},
668 YES, YES, 7000}, 430 {CTRY_PAKISTAN, NULL1_WORLD, "PK"},
669 {CTRY_JAPAN15, MKK5_MKKC, "JP", "JAPAN15", YES, NO, NO, YES, YES, 431 {CTRY_PANAMA, FCC1_FCCA, "PA"},
670 YES, YES, 7000}, 432 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"},
671 433 {CTRY_PERU, APL1_WORLD, "PE"},
672 {CTRY_JAPAN16, MKK6_MKKB, "JP", "JAPAN16", YES, NO, NO, YES, YES, 434 {CTRY_PHILIPPINES, APL1_WORLD, "PH"},
673 YES, YES, 7000}, 435 {CTRY_POLAND, ETSI1_WORLD, "PL"},
674 {CTRY_JAPAN17, MKK6_MKKA2, "JP", "JAPAN17", YES, NO, NO, YES, YES, 436 {CTRY_PORTUGAL, ETSI1_WORLD, "PT"},
675 YES, YES, 7000}, 437 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"},
676 {CTRY_JAPAN18, MKK6_MKKC, "JP", "JAPAN18", YES, NO, NO, YES, YES, 438 {CTRY_QATAR, NULL1_WORLD, "QA"},
677 YES, YES, 7000}, 439 {CTRY_ROMANIA, NULL1_WORLD, "RO"},
678 440 {CTRY_RUSSIA, NULL1_WORLD, "RU"},
679 {CTRY_JAPAN19, MKK7_MKKB, "JP", "JAPAN19", YES, NO, NO, YES, YES, 441 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
680 YES, YES, 7000}, 442 {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"},
681 {CTRY_JAPAN20, MKK7_MKKA2, "JP", "JAPAN20", YES, NO, NO, YES, YES, 443 {CTRY_SINGAPORE, APL6_WORLD, "SG"},
682 YES, YES, 7000}, 444 {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"},
683 {CTRY_JAPAN21, MKK7_MKKC, "JP", "JAPAN21", YES, NO, NO, YES, YES, 445 {CTRY_SLOVENIA, ETSI1_WORLD, "SI"},
684 YES, YES, 7000}, 446 {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA"},
685 447 {CTRY_SPAIN, ETSI1_WORLD, "ES"},
686 {CTRY_JAPAN22, MKK8_MKKB, "JP", "JAPAN22", YES, NO, NO, YES, YES, 448 {CTRY_SRI_LANKA, FCC3_WORLD, "LK"},
687 YES, YES, 7000}, 449 {CTRY_SWEDEN, ETSI1_WORLD, "SE"},
688 {CTRY_JAPAN23, MKK8_MKKA2, "JP", "JAPAN23", YES, NO, NO, YES, YES, 450 {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"},
689 YES, YES, 7000}, 451 {CTRY_SYRIA, NULL1_WORLD, "SY"},
690 {CTRY_JAPAN24, MKK8_MKKC, "JP", "JAPAN24", YES, NO, NO, YES, YES, 452 {CTRY_TAIWAN, APL3_FCCA, "TW"},
691 YES, YES, 7000}, 453 {CTRY_THAILAND, NULL1_WORLD, "TH"},
692 454 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT"},
693 {CTRY_JAPAN25, MKK3_MKKA, "JP", "JAPAN25", YES, NO, NO, YES, YES, 455 {CTRY_TUNISIA, ETSI3_WORLD, "TN"},
694 YES, YES, 7000}, 456 {CTRY_TURKEY, ETSI3_WORLD, "TR"},
695 {CTRY_JAPAN26, MKK3_MKKA1, "JP", "JAPAN26", YES, NO, NO, YES, YES, 457 {CTRY_UKRAINE, NULL1_WORLD, "UA"},
696 YES, YES, 7000}, 458 {CTRY_UAE, NULL1_WORLD, "AE"},
697 {CTRY_JAPAN27, MKK3_FCCA, "JP", "JAPAN27", YES, NO, NO, YES, YES, 459 {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"},
698 YES, YES, 7000}, 460 {CTRY_UNITED_STATES, FCC3_FCCA, "US"},
699 {CTRY_JAPAN28, MKK4_MKKA1, "JP", "JAPAN28", YES, NO, NO, YES, YES, 461 /* This "PS" is for US public safety actually... to support this we
700 YES, YES, 7000}, 462 * would need to assign new special alpha2 to CRDA db as with the world
701 {CTRY_JAPAN29, MKK4_FCCA, "JP", "JAPAN29", YES, NO, NO, YES, YES, 463 * regdomain and use another alpha2 */
702 YES, YES, 7000}, 464 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"},
703 {CTRY_JAPAN30, MKK6_MKKA1, "JP", "JAPAN30", YES, NO, NO, YES, YES, 465 {CTRY_URUGUAY, APL2_WORLD, "UY"},
704 YES, YES, 7000}, 466 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"},
705 {CTRY_JAPAN31, MKK6_FCCA, "JP", "JAPAN31", YES, NO, NO, YES, YES, 467 {CTRY_VENEZUELA, APL2_ETSIC, "VE"},
706 YES, YES, 7000}, 468 {CTRY_VIET_NAM, NULL1_WORLD, "VN"},
707 {CTRY_JAPAN32, MKK7_MKKA1, "JP", "JAPAN32", YES, NO, NO, YES, YES, 469 {CTRY_YEMEN, NULL1_WORLD, "YE"},
708 YES, YES, 7000}, 470 {CTRY_ZIMBABWE, NULL1_WORLD, "ZW"},
709 {CTRY_JAPAN33, MKK7_FCCA, "JP", "JAPAN33", YES, NO, NO, YES, YES,
710 YES, YES, 7000},
711 {CTRY_JAPAN34, MKK9_MKKA, "JP", "JAPAN34", YES, NO, NO, YES, YES,
712 YES, YES, 7000},
713 {CTRY_JAPAN35, MKK10_MKKA, "JP", "JAPAN35", YES, NO, NO, YES, YES,
714 YES, YES, 7000},
715 {CTRY_JAPAN36, MKK4_MKKA, "JP", "JAPAN36", YES, NO, NO, YES, YES,
716 YES, YES, 7000},
717 {CTRY_JAPAN37, MKK9_FCCA, "JP", "JAPAN37", YES, NO, NO, YES, YES,
718 YES, YES, 7000},
719 {CTRY_JAPAN38, MKK9_MKKA1, "JP", "JAPAN38", YES, NO, NO, YES, YES,
720 YES, YES, 7000},
721 {CTRY_JAPAN39, MKK9_MKKC, "JP", "JAPAN39", YES, NO, NO, YES, YES,
722 YES, YES, 7000},
723 {CTRY_JAPAN40, MKK9_MKKA2, "JP", "JAPAN40", YES, NO, NO, YES, YES,
724 YES, YES, 7000},
725 {CTRY_JAPAN41, MKK10_FCCA, "JP", "JAPAN41", YES, NO, NO, YES, YES,
726 YES, YES, 7000},
727 {CTRY_JAPAN42, MKK10_MKKA1, "JP", "JAPAN42", YES, NO, NO, YES, YES,
728 YES, YES, 7000},
729 {CTRY_JAPAN43, MKK10_MKKC, "JP", "JAPAN43", YES, NO, NO, YES, YES,
730 YES, YES, 7000},
731 {CTRY_JAPAN44, MKK10_MKKA2, "JP", "JAPAN44", YES, NO, NO, YES, YES,
732 YES, YES, 7000},
733 {CTRY_JAPAN45, MKK11_MKKA, "JP", "JAPAN45", YES, NO, NO, YES, YES,
734 YES, YES, 7000},
735 {CTRY_JAPAN46, MKK11_FCCA, "JP", "JAPAN46", YES, NO, NO, YES, YES,
736 YES, YES, 7000},
737 {CTRY_JAPAN47, MKK11_MKKA1, "JP", "JAPAN47", YES, NO, NO, YES, YES,
738 YES, YES, 7000},
739 {CTRY_JAPAN48, MKK11_MKKC, "JP", "JAPAN48", YES, NO, NO, YES, YES,
740 YES, YES, 7000},
741 {CTRY_JAPAN49, MKK11_MKKA2, "JP", "JAPAN49", YES, NO, NO, YES, YES,
742 YES, YES, 7000},
743 {CTRY_JAPAN50, MKK12_MKKA, "JP", "JAPAN50", YES, NO, NO, YES, YES,
744 YES, YES, 7000},
745 {CTRY_JAPAN51, MKK12_FCCA, "JP", "JAPAN51", YES, NO, NO, YES, YES,
746 YES, YES, 7000},
747 {CTRY_JAPAN52, MKK12_MKKA1, "JP", "JAPAN52", YES, NO, NO, YES, YES,
748 YES, YES, 7000},
749 {CTRY_JAPAN53, MKK12_MKKC, "JP", "JAPAN53", YES, NO, NO, YES, YES,
750 YES, YES, 7000},
751 {CTRY_JAPAN54, MKK12_MKKA2, "JP", "JAPAN54", YES, NO, NO, YES, YES,
752 YES, YES, 7000},
753
754 {CTRY_JAPAN57, MKK13_MKKB, "JP", "JAPAN57", YES, NO, NO, YES, YES,
755 YES, YES, 7000},
756 {CTRY_JAPAN58, MKK14_MKKA1, "JP", "JAPAN58", YES, NO, NO, YES, YES,
757 YES, YES, 7000},
758 {CTRY_JAPAN59, MKK15_MKKA1, "JP", "JAPAN59", YES, NO, NO, YES, YES,
759 YES, YES, 7000},
760
761 {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, YES, YES,
762 YES, NO, 7000},
763 {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES,
764 YES, YES, NO, NO, 7000},
765 {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO,
766 YES, YES, YES, YES, 7000},
767 {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO,
768 YES, NO, YES, NO, 7000},
769 {CTRY_KOREA_ROC2, APL2_WORLD, "K2", "KOREA REPUBLIC2", YES, NO, NO,
770 YES, NO, YES, NO, 7000},
771 {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3", YES, NO, NO,
772 YES, NO, YES, NO, 7000},
773 {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, YES, YES,
774 NO, NO, 7000},
775 {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES,
776 YES, YES, 7000},
777 {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, YES,
778 YES, NO, NO, 7000},
779 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO,
780 YES, YES, YES, YES, YES, 7000},
781 {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES,
782 YES, YES, YES, 7000},
783 {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES,
784 YES, YES, YES, YES, 7000},
785 {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES,
786 YES, YES, 7000},
787 {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, YES,
788 YES, NO, NO, 7000},
789 {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", YES, NO, NO, YES, NO,
790 YES, NO, 7000},
791 {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES,
792 YES, YES, 7000},
793 {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES,
794 YES, YES, 7000},
795 {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES,
796 YES, YES, 7000},
797 {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, YES,
798 YES, NO, NO, 7000},
799 {CTRY_NEPAL, APL1_WORLD, "NP", "NEPAL", YES, NO, YES, YES, YES,
800 YES, YES, 7000},
801 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES,
802 YES, YES, YES, YES, 7000},
803 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN",
804 "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, YES, YES, 7000},
805 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES,
806 YES, YES, YES, NO, 7000},
807 {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES,
808 YES, YES, 7000},
809 {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, YES, YES, YES,
810 NO, 7000},
811 {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, YES,
812 YES, NO, NO, 7000},
813 {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES,
814 YES, YES, 7000},
815 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG", "PAPUA NEW GUINEA", YES,
816 YES, YES, YES, YES, YES, YES, 7000},
817 {CTRY_PERU, APL1_WORLD, "PE", "PERU", YES, NO, YES, YES, YES, YES,
818 NO, 7000},
819 {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES,
820 YES, YES, YES, YES, 7000},
821 {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES,
822 YES, YES, 7000},
823 {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES,
824 YES, YES, YES, 7000},
825 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES,
826 YES, YES, YES, YES, 7000},
827 {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, YES, YES,
828 NO, NO, 7000},
829 {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, YES,
830 YES, NO, NO, 7000},
831 {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, YES, YES,
832 NO, NO, 7000},
833 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO,
834 YES, YES, YES, NO, NO, 7000},
835 {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO",
836 YES, NO, YES, YES, YES, YES, YES, 7000},
837 {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES,
838 YES, YES, YES, 7000},
839 {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC", YES, NO, YES,
840 YES, YES, YES, YES, 7000},
841 {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES,
842 YES, YES, YES, 7000},
843 {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES,
844 YES, YES, YES, NO, 7000},
845 {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES,
846 YES, YES, 7000},
847 {CTRY_SRI_LANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, YES,
848 YES, YES, NO, 7000},
849 {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES,
850 YES, YES, 7000},
851 {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES,
852 YES, YES, YES, YES, 7000},
853 {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, YES, YES,
854 NO, NO, 7000},
855 {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES,
856 YES, YES, 7000},
857 {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, YES,
858 YES, NO, NO, 7000},
859 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT", "TRINIDAD & TOBAGO",
860 YES, NO, YES, YES, YES, YES, NO, 7000},
861 {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, YES,
862 YES, YES, NO, 7000},
863 {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, YES, YES,
864 YES, NO, 7000},
865 {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, YES,
866 YES, NO, NO, 7000},
867 {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES,
868 YES, YES, NO, NO, 7000},
869 {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO,
870 YES, YES, YES, YES, YES, 7000},
871 {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES,
872 YES, YES, YES, YES, YES, 5825},
873 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS",
874 "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, YES,
875 YES, 7000},
876 {CTRY_URUGUAY, APL2_WORLD, "UY", "URUGUAY", YES, NO, YES, YES, YES,
877 YES, NO, 7000},
878 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES,
879 YES, YES, YES, YES, 7000},
880 {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, YES,
881 YES, YES, NO, 7000},
882 {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, YES,
883 YES, NO, NO, 7000},
884 {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, YES, YES,
885 NO, NO, 7000},
886 {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, YES,
887 YES, NO, NO, 7000}
888};
889
890enum {
891 NO_DFS = 0x0000000000000000ULL,
892 DFS_FCC3 = 0x0000000000000001ULL,
893 DFS_ETSI = 0x0000000000000002ULL,
894 DFS_MKK4 = 0x0000000000000004ULL,
895};
896
897enum {
898 F1_4915_4925,
899 F1_4935_4945,
900 F1_4920_4980,
901 F1_4942_4987,
902 F1_4945_4985,
903 F1_4950_4980,
904 F1_5035_5040,
905 F1_5040_5080,
906 F1_5055_5055,
907
908 F1_5120_5240,
909
910 F1_5170_5230,
911 F2_5170_5230,
912
913 F1_5180_5240,
914 F2_5180_5240,
915 F3_5180_5240,
916 F4_5180_5240,
917 F5_5180_5240,
918 F6_5180_5240,
919 F7_5180_5240,
920 F8_5180_5240,
921
922 F1_5180_5320,
923
924 F1_5240_5280,
925
926 F1_5260_5280,
927
928 F1_5260_5320,
929 F2_5260_5320,
930 F3_5260_5320,
931 F4_5260_5320,
932 F5_5260_5320,
933 F6_5260_5320,
934
935 F1_5260_5700,
936
937 F1_5280_5320,
938
939 F1_5500_5580,
940
941 F1_5500_5620,
942
943 F1_5500_5700,
944 F2_5500_5700,
945 F3_5500_5700,
946 F4_5500_5700,
947 F5_5500_5700,
948
949 F1_5660_5700,
950
951 F1_5745_5805,
952 F2_5745_5805,
953 F3_5745_5805,
954
955 F1_5745_5825,
956 F2_5745_5825,
957 F3_5745_5825,
958 F4_5745_5825,
959 F5_5745_5825,
960 F6_5745_5825,
961
962 W1_4920_4980,
963 W1_5040_5080,
964 W1_5170_5230,
965 W1_5180_5240,
966 W1_5260_5320,
967 W1_5745_5825,
968 W1_5500_5700,
969 A_DEMO_ALL_CHANNELS
970};
971
972static struct RegDmnFreqBand regDmn5GhzFreq[] = {
973 {4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
974 {4935, 4945, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
975 {4920, 4980, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7},
976 {4942, 4987, 27, 6, 5, 5, NO_DFS, PSCAN_FCC, 0},
977 {4945, 4985, 30, 6, 10, 5, NO_DFS, PSCAN_FCC, 0},
978 {4950, 4980, 33, 6, 20, 5, NO_DFS, PSCAN_FCC, 0},
979 {5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
980 {5040, 5080, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 2},
981 {5055, 5055, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
982
983 {5120, 5240, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
984
985 {5170, 5230, 23, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
986 {5170, 5230, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
987
988 {5180, 5240, 15, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
989 {5180, 5240, 17, 6, 20, 20, NO_DFS, NO_PSCAN, 1},
990 {5180, 5240, 18, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
991 {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
992 {5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
993 {5180, 5240, 23, 6, 20, 20, NO_DFS, PSCAN_FCC, 0},
994 {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK3, 0},
995 {5180, 5240, 23, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
996
997 {5180, 5320, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
998
999 {5240, 5280, 23, 0, 20, 20, DFS_FCC3, PSCAN_FCC | PSCAN_ETSI, 0},
1000
1001 {5260, 5280, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1002 PSCAN_FCC | PSCAN_ETSI, 0},
1003
1004 {5260, 5320, 18, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1005 PSCAN_FCC | PSCAN_ETSI, 0},
1006
1007 {5260, 5320, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
1008 PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0},
1009
1010
1011 {5260, 5320, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI,
1012 PSCAN_FCC | PSCAN_ETSI, 2},
1013 {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2},
1014 {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
1015 {5260, 5320, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1016
1017 {5260, 5700, 5, 6, 20, 20, DFS_FCC3 | DFS_ETSI, NO_PSCAN, 0},
1018
1019 {5280, 5320, 17, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
1020
1021 {5500, 5580, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
1022
1023 {5500, 5620, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
1024
1025 {5500, 5700, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 4},
1026 {5500, 5700, 27, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1027 PSCAN_FCC | PSCAN_ETSI, 0},
1028 {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1029 PSCAN_FCC | PSCAN_ETSI, 0},
1030 {5500, 5700, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
1031 PSCAN_MKK3 | PSCAN_FCC, 0},
1032 {5500, 5700, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
1033
1034 {5660, 5700, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
1035
1036 {5745, 5805, 23, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1037 {5745, 5805, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1038 {5745, 5805, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
1039 {5745, 5825, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1040 {5745, 5825, 17, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1041 {5745, 5825, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1042 {5745, 5825, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1043 {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 3},
1044 {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1045
1046
1047 {4920, 4980, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1048 {5040, 5080, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1049 {5170, 5230, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1050 {5180, 5240, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1051 {5260, 5320, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
1052 {5745, 5825, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1053 {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
1054 {4920, 6100, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1055};
1056
1057enum {
1058 T1_5130_5650,
1059 T1_5150_5670,
1060
1061 T1_5200_5200,
1062 T2_5200_5200,
1063 T3_5200_5200,
1064 T4_5200_5200,
1065 T5_5200_5200,
1066 T6_5200_5200,
1067 T7_5200_5200,
1068 T8_5200_5200,
1069
1070 T1_5200_5280,
1071 T2_5200_5280,
1072 T3_5200_5280,
1073 T4_5200_5280,
1074 T5_5200_5280,
1075 T6_5200_5280,
1076
1077 T1_5200_5240,
1078 T1_5210_5210,
1079 T2_5210_5210,
1080 T3_5210_5210,
1081 T4_5210_5210,
1082 T5_5210_5210,
1083 T6_5210_5210,
1084 T7_5210_5210,
1085 T8_5210_5210,
1086 T9_5210_5210,
1087 T10_5210_5210,
1088 T1_5240_5240,
1089
1090 T1_5210_5250,
1091 T1_5210_5290,
1092 T2_5210_5290,
1093 T3_5210_5290,
1094
1095 T1_5280_5280,
1096 T2_5280_5280,
1097 T1_5290_5290,
1098 T2_5290_5290,
1099 T3_5290_5290,
1100 T1_5250_5290,
1101 T2_5250_5290,
1102 T3_5250_5290,
1103 T4_5250_5290,
1104
1105 T1_5540_5660,
1106 T2_5540_5660,
1107 T3_5540_5660,
1108 T1_5760_5800,
1109 T2_5760_5800,
1110 T3_5760_5800,
1111 T4_5760_5800,
1112 T5_5760_5800,
1113 T6_5760_5800,
1114 T7_5760_5800,
1115
1116 T1_5765_5805,
1117 T2_5765_5805,
1118 T3_5765_5805,
1119 T4_5765_5805,
1120 T5_5765_5805,
1121 T6_5765_5805,
1122 T7_5765_5805,
1123 T8_5765_5805,
1124 T9_5765_5805,
1125
1126 WT1_5210_5250,
1127 WT1_5290_5290,
1128 WT1_5540_5660,
1129 WT1_5760_5800,
1130};
1131
1132enum {
1133 F1_2312_2372,
1134 F2_2312_2372,
1135
1136 F1_2412_2472,
1137 F2_2412_2472,
1138 F3_2412_2472,
1139
1140 F1_2412_2462,
1141 F2_2412_2462,
1142
1143 F1_2432_2442,
1144
1145 F1_2457_2472,
1146
1147 F1_2467_2472,
1148
1149 F1_2484_2484,
1150 F2_2484_2484,
1151
1152 F1_2512_2732,
1153
1154 W1_2312_2372,
1155 W1_2412_2412,
1156 W1_2417_2432,
1157 W1_2437_2442,
1158 W1_2447_2457,
1159 W1_2462_2462,
1160 W1_2467_2467,
1161 W2_2467_2467,
1162 W1_2472_2472,
1163 W2_2472_2472,
1164 W1_2484_2484,
1165 W2_2484_2484,
1166};
1167
1168static struct RegDmnFreqBand regDmn2GhzFreq[] = {
1169 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1170 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1171
1172 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1173 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1174 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1175
1176 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1177 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1178
1179 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1180
1181 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1182
1183 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1184
1185 {2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1186 {2484, 2484, 20, 0, 20, 5, NO_DFS,
1187 PSCAN_MKKA | PSCAN_MKKA1 | PSCAN_MKKA2, 0},
1188
1189 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1190
1191 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1192 {2412, 2412, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1193 {2417, 2432, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1194 {2437, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1195 {2447, 2457, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1196 {2462, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1197 {2467, 2467, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1198 {2467, 2467, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1199 {2472, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1200 {2472, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1201 {2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1202 {2484, 2484, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1203};
1204
1205enum {
1206 G1_2312_2372,
1207 G2_2312_2372,
1208
1209 G1_2412_2472,
1210 G2_2412_2472,
1211 G3_2412_2472,
1212
1213 G1_2412_2462,
1214 G2_2412_2462,
1215
1216 G1_2432_2442,
1217
1218 G1_2457_2472,
1219
1220 G1_2512_2732,
1221
1222 G1_2467_2472,
1223
1224 WG1_2312_2372,
1225 WG1_2412_2462,
1226 WG1_2467_2472,
1227 WG2_2467_2472,
1228 G_DEMO_ALL_CHANNELS
1229};
1230
1231static struct RegDmnFreqBand regDmn2Ghz11gFreq[] = {
1232 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1233 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1234
1235 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1236 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1237 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1238
1239 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1240 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1241
1242 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1243
1244 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1245
1246 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1247
1248 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1249
1250 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1251 {2412, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1252 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1253 {2467, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1254 {2312, 2732, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1255};
1256
1257enum {
1258 T1_2312_2372,
1259 T1_2437_2437,
1260 T2_2437_2437,
1261 T3_2437_2437,
1262 T1_2512_2732
1263}; 471};
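
The country table now only ties a country code to its regpair and ISO alpha2, so a reverse lookup by alpha2 (as delivered to the cfg80211 regulatory notifier) becomes a simple scan; sketch only, with an invented helper name:

	/* Illustrative only: find a country entry by its two-letter
	 * ISO code.  allCountries[] is the table directly above. */
	static struct country_code_to_enum_rd *
	find_country_by_alpha2(const char *alpha2)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(allCountries); i++)
			if (!memcmp(allCountries[i].isoName, alpha2, 2))
				return &allCountries[i];
		return NULL;
	}
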
1264 472
1265static struct regDomain regDomains[] = {
1266
1267 {DEBUG_REG_DMN, FCC, DFS_FCC3, NO_PSCAN, NO_REQ,
1268 BM(A_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1269 -1),
1270 BM(T1_5130_5650, T1_5150_5670, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1271 -1),
1272 BM(T1_5200_5240, T1_5280_5280, T1_5540_5660, T1_5765_5805, -1, -1,
1273 -1, -1, -1, -1, -1, -1),
1274 BM(F1_2312_2372, F1_2412_2472, F1_2484_2484, F1_2512_2732, -1, -1,
1275 -1, -1, -1, -1, -1, -1),
1276 BM(G_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1277 -1),
1278 BM(T1_2312_2372, T1_2437_2437, T1_2512_2732, -1, -1, -1, -1, -1,
1279 -1, -1, -1, -1)},
1280
1281 {APL1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1282 BM(F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1283 BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1284 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1285 BMZERO,
1286 BMZERO,
1287 BMZERO},
1288
1289 {APL2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1290 BM(F1_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1291 BM(T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1292 BM(T2_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1293 BMZERO,
1294 BMZERO,
1295 BMZERO},
1296
1297 {APL3, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1298 BM(F1_5280_5320, F2_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1299 -1),
1300 BM(T1_5290_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1301 -1),
1302 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1303 BMZERO,
1304 BMZERO,
1305 BMZERO},
1306
1307 {APL4, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1308 BM(F4_5180_5240, F3_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1309 -1),
1310 BM(T1_5210_5210, T3_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1311 -1),
1312 BM(T1_5200_5200, T3_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1313 -1),
1314 BMZERO,
1315 BMZERO,
1316 BMZERO},
1317
1318 {APL5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1319 BM(F2_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1320 BM(T4_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1321 BM(T4_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1322 BMZERO,
1323 BMZERO,
1324 BMZERO},
1325
1326 {APL6, ETSI, DFS_ETSI, PSCAN_FCC_T | PSCAN_FCC, NO_REQ,
1327 BM(F4_5180_5240, F2_5260_5320, F3_5745_5825, -1, -1, -1, -1, -1,
1328 -1, -1, -1, -1),
1329 BM(T2_5210_5210, T1_5250_5290, T1_5760_5800, -1, -1, -1, -1, -1,
1330 -1, -1, -1, -1),
1331 BM(T1_5200_5280, T5_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1332 -1),
1333 BMZERO,
1334 BMZERO,
1335 BMZERO},
1336
1337 {APL7, ETSI, DFS_ETSI, PSCAN_ETSI, NO_REQ,
1338 BM(F1_5280_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1339 -1, -1, -1, -1),
1340 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1341 -1),
1342 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1343 -1),
1344 BMZERO,
1345 BMZERO,
1346 BMZERO},
1347
1348 {APL8, ETSI, NO_DFS, NO_PSCAN,
1349 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1350 BM(F6_5260_5320, F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1351 -1),
1352 BM(T2_5290_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1353 -1),
1354 BM(T1_5280_5280, T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1355 -1),
1356 BMZERO,
1357 BMZERO,
1358 BMZERO},
1359
1360 {APL9, ETSI, DFS_ETSI, PSCAN_ETSI,
1361 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1362 BM(F1_5180_5320, F1_5500_5620, F3_5745_5805, -1, -1, -1, -1, -1,
1363 -1, -1, -1, -1),
1364 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1365 -1),
1366 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1367 -1),
1368 BMZERO,
1369 BMZERO,
1370 BMZERO},
1371
1372 {APL10, ETSI, DFS_ETSI, PSCAN_ETSI,
1373 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1374 BM(F1_5180_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1375 -1, -1, -1, -1),
1376 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1377 -1),
1378 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1379 -1),
1380 BMZERO,
1381 BMZERO,
1382 BMZERO},
1383
1384 {ETSI1, ETSI, DFS_ETSI, PSCAN_ETSI,
1385 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1386 BM(F4_5180_5240, F2_5260_5320, F2_5500_5700, -1, -1, -1, -1, -1,
1387 -1, -1, -1, -1),
1388 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1389 BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1390 -1),
1391 BMZERO,
1392 BMZERO,
1393 BMZERO},
1394
1395 {ETSI2, ETSI, DFS_ETSI, PSCAN_ETSI,
1396 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1397 BM(F3_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1398 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1399 BM(T2_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1400 BMZERO,
1401 BMZERO,
1402 BMZERO},
1403
1404 {ETSI3, ETSI, DFS_ETSI, PSCAN_ETSI,
1405 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1406 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1407 -1),
1408 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1409 BM(T2_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1410 BMZERO,
1411 BMZERO,
1412 BMZERO},
1413
1414 {ETSI4, ETSI, DFS_ETSI, PSCAN_ETSI,
1415 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1416 BM(F3_5180_5240, F1_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1417 -1),
1418 BM(T2_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1419 BM(T3_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1420 BMZERO,
1421 BMZERO,
1422 BMZERO},
1423
1424 {ETSI5, ETSI, DFS_ETSI, PSCAN_ETSI,
1425 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1426 BM(F1_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1427 BM(T4_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1428 BM(T3_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1429 BMZERO,
1430 BMZERO,
1431 BMZERO},
1432
1433 {ETSI6, ETSI, DFS_ETSI, PSCAN_ETSI,
1434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1435 BM(F5_5180_5240, F1_5260_5280, F3_5500_5700, -1, -1, -1, -1, -1,
1436 -1, -1, -1, -1),
1437 BM(T1_5210_5250, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1438 BM(T4_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1439 BMZERO,
1440 BMZERO,
1441 BMZERO},
1442
1443 {FCC1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1444 BM(F2_5180_5240, F4_5260_5320, F5_5745_5825, -1, -1, -1, -1, -1,
1445 -1, -1, -1, -1),
1446 BM(T6_5210_5210, T2_5250_5290, T6_5760_5800, -1, -1, -1, -1, -1,
1447 -1, -1, -1, -1),
1448 BM(T1_5200_5240, T2_5280_5280, T7_5765_5805, -1, -1, -1, -1, -1,
1449 -1, -1, -1, -1),
1450 BMZERO,
1451 BMZERO,
1452 BMZERO},
1453
1454 {FCC2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1455 BM(F6_5180_5240, F5_5260_5320, F6_5745_5825, -1, -1, -1, -1, -1,
1456 -1, -1, -1, -1),
1457 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1458 -1, -1, -1, -1),
1459 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1460 -1, -1, -1, -1, -1, -1),
1461 BMZERO,
1462 BMZERO,
1463 BMZERO},
1464
1465 {FCC3, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1466 BM(F2_5180_5240, F3_5260_5320, F1_5500_5700, F5_5745_5825, -1, -1,
1467 -1, -1, -1, -1, -1, -1),
1468 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1469 -1),
1470 BM(T4_5200_5200, T8_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1471 -1),
1472 BMZERO,
1473 BMZERO,
1474 BMZERO},
1475
1476 {FCC4, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1477 BM(F1_4942_4987, F1_4945_4985, F1_4950_4980, -1, -1, -1, -1, -1,
1478 -1, -1, -1, -1),
1479 BM(T8_5210_5210, T4_5250_5290, T7_5760_5800, -1, -1, -1, -1, -1,
1480 -1, -1, -1, -1),
1481 BM(T1_5200_5240, T1_5280_5280, T9_5765_5805, -1, -1, -1, -1, -1,
1482 -1, -1, -1, -1),
1483 BMZERO,
1484 BMZERO,
1485 BMZERO},
1486
1487 {FCC5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1488 BM(F2_5180_5240, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1489 -1),
1490 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1491 -1),
1492 BM(T8_5200_5200, T7_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1493 -1),
1494 BMZERO,
1495 BMZERO,
1496 BMZERO},
1497
1498 {FCC6, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ,
1499 BM(F8_5180_5240, F5_5260_5320, F1_5500_5580, F1_5660_5700,
1500 F6_5745_5825, -1, -1, -1, -1, -1, -1, -1),
1501 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1502 -1, -1, -1, -1),
1503 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1504 -1, -1, -1, -1, -1, -1),
1505 BMZERO,
1506 BMZERO,
1507 BMZERO},
1508
1509 {MKK1, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1510 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1511 -1, -1, -1, -1, -1, -1),
1512 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1513 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1514 BMZERO,
1515 BMZERO,
1516 BMZERO},
1517
1518 {MKK2, MKK, NO_DFS, PSCAN_MKK2, DISALLOW_ADHOC_11A_TURB,
1519 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1520 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1521 F2_5260_5320, F4_5500_5700, -1, -1),
1522 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1523 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1524 BMZERO,
1525 BMZERO,
1526 BMZERO},
1527
1528
1529 {MKK3, MKK, NO_DFS, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1530 BM(F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1531 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1532 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1533 BMZERO,
1534 BMZERO,
1535 BMZERO},
1536
1537
1538 {MKK4, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1539 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1540 -1),
1541 BM(T10_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1542 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1543 BMZERO,
1544 BMZERO,
1545 BMZERO},
1546
1547
1548 {MKK5, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1549 BM(F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, -1, -1, -1,
1550 -1, -1, -1, -1),
1551 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1552 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1553 -1),
1554 BMZERO,
1555 BMZERO,
1556 BMZERO},
1557
1558
1559 {MKK6, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1560 BM(F2_5170_5230, F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1561 -1),
1562 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1563 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1564 BMZERO,
1565 BMZERO,
1566 BMZERO},
1567
1568
1569 {MKK7, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1570 DISALLOW_ADHOC_11A_TURB,
1571 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1,
1572 -1, -1, -1, -1),
1573 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1574 BM(T5_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1575 BMZERO,
1576 BMZERO,
1577 BMZERO},
1578
1579
1580 {MKK8, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1581 DISALLOW_ADHOC_11A_TURB,
1582 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1583 -1, -1, -1, -1, -1, -1),
1584 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1585 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1586 -1),
1587 BMZERO,
1588 BMZERO,
1589 BMZERO},
1590
1591
1592 {MKK9, MKK, NO_DFS, PSCAN_MKK2 | PSCAN_MKK3,
1593 DISALLOW_ADHOC_11A_TURB,
1594 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1595 F1_5055_5055, F1_5040_5080, F4_5180_5240, -1, -1, -1, -1, -1),
1596 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1597 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1598 BMZERO,
1599 BMZERO,
1600 BMZERO},
1601
1602
1603 {MKK10, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3,
1604 DISALLOW_ADHOC_11A_TURB,
1605 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1606 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320, -1, -1,
1607 -1, -1),
1608 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1609 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1610 BMZERO,
1611 BMZERO,
1612 BMZERO},
1613
1614
1615 {MKK11, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1616 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1617 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320,
1618 F4_5500_5700, -1, -1, -1),
1619 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1620 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1621 BMZERO,
1622 BMZERO,
1623 BMZERO},
1624
1625
1626 {MKK12, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1627 DISALLOW_ADHOC_11A_TURB,
1628 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1629 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1630 F2_5260_5320, F4_5500_5700, -1, -1),
1631 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1632 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1633 BMZERO,
1634 BMZERO,
1635 BMZERO},
1636
1637
1638 {MKK13, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1639 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1640 BM(F1_5170_5230, F7_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1641 -1, -1, -1, -1, -1, -1),
1642 BMZERO,
1643 BMZERO,
1644 BMZERO,
1645 BMZERO,
1646 BMZERO},
1647
1648
1649 {MKK14, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1650 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1651 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240, -1, -1,
1652 -1, -1),
1653 BMZERO,
1654 BMZERO,
1655 BMZERO,
1656 BMZERO,
1657 BMZERO},
1658
1659
1660 {MKK15, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1661 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1662 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240,
1663 F2_5260_5320, -1, -1, -1),
1664 BMZERO,
1665 BMZERO,
1666 BMZERO,
1667 BMZERO,
1668 BMZERO},
1669
1670
1671 {APLD, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1672 BMZERO,
1673 BMZERO,
1674 BMZERO,
1675 BM(F2_2312_2372, F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1676 -1),
1677 BM(G2_2312_2372, G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1678 -1),
1679 BMZERO},
1680
1681 {ETSIA, NO_CTL, NO_DFS, PSCAN_ETSIA,
1682 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1683 BMZERO,
1684 BMZERO,
1685 BMZERO,
1686 BM(F1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1687 BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1688 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1689
1690 {ETSIB, ETSI, NO_DFS, PSCAN_ETSIB,
1691 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1692 BMZERO,
1693 BMZERO,
1694 BMZERO,
1695 BM(F1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1696 BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1697 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1698
1699 {ETSIC, ETSI, NO_DFS, PSCAN_ETSIC,
1700 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1701 BMZERO,
1702 BMZERO,
1703 BMZERO,
1704 BM(F3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1705 BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1706 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1707
1708 {FCCA, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1709 BMZERO,
1710 BMZERO,
1711 BMZERO,
1712 BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1713 BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1714 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1715
1716 {MKKA, MKK, NO_DFS,
1717 PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G |
1718 PSCAN_MKKA2 | PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB,
1719 BMZERO,
1720 BMZERO,
1721 BMZERO,
1722 BM(F2_2412_2462, F1_2467_2472, F2_2484_2484, -1, -1, -1, -1, -1,
1723 -1, -1, -1, -1),
1724 BM(G2_2412_2462, G1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1725 -1),
1726 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1727
1728 {MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ,
1729 BMZERO,
1730 BMZERO,
1731 BMZERO,
1732 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1733 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1734 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1735
1736 {WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ,
1737 BMZERO,
1738 BMZERO,
1739 BMZERO,
1740 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1741 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1742 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1743
1744 {WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1745 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1746 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1747 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1748 -1, -1, -1, -1, -1),
1749 BMZERO,
1750 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1751 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1752 -1, -1),
1753 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1754 -1, -1),
1755 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1756
1757 {WOR01_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1758 ADHOC_PER_11D,
1759 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1760 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1761 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1762 -1, -1, -1, -1, -1),
1763 BMZERO,
1764 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1765 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1766 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1767 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1768
1769 {WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1770 ADHOC_PER_11D,
1771 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1772 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1773 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1774 -1, -1, -1, -1, -1),
1775 BMZERO,
1776 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1777 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1778 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1779 -1, -1),
1780 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1781
1782 {EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1783 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1784 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1785 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1786 -1, -1, -1, -1, -1),
1787 BMZERO,
1788 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472,
1789 W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
1790 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1791 -1, -1),
1792 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1793
1794 {WOR1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1795 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1796 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1797 BMZERO,
1798 BMZERO,
1799 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1800 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1801 -1, -1),
1802 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1803 -1, -1),
1804 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1805
1806 {WOR2_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1807 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1808 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1809 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1810 -1, -1, -1, -1, -1),
1811 BMZERO,
1812 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1813 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1814 -1, -1),
1815 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1816 -1, -1),
1817 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1818
1819 {WOR3_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1820 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, -1, -1,
1821 -1, -1, -1, -1, -1, -1),
1822 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1823 -1, -1, -1, -1, -1),
1824 BMZERO,
1825 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1826 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1827 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1828 -1, -1),
1829 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1830
1831 {WOR4_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1832 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1833 -1, -1, -1, -1),
1834 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1835 -1, -1, -1, -1, -1),
1836 BMZERO,
1837 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1838 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1839 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1840 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1841
1842 {WOR5_ETSIC, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1843 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1844 -1, -1, -1, -1),
1845 BMZERO,
1846 BMZERO,
1847 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1848 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1849 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1850 -1, -1),
1851 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1852
1853 {WOR9_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1854 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1855 -1, -1, -1, -1, -1, -1),
1856 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1857 -1, -1, -1, -1, -1),
1858 BMZERO,
1859 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1860 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1861 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1862 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1863
1864 {WORA_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1865 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1866 -1, -1, -1, -1, -1, -1),
1867 BMZERO,
1868 BMZERO,
1869 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1870 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1871 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1872 -1, -1),
1873 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1874
1875 {WORB_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1876 BM(W1_5260_5320, W1_5180_5240, W1_5500_5700, -1, -1, -1, -1, -1,
1877 -1, -1, -1, -1),
1878 BMZERO,
1879 BMZERO,
1880 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1881 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1882 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1883 -1, -1),
1884 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1885
1886 {NULL1, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1887 BMZERO,
1888 BMZERO,
1889 BMZERO,
1890 BMZERO,
1891 BMZERO,
1892 BMZERO}
1893};
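/*
 * Reading aid (inferred from the enums used above, so treat as a sketch):
 * each regDomain entry seems to carry a conformance-test-limit group, a
 * DFS mask, a passive-scan mask and behaviour flags, followed by six BM()
 * channel bitmasks that pick rows out of the frequency-band tables for,
 * in order: 5 GHz (F*), 5 GHz static turbo (T*), 5 GHz dynamic turbo (T*),
 * 2.4 GHz 11b (F*_2xxx), 2.4 GHz 11g (G*) and 2.4 GHz turbo-G (T*_2437).
 * BMZERO marks a band the domain does not support at all.
 */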
1894
1895static const struct cmode modes[] = {
1896 {ATH9K_MODE_11A, CHANNEL_A},
1897 {ATH9K_MODE_11B, CHANNEL_B},
1898 {ATH9K_MODE_11G, CHANNEL_G},
1899 {ATH9K_MODE_11NG_HT20, CHANNEL_G_HT20},
1900 {ATH9K_MODE_11NG_HT40PLUS, CHANNEL_G_HT40PLUS},
1901 {ATH9K_MODE_11NG_HT40MINUS, CHANNEL_G_HT40MINUS},
1902 {ATH9K_MODE_11NA_HT20, CHANNEL_A_HT20},
1903 {ATH9K_MODE_11NA_HT40PLUS, CHANNEL_A_HT40PLUS},
1904 {ATH9K_MODE_11NA_HT40MINUS, CHANNEL_A_HT40MINUS},
1905};
1906
1907static struct japan_bandcheck j_bandcheck[] = {
1908 {F1_5170_5230, AR_EEPROM_EEREGCAP_EN_KK_U1_ODD},
1909 {F4_5180_5240, AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN},
1910 {F2_5260_5320, AR_EEPROM_EEREGCAP_EN_KK_U2},
1911 {F4_5500_5700, AR_EEPROM_EEREGCAP_EN_KK_MIDBAND}
1912};
1913
1914
1915#endif
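For orientation only, a minimal sketch of how tables like the ones above could be consumed: pick the regDomain entry for the current country code, then test a candidate frequency band's enum index against the per-mode BM() bitmask. The names below (BMLEN, struct reg_dmn_sketch, band_allowed) are illustrative assumptions, not the driver's actual regulatory API.

/* Sketch only -- hypothetical names, not the ath9k regulatory code. */
#include <stdint.h>
#include <stdbool.h>

#define BMLEN 2				/* assume two 64-bit words per mask */

struct reg_dmn_sketch {
	uint64_t chan11g[BMLEN];	/* one of the six BM() masks above */
};

/* True if the frequency band with enum index 'band' is set in 'mask'. */
static bool band_allowed(const uint64_t mask[BMLEN], unsigned int band)
{
	return mask[band / 64] & (1ULL << (band % 64));
}

/*
 * Example use: with the enums above in scope, a caller building the 11g
 * channel list might test band_allowed(rd->chan11g, G1_2412_2462) before
 * registering the 2412-2462 MHz channels for that domain.
 */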
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index c92f0c6e4ad..3f70b1e58ae 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "core.h" 17#include "ath9k.h"
18 18
19#define BITS_PER_BYTE 8 19#define BITS_PER_BYTE 8
20#define OFDM_PLCP_BITS 22 20#define OFDM_PLCP_BITS 22
@@ -55,94 +55,19 @@ static u32 bits_per_symbol[][2] = {
55 55
56#define IS_HT_RATE(_rate) ((_rate) & 0x80) 56#define IS_HT_RATE(_rate) ((_rate) & 0x80)
57 57
58/* 58static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
59 * Insert a chain of ath_buf (descriptors) on a txq and 59 struct ath_atx_tid *tid,
60 * assume the descriptors are already chained together by caller. 60 struct list_head *bf_head);
61 * NB: must be called with txq lock held 61static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
62 */ 62 struct list_head *bf_q,
63 63 int txok, int sendbar);
64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
65 struct list_head *head) 65 struct list_head *head);
66{ 66static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
67 struct ath_hal *ah = sc->sc_ah;
68 struct ath_buf *bf;
69
70 /*
71 * Insert the frame on the outbound list and
72 * pass it on to the hardware.
73 */
74
75 if (list_empty(head))
76 return;
77
78 bf = list_first_entry(head, struct ath_buf, list);
79
80 list_splice_tail_init(head, &txq->axq_q);
81 txq->axq_depth++;
82 txq->axq_totalqueued++;
83 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
84
85 DPRINTF(sc, ATH_DBG_QUEUE,
86 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
87
88 if (txq->axq_link == NULL) {
89 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
90 DPRINTF(sc, ATH_DBG_XMIT,
91 "TXDP[%u] = %llx (%p)\n",
92 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
93 } else {
94 *txq->axq_link = bf->bf_daddr;
95 DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
96 txq->axq_qnum, txq->axq_link,
97 ito64(bf->bf_daddr), bf->bf_desc);
98 }
99 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
100 ath9k_hw_txstart(ah, txq->axq_qnum);
101}
102
103static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
104 struct ath_xmit_status *tx_status)
105{
106 struct ieee80211_hw *hw = sc->hw;
107 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
108 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
109 int hdrlen, padsize;
110
111 DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
112 67
113 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK || 68/*********************/
114 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) { 69/* Aggregation logic */
115 kfree(tx_info_priv); 70/*********************/
116 tx_info->rate_driver_data[0] = NULL;
117 }
118
119 if (tx_status->flags & ATH_TX_BAR) {
120 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
121 tx_status->flags &= ~ATH_TX_BAR;
122 }
123
124 if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
125 /* Frame was ACKed */
126 tx_info->flags |= IEEE80211_TX_STAT_ACK;
127 }
128
129 tx_info->status.rates[0].count = tx_status->retries + 1;
130
131 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
132 padsize = hdrlen & 3;
133 if (padsize && hdrlen >= 24) {
134 /*
135 * Remove MAC header padding before giving the frame back to
136 * mac80211.
137 */
138 memmove(skb->data + padsize, skb->data, hdrlen);
139 skb_pull(skb, padsize);
140 }
141
142 ieee80211_tx_status(hw, skb);
143}
144
145/* Check if it's okay to send out aggregates */
146 71
147static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno) 72static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
148{ 73{
@@ -156,232 +81,19 @@ static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
156 return 0; 81 return 0;
157} 82}
158 83
159static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
160 struct ath_beacon_config *conf)
161{
162 struct ieee80211_hw *hw = sc->hw;
163
164 /* fill in beacon config data */
165
166 conf->beacon_interval = hw->conf.beacon_int;
167 conf->listen_interval = 100;
168 conf->dtim_count = 1;
169 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
170}
171
172/* Calculate Atheros packet type from IEEE80211 packet header */
173
174static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
175{
176 struct ieee80211_hdr *hdr;
177 enum ath9k_pkt_type htype;
178 __le16 fc;
179
180 hdr = (struct ieee80211_hdr *)skb->data;
181 fc = hdr->frame_control;
182
183 if (ieee80211_is_beacon(fc))
184 htype = ATH9K_PKT_TYPE_BEACON;
185 else if (ieee80211_is_probe_resp(fc))
186 htype = ATH9K_PKT_TYPE_PROBE_RESP;
187 else if (ieee80211_is_atim(fc))
188 htype = ATH9K_PKT_TYPE_ATIM;
189 else if (ieee80211_is_pspoll(fc))
190 htype = ATH9K_PKT_TYPE_PSPOLL;
191 else
192 htype = ATH9K_PKT_TYPE_NORMAL;
193
194 return htype;
195}
196
197static bool is_pae(struct sk_buff *skb)
198{
199 struct ieee80211_hdr *hdr;
200 __le16 fc;
201
202 hdr = (struct ieee80211_hdr *)skb->data;
203 fc = hdr->frame_control;
204
205 if (ieee80211_is_data(fc)) {
206 if (ieee80211_is_nullfunc(fc) ||
207 /* Port Access Entity (IEEE 802.1X) */
208 (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
209 return true;
210 }
211 }
212
213 return false;
214}
215
216static int get_hw_crypto_keytype(struct sk_buff *skb)
217{
218 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
219
220 if (tx_info->control.hw_key) {
221 if (tx_info->control.hw_key->alg == ALG_WEP)
222 return ATH9K_KEY_TYPE_WEP;
223 else if (tx_info->control.hw_key->alg == ALG_TKIP)
224 return ATH9K_KEY_TYPE_TKIP;
225 else if (tx_info->control.hw_key->alg == ALG_CCMP)
226 return ATH9K_KEY_TYPE_AES;
227 }
228
229 return ATH9K_KEY_TYPE_CLEAR;
230}
231
232/* Called only when tx aggregation is enabled and HT is supported */
233
234static void assign_aggr_tid_seqno(struct sk_buff *skb,
235 struct ath_buf *bf)
236{
237 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
238 struct ieee80211_hdr *hdr;
239 struct ath_node *an;
240 struct ath_atx_tid *tid;
241 __le16 fc;
242 u8 *qc;
243
244 if (!tx_info->control.sta)
245 return;
246
247 an = (struct ath_node *)tx_info->control.sta->drv_priv;
248 hdr = (struct ieee80211_hdr *)skb->data;
249 fc = hdr->frame_control;
250
251 /* Get tidno */
252
253 if (ieee80211_is_data_qos(fc)) {
254 qc = ieee80211_get_qos_ctl(hdr);
255 bf->bf_tidno = qc[0] & 0xf;
256 }
257
258 /* Get seqno */
259 /* For HT capable stations, we save tidno for later use.
260 * We also override seqno set by upper layer with the one
261 * in tx aggregation state.
262 *
263 * If fragmentation is on, the sequence number is
264 * not overridden, since it has been
265 * incremented by the fragmentation routine.
266 *
267 * FIXME: check if the fragmentation threshold exceeds
268 * IEEE80211 max.
269 */
270 tid = ATH_AN_2_TID(an, bf->bf_tidno);
271 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
272 IEEE80211_SEQ_SEQ_SHIFT);
273 bf->bf_seqno = tid->seq_next;
274 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
275}
276
277static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
278 struct ath_txq *txq)
279{
280 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
281 int flags = 0;
282
283 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
284 flags |= ATH9K_TXDESC_INTREQ;
285
286 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
287 flags |= ATH9K_TXDESC_NOACK;
288 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
289 flags |= ATH9K_TXDESC_RTSENA;
290
291 return flags;
292}
293
294static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
295{
296 struct ath_buf *bf = NULL;
297
298 spin_lock_bh(&sc->tx.txbuflock);
299
300 if (unlikely(list_empty(&sc->tx.txbuf))) {
301 spin_unlock_bh(&sc->tx.txbuflock);
302 return NULL;
303 }
304
305 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
306 list_del(&bf->list);
307
308 spin_unlock_bh(&sc->tx.txbuflock);
309
310 return bf;
311}
312
313/* To complete a chain of buffers associated a frame */
314
315static void ath_tx_complete_buf(struct ath_softc *sc,
316 struct ath_buf *bf,
317 struct list_head *bf_q,
318 int txok, int sendbar)
319{
320 struct sk_buff *skb = bf->bf_mpdu;
321 struct ath_xmit_status tx_status;
322 unsigned long flags;
323
324 /*
325 * Set retry information.
326 * NB: Don't use the information in the descriptor, because the frame
327 * could be software retried.
328 */
329 tx_status.retries = bf->bf_retries;
330 tx_status.flags = 0;
331
332 if (sendbar)
333 tx_status.flags = ATH_TX_BAR;
334
335 if (!txok) {
336 tx_status.flags |= ATH_TX_ERROR;
337
338 if (bf_isxretried(bf))
339 tx_status.flags |= ATH_TX_XRETRY;
340 }
341
342 /* Unmap this frame */
343 pci_unmap_single(sc->pdev,
344 bf->bf_dmacontext,
345 skb->len,
346 PCI_DMA_TODEVICE);
347 /* complete this frame */
348 ath_tx_complete(sc, skb, &tx_status);
349
350 /*
351 * Return the list of ath_buf of this mpdu to free queue
352 */
353 spin_lock_irqsave(&sc->tx.txbuflock, flags);
354 list_splice_tail_init(bf_q, &sc->tx.txbuf);
355 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
356}
357
358/*
359 * queue up a dest/ac pair for tx scheduling
360 * NB: must be called with txq lock held
361 */
362
363static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) 84static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
364{ 85{
365 struct ath_atx_ac *ac = tid->ac; 86 struct ath_atx_ac *ac = tid->ac;
366 87
367 /*
368 * if tid is paused, hold off
369 */
370 if (tid->paused) 88 if (tid->paused)
371 return; 89 return;
372 90
373 /*
374 * add tid to ac atmost once
375 */
376 if (tid->sched) 91 if (tid->sched)
377 return; 92 return;
378 93
379 tid->sched = true; 94 tid->sched = true;
380 list_add_tail(&tid->list, &ac->tid_q); 95 list_add_tail(&tid->list, &ac->tid_q);
381 96
382 /*
383 * add node ac to txq atmost once
384 */
385 if (ac->sched) 97 if (ac->sched)
386 return; 98 return;
387 99
@@ -389,22 +101,16 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
389 list_add_tail(&ac->list, &txq->axq_acq); 101 list_add_tail(&ac->list, &txq->axq_acq);
390} 102}
391 103
392/* pause a tid */
393
394static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 104static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
395{ 105{
396 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 106 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
397 107
398 spin_lock_bh(&txq->axq_lock); 108 spin_lock_bh(&txq->axq_lock);
399
400 tid->paused++; 109 tid->paused++;
401
402 spin_unlock_bh(&txq->axq_lock); 110 spin_unlock_bh(&txq->axq_lock);
403} 111}
404 112
405/* resume a tid and schedule aggregate */ 113static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
406
407void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
408{ 114{
409 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 115 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
410 116
@@ -419,63 +125,39 @@ void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
419 if (list_empty(&tid->buf_q)) 125 if (list_empty(&tid->buf_q))
420 goto unlock; 126 goto unlock;
421 127
422 /*
423 * Add this TID to scheduler and try to send out aggregates
424 */
425 ath_tx_queue_tid(txq, tid); 128 ath_tx_queue_tid(txq, tid);
426 ath_txq_schedule(sc, txq); 129 ath_txq_schedule(sc, txq);
427unlock: 130unlock:
428 spin_unlock_bh(&txq->axq_lock); 131 spin_unlock_bh(&txq->axq_lock);
429} 132}
430 133
431/* Compute the number of bad frames */ 134static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
432
433static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
434 int txok)
435{ 135{
436 struct ath_buf *bf_last = bf->bf_lastbf; 136 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
437 struct ath_desc *ds = bf_last->bf_desc; 137 struct ath_buf *bf;
438 u16 seq_st = 0; 138 struct list_head bf_head;
439 u32 ba[WME_BA_BMP_SIZE >> 5]; 139 INIT_LIST_HEAD(&bf_head);
440 int ba_index;
441 int nbad = 0;
442 int isaggr = 0;
443
444 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
445 return 0;
446 140
447 isaggr = bf_isaggr(bf); 141 ASSERT(tid->paused > 0);
448 if (isaggr) { 142 spin_lock_bh(&txq->axq_lock);
449 seq_st = ATH_DS_BA_SEQ(ds);
450 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
451 }
452 143
453 while (bf) { 144 tid->paused--;
454 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
455 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
456 nbad++;
457 145
458 bf = bf->bf_next; 146 if (tid->paused > 0) {
147 spin_unlock_bh(&txq->axq_lock);
148 return;
459 } 149 }
460 150
461 return nbad; 151 while (!list_empty(&tid->buf_q)) {
462} 152 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
463 153 ASSERT(!bf_isretried(bf));
464static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) 154 list_move_tail(&bf->list, &bf_head);
465{ 155 ath_tx_send_normal(sc, txq, tid, &bf_head);
466 struct sk_buff *skb; 156 }
467 struct ieee80211_hdr *hdr;
468
469 bf->bf_state.bf_type |= BUF_RETRY;
470 bf->bf_retries++;
471 157
472 skb = bf->bf_mpdu; 158 spin_unlock_bh(&txq->axq_lock);
473 hdr = (struct ieee80211_hdr *)skb->data;
474 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
475} 159}
476 160
477/* Update block ack window */
478
479static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 161static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
480 int seqno) 162 int seqno)
481{ 163{
@@ -492,290 +174,142 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
492 } 174 }
493} 175}
494 176
495/* 177static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
496 * ath_pkt_dur - compute packet duration (NB: not NAV) 178 struct ath_buf *bf)
497 *
498 * rix - rate index
499 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
500 * width - 0 for 20 MHz, 1 for 40 MHz
501 * half_gi - to use 4us v/s 3.6 us for symbol time
502 */
503static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
504 int width, int half_gi, bool shortPreamble)
505{ 179{
506 struct ath_rate_table *rate_table = sc->cur_rate_table; 180 int index, cindex;
507 u32 nbits, nsymbits, duration, nsymbols;
508 u8 rc;
509 int streams, pktlen;
510
511 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
512 rc = rate_table->info[rix].ratecode;
513
514 /* for legacy rates, use old function to compute packet duration */
515 if (!IS_HT_RATE(rc))
516 return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
517 rix, shortPreamble);
518 181
519 /* find number of symbols: PLCP + data */ 182 if (bf_isretried(bf))
520 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 183 return;
521 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
522 nsymbols = (nbits + nsymbits - 1) / nsymbits;
523 184
524 if (!half_gi) 185 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
525 duration = SYMBOL_TIME(nsymbols); 186 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
526 else
527 duration = SYMBOL_TIME_HALFGI(nsymbols);
528 187
529 /* addup duration for legacy/ht training and signal fields */ 188 ASSERT(tid->tx_buf[cindex] == NULL);
530 streams = HT_RC_2_STREAMS(rc); 189 tid->tx_buf[cindex] = bf;
531 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
532 190
533 return duration; 191 if (index >= ((tid->baw_tail - tid->baw_head) &
192 (ATH_TID_MAX_BUFS - 1))) {
193 tid->baw_tail = cindex;
194 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
195 }
534} 196}
535 197
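/*
 * Worked example of the duration math above (a sketch, not driver code):
 * for a 1500-byte MPDU at MCS 7, 20 MHz, long GI, one spatial stream,
 * and assuming the usual MCS 7 value of 260 data bits per 20 MHz symbol,
 *     nbits    = (1500 << 3) + OFDM_PLCP_BITS = 12022
 *     nsymbols = (12022 + 259) / 260          = 47
 *     payload  = SYMBOL_TIME(47)              = 47 * 4 us = 188 us
 * and the mixed-mode preamble (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF +
 * HT_LTF(1) = 8 + 8 + 4 + 8 + 4 + 4 us) adds 36 us, roughly 224 us total.
 */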
536/* Rate module function to set rate related fields in tx descriptor */ 198/*
199 * TODO: For frame(s) that are in the retry state, we will reuse the
200 * sequence number(s) without setting the retry bit. The
201 * alternative is to give up on these and BAR the receiver's window
202 * forward.
203 */
204static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
205 struct ath_atx_tid *tid)
537 206
538static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
539{ 207{
540 struct ath_hal *ah = sc->sc_ah; 208 struct ath_buf *bf;
541 struct ath_rate_table *rt; 209 struct list_head bf_head;
542 struct ath_desc *ds = bf->bf_desc; 210 INIT_LIST_HEAD(&bf_head);
543 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
544 struct ath9k_11n_rate_series series[4];
545 struct sk_buff *skb;
546 struct ieee80211_tx_info *tx_info;
547 struct ieee80211_tx_rate *rates;
548 struct ieee80211_hdr *hdr;
549 int i, flags, rtsctsena = 0;
550 u32 ctsduration = 0;
551 u8 rix = 0, cix, ctsrate = 0;
552 __le16 fc;
553
554 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
555
556 skb = (struct sk_buff *)bf->bf_mpdu;
557 hdr = (struct ieee80211_hdr *)skb->data;
558 fc = hdr->frame_control;
559 tx_info = IEEE80211_SKB_CB(skb);
560 rates = tx_info->control.rates;
561
562 if (ieee80211_has_morefrags(fc) ||
563 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
564 rates[1].count = rates[2].count = rates[3].count = 0;
565 rates[1].idx = rates[2].idx = rates[3].idx = 0;
566 rates[0].count = ATH_TXMAXTRY;
567 }
568 211
569 /* get the cix for the lowest valid rix */ 212 for (;;) {
570 rt = sc->cur_rate_table; 213 if (list_empty(&tid->buf_q))
571 for (i = 3; i >= 0; i--) {
572 if (rates[i].count && (rates[i].idx >= 0)) {
573 rix = rates[i].idx;
574 break; 214 break;
575 }
576 }
577
578 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
579 cix = rt->info[rix].ctrl_rate;
580
581 /*
582 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
583 * just CTS. Note that this is only done for OFDM/HT unicast frames.
584 */
585 if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
586 && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
587 WLAN_RC_PHY_HT(rt->info[rix].phy))) {
588 if (sc->sc_protmode == PROT_M_RTSCTS)
589 flags = ATH9K_TXDESC_RTSENA;
590 else if (sc->sc_protmode == PROT_M_CTSONLY)
591 flags = ATH9K_TXDESC_CTSENA;
592
593 cix = rt->info[sc->sc_protrix].ctrl_rate;
594 rtsctsena = 1;
595 }
596
597 /* For 11n, the default behavior is to enable RTS for hw retried frames.
598 * We enable the global flag here and let rate series flags determine
599 * which rates will actually use RTS.
600 */
601 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
602 /* 802.11g protection not needed, use our default behavior */
603 if (!rtsctsena)
604 flags = ATH9K_TXDESC_RTSENA;
605 }
606
607 /* Set protection if aggregate protection on */
608 if (sc->sc_config.ath_aggr_prot &&
609 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
610 flags = ATH9K_TXDESC_RTSENA;
611 cix = rt->info[sc->sc_protrix].ctrl_rate;
612 rtsctsena = 1;
613 }
614
615 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
616 if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
617 flags &= ~(ATH9K_TXDESC_RTSENA);
618
619 /*
620 * CTS transmit rate is derived from the transmit rate by looking in the
621 * h/w rate table. We must also factor in whether or not a short
622 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
623 */
624 ctsrate = rt->info[cix].ratecode |
625 (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
626
627 for (i = 0; i < 4; i++) {
628 if (!rates[i].count || (rates[i].idx < 0))
629 continue;
630 215
631 rix = rates[i].idx; 216 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
632 217 list_move_tail(&bf->list, &bf_head);
633 series[i].Rate = rt->info[rix].ratecode |
634 (bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);
635
636 series[i].Tries = rates[i].count;
637
638 series[i].RateFlags = (
639 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
640 ATH9K_RATESERIES_RTS_CTS : 0) |
641 ((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
642 ATH9K_RATESERIES_2040 : 0) |
643 ((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
644 ATH9K_RATESERIES_HALFGI : 0);
645
646 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
647 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
648 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
649 bf_isshpreamble(bf));
650 218
651 series[i].ChSel = sc->sc_tx_chainmask; 219 if (bf_isretried(bf))
220 ath_tx_update_baw(sc, tid, bf->bf_seqno);
652 221
653 if (rtsctsena) 222 spin_unlock(&txq->axq_lock);
654 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 223 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
224 spin_lock(&txq->axq_lock);
655 } 225 }
656 226
657 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 227 tid->seq_next = tid->seq_start;
658 ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf), 228 tid->baw_tail = tid->baw_head;
659 ctsrate, ctsduration,
660 series, 4, flags);
661
662 if (sc->sc_config.ath_aggr_prot && flags)
663 ath9k_hw_set11n_burstduration(ah, ds, 8192);
664} 229}
665 230
666/* 231static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
667 * Function to send a normal HT (non-AMPDU) frame
668 * NB: must be called with txq lock held
669 */
670static int ath_tx_send_normal(struct ath_softc *sc,
671 struct ath_txq *txq,
672 struct ath_atx_tid *tid,
673 struct list_head *bf_head)
674{ 232{
675 struct ath_buf *bf; 233 struct sk_buff *skb;
676 234 struct ieee80211_hdr *hdr;
677 BUG_ON(list_empty(bf_head));
678
679 bf = list_first_entry(bf_head, struct ath_buf, list);
680 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
681
682 /* update starting sequence number for subsequent ADDBA request */
683 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
684 235
685 /* Queue to h/w without aggregation */ 236 bf->bf_state.bf_type |= BUF_RETRY;
686 bf->bf_nframes = 1; 237 bf->bf_retries++;
687 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
688 ath_buf_set_rate(sc, bf);
689 ath_tx_txqaddbuf(sc, txq, bf_head);
690 238
691 return 0; 239 skb = bf->bf_mpdu;
240 hdr = (struct ieee80211_hdr *)skb->data;
241 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
692} 242}
693 243
694/* flush tid's software queue and send frames as non-ampdu's */ 244static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
695
696static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
697{ 245{
698 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 246 struct ath_buf *tbf;
699 struct ath_buf *bf;
700 struct list_head bf_head;
701 INIT_LIST_HEAD(&bf_head);
702
703 ASSERT(tid->paused > 0);
704 spin_lock_bh(&txq->axq_lock);
705 247
706 tid->paused--; 248 spin_lock_bh(&sc->tx.txbuflock);
249 ASSERT(!list_empty((&sc->tx.txbuf)));
250 tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
251 list_del(&tbf->list);
252 spin_unlock_bh(&sc->tx.txbuflock);
707 253
708 if (tid->paused > 0) { 254 ATH_TXBUF_RESET(tbf);
709 spin_unlock_bh(&txq->axq_lock);
710 return;
711 }
712 255
713 while (!list_empty(&tid->buf_q)) { 256 tbf->bf_mpdu = bf->bf_mpdu;
714 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 257 tbf->bf_buf_addr = bf->bf_buf_addr;
715 ASSERT(!bf_isretried(bf)); 258 *(tbf->bf_desc) = *(bf->bf_desc);
716 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); 259 tbf->bf_state = bf->bf_state;
717 ath_tx_send_normal(sc, txq, tid, &bf_head); 260 tbf->bf_dmacontext = bf->bf_dmacontext;
718 }
719 261
720 spin_unlock_bh(&txq->axq_lock); 262 return tbf;
721} 263}
722 264
723/* Completion routine of an aggregate */ 265static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
724 266 struct ath_buf *bf, struct list_head *bf_q,
725static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, 267 int txok)
726 struct ath_txq *txq,
727 struct ath_buf *bf,
728 struct list_head *bf_q,
729 int txok)
730{ 268{
731 struct ath_node *an = NULL; 269 struct ath_node *an = NULL;
732 struct sk_buff *skb; 270 struct sk_buff *skb;
733 struct ieee80211_tx_info *tx_info; 271 struct ieee80211_sta *sta;
272 struct ieee80211_hdr *hdr;
734 struct ath_atx_tid *tid = NULL; 273 struct ath_atx_tid *tid = NULL;
735 struct ath_buf *bf_last = bf->bf_lastbf; 274 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
736 struct ath_desc *ds = bf_last->bf_desc; 275 struct ath_desc *ds = bf_last->bf_desc;
737 struct ath_buf *bf_next, *bf_lastq = NULL;
738 struct list_head bf_head, bf_pending; 276 struct list_head bf_head, bf_pending;
739 u16 seq_st = 0; 277 u16 seq_st = 0;
740 u32 ba[WME_BA_BMP_SIZE >> 5]; 278 u32 ba[WME_BA_BMP_SIZE >> 5];
741 int isaggr, txfail, txpending, sendbar = 0, needreset = 0; 279 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
742 280
743 skb = (struct sk_buff *)bf->bf_mpdu; 281 skb = (struct sk_buff *)bf->bf_mpdu;
744 tx_info = IEEE80211_SKB_CB(skb); 282 hdr = (struct ieee80211_hdr *)skb->data;
745 283
746 if (tx_info->control.sta) { 284 rcu_read_lock();
747 an = (struct ath_node *)tx_info->control.sta->drv_priv; 285
748 tid = ATH_AN_2_TID(an, bf->bf_tidno); 286 sta = ieee80211_find_sta(sc->hw, hdr->addr1);
287 if (!sta) {
288 rcu_read_unlock();
289 return;
749 } 290 }
750 291
292 an = (struct ath_node *)sta->drv_priv;
293 tid = ATH_AN_2_TID(an, bf->bf_tidno);
294
751 isaggr = bf_isaggr(bf); 295 isaggr = bf_isaggr(bf);
752 if (isaggr) { 296 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
753 if (txok) {
754 if (ATH_DS_TX_BA(ds)) {
755 /*
756 * extract starting sequence and
757 * block-ack bitmap
758 */
759 seq_st = ATH_DS_BA_SEQ(ds);
760 memcpy(ba,
761 ATH_DS_BA_BITMAP(ds),
762 WME_BA_BMP_SIZE >> 3);
763 } else {
764 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
765 297
766 /* 298 if (isaggr && txok) {
767 * AR5416 can become deaf/mute when BA 299 if (ATH_DS_TX_BA(ds)) {
768 * issue happens. Chip needs to be reset. 300 seq_st = ATH_DS_BA_SEQ(ds);
769 * But AP code may have sychronization issues 301 memcpy(ba, ATH_DS_BA_BITMAP(ds),
770 * when perform internal reset in this routine. 302 WME_BA_BMP_SIZE >> 3);
771 * Only enable reset in STA mode for now.
772 */
773 if (sc->sc_ah->ah_opmode ==
774 NL80211_IFTYPE_STATION)
775 needreset = 1;
776 }
777 } else { 303 } else {
778 memset(ba, 0, WME_BA_BMP_SIZE >> 3); 304 /*
305 * AR5416 can become deaf/mute when BA
306 * issue happens. Chip needs to be reset.
307 * But AP code may have sychronization issues
308 * when perform internal reset in this routine.
309 * Only enable reset in STA mode for now.
310 */
311 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
312 needreset = 1;
779 } 313 }
780 } 314 }
781 315
@@ -792,7 +326,6 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
792 } else if (!isaggr && txok) { 326 } else if (!isaggr && txok) {
793 /* transmit completion */ 327 /* transmit completion */
794 } else { 328 } else {
795
796 if (!(tid->state & AGGR_CLEANUP) && 329 if (!(tid->state & AGGR_CLEANUP) &&
797 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { 330 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
798 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 331 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
@@ -811,37 +344,12 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
811 txfail = 1; 344 txfail = 1;
812 } 345 }
813 } 346 }
814 /*
815 * Remove ath_buf's of this sub-frame from aggregate queue.
816 */
817 if (bf_next == NULL) { /* last subframe in the aggregate */
818 ASSERT(bf->bf_lastfrm == bf_last);
819 347
820 /* 348 if (bf_next == NULL) {
821 * The last descriptor of the last sub frame could be 349 INIT_LIST_HEAD(&bf_head);
822 * a holding descriptor for h/w. If that's the case,
823 * bf->bf_lastfrm won't be in the bf_q.
824 * Make sure we handle bf_q properly here.
825 */
826
827 if (!list_empty(bf_q)) {
828 bf_lastq = list_entry(bf_q->prev,
829 struct ath_buf, list);
830 list_cut_position(&bf_head,
831 bf_q, &bf_lastq->list);
832 } else {
833 /*
834 * XXX: if the last subframe only has one
835 * descriptor which is also being used as
836 * a holding descriptor. Then the ath_buf
837 * is not in the bf_q at all.
838 */
839 INIT_LIST_HEAD(&bf_head);
840 }
841 } else { 350 } else {
842 ASSERT(!list_empty(bf_q)); 351 ASSERT(!list_empty(bf_q));
843 list_cut_position(&bf_head, 352 list_move_tail(&bf->list, &bf_head);
844 bf_q, &bf->bf_lastfrm->list);
845 } 353 }
846 354
847 if (!txpending) { 355 if (!txpending) {
@@ -853,62 +361,22 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
853 ath_tx_update_baw(sc, tid, bf->bf_seqno); 361 ath_tx_update_baw(sc, tid, bf->bf_seqno);
854 spin_unlock_bh(&txq->axq_lock); 362 spin_unlock_bh(&txq->axq_lock);
855 363
856 /* complete this sub-frame */
857 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); 364 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
858 } else { 365 } else {
859 /* 366 /* retry the un-acked ones */
860 * retry the un-acked ones
861 */
862 /*
863 * XXX: if the last descriptor is holding descriptor,
864 * in order to requeue the frame to software queue, we
865 * need to allocate a new descriptor and
866 * copy the content of holding descriptor to it.
867 */
868 if (bf->bf_next == NULL && 367 if (bf->bf_next == NULL &&
869 bf_last->bf_status & ATH_BUFSTATUS_STALE) { 368 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
870 struct ath_buf *tbf; 369 struct ath_buf *tbf;
871 370
872 /* allocate new descriptor */ 371 tbf = ath_clone_txbuf(sc, bf_last);
873 spin_lock_bh(&sc->tx.txbuflock); 372 ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
874 ASSERT(!list_empty((&sc->tx.txbuf)));
875 tbf = list_first_entry(&sc->tx.txbuf,
876 struct ath_buf, list);
877 list_del(&tbf->list);
878 spin_unlock_bh(&sc->tx.txbuflock);
879
880 ATH_TXBUF_RESET(tbf);
881
882 /* copy descriptor content */
883 tbf->bf_mpdu = bf_last->bf_mpdu;
884 tbf->bf_buf_addr = bf_last->bf_buf_addr;
885 *(tbf->bf_desc) = *(bf_last->bf_desc);
886
887 /* link it to the frame */
888 if (bf_lastq) {
889 bf_lastq->bf_desc->ds_link =
890 tbf->bf_daddr;
891 bf->bf_lastfrm = tbf;
892 ath9k_hw_cleartxdesc(sc->sc_ah,
893 bf->bf_lastfrm->bf_desc);
894 } else {
895 tbf->bf_state = bf_last->bf_state;
896 tbf->bf_lastfrm = tbf;
897 ath9k_hw_cleartxdesc(sc->sc_ah,
898 tbf->bf_lastfrm->bf_desc);
899
900 /* copy the DMA context */
901 tbf->bf_dmacontext =
902 bf_last->bf_dmacontext;
903 }
904 list_add_tail(&tbf->list, &bf_head); 373 list_add_tail(&tbf->list, &bf_head);
905 } else { 374 } else {
906 /* 375 /*
907 * Clear descriptor status words for 376 * Clear descriptor status words for
908 * software retry 377 * software retry
909 */ 378 */
910 ath9k_hw_cleartxdesc(sc->sc_ah, 379 ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
911 bf->bf_lastfrm->bf_desc);
912 } 380 }
913 381
914 /* 382 /*
@@ -922,332 +390,33 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
922 } 390 }
923 391
924 if (tid->state & AGGR_CLEANUP) { 392 if (tid->state & AGGR_CLEANUP) {
925 /* check to see if we're done with cleaning the h/w queue */
926 spin_lock_bh(&txq->axq_lock);
927
928 if (tid->baw_head == tid->baw_tail) { 393 if (tid->baw_head == tid->baw_tail) {
929 tid->state &= ~AGGR_ADDBA_COMPLETE; 394 tid->state &= ~AGGR_ADDBA_COMPLETE;
930 tid->addba_exchangeattempts = 0; 395 tid->addba_exchangeattempts = 0;
931 spin_unlock_bh(&txq->axq_lock);
932
933 tid->state &= ~AGGR_CLEANUP; 396 tid->state &= ~AGGR_CLEANUP;
934 397
935 /* send buffered frames as singles */ 398 /* send buffered frames as singles */
936 ath_tx_flush_tid(sc, tid); 399 ath_tx_flush_tid(sc, tid);
937 } else 400 }
938 spin_unlock_bh(&txq->axq_lock); 401 rcu_read_unlock();
939
940 return; 402 return;
941 } 403 }
942 404
943 /* 405 /* prepend un-acked frames to the beginning of the pending frame queue */
944 * prepend un-acked frames to the beginning of the pending frame queue
945 */
946 if (!list_empty(&bf_pending)) { 406 if (!list_empty(&bf_pending)) {
947 spin_lock_bh(&txq->axq_lock); 407 spin_lock_bh(&txq->axq_lock);
948 /* Note: we _prepend_, we _do_not_ at to
949 * the end of the queue ! */
950 list_splice(&bf_pending, &tid->buf_q); 408 list_splice(&bf_pending, &tid->buf_q);
951 ath_tx_queue_tid(txq, tid); 409 ath_tx_queue_tid(txq, tid);
952 spin_unlock_bh(&txq->axq_lock); 410 spin_unlock_bh(&txq->axq_lock);
953 } 411 }
954 412
413 rcu_read_unlock();
414
955 if (needreset) 415 if (needreset)
956 ath_reset(sc, false); 416 ath_reset(sc, false);
957
958 return;
959}
960
961static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
962{
963 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
964 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
965 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
966
967 tx_info_priv->update_rc = false;
968 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
969 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
970
971 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
972 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
973 if (bf_isdata(bf)) {
974 memcpy(&tx_info_priv->tx, &ds->ds_txstat,
975 sizeof(tx_info_priv->tx));
976 tx_info_priv->n_frames = bf->bf_nframes;
977 tx_info_priv->n_bad_frames = nbad;
978 tx_info_priv->update_rc = true;
979 }
980 }
981}
982
983/* Process completed xmit descriptors from the specified queue */
984
985static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
986{
987 struct ath_hal *ah = sc->sc_ah;
988 struct ath_buf *bf, *lastbf, *bf_held = NULL;
989 struct list_head bf_head;
990 struct ath_desc *ds;
991 int txok, nbad = 0;
992 int status;
993
994 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
995 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
996 txq->axq_link);
997
998 for (;;) {
999 spin_lock_bh(&txq->axq_lock);
1000 if (list_empty(&txq->axq_q)) {
1001 txq->axq_link = NULL;
1002 txq->axq_linkbuf = NULL;
1003 spin_unlock_bh(&txq->axq_lock);
1004 break;
1005 }
1006 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1007
1008 /*
1009 * There is a race condition that a BH gets scheduled
1010 * after sw writes TxE and before hw re-load the last
1011 * descriptor to get the newly chained one.
1012 * Software must keep the last DONE descriptor as a
1013 * holding descriptor - software does so by marking
1014 * it with the STALE flag.
1015 */
1016 bf_held = NULL;
1017 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
1018 bf_held = bf;
1019 if (list_is_last(&bf_held->list, &txq->axq_q)) {
1020 /* FIXME:
1021 * The holding descriptor is the last
1022 * descriptor in queue. It's safe to remove
1023 * the last holding descriptor in BH context.
1024 */
1025 spin_unlock_bh(&txq->axq_lock);
1026 break;
1027 } else {
1028 /* Lets work with the next buffer now */
1029 bf = list_entry(bf_held->list.next,
1030 struct ath_buf, list);
1031 }
1032 }
1033
1034 lastbf = bf->bf_lastbf;
1035 ds = lastbf->bf_desc; /* NB: last descriptor */
1036
1037 status = ath9k_hw_txprocdesc(ah, ds);
1038 if (status == -EINPROGRESS) {
1039 spin_unlock_bh(&txq->axq_lock);
1040 break;
1041 }
1042 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1043 txq->axq_lastdsWithCTS = NULL;
1044 if (ds == txq->axq_gatingds)
1045 txq->axq_gatingds = NULL;
1046
1047 /*
1048 * Remove ath_buf's of the same transmit unit from txq,
1049 * however leave the last descriptor back as the holding
1050 * descriptor for hw.
1051 */
1052 lastbf->bf_status |= ATH_BUFSTATUS_STALE;
1053 INIT_LIST_HEAD(&bf_head);
1054
1055 if (!list_is_singular(&lastbf->list))
1056 list_cut_position(&bf_head,
1057 &txq->axq_q, lastbf->list.prev);
1058
1059 txq->axq_depth--;
1060
1061 if (bf_isaggr(bf))
1062 txq->axq_aggr_depth--;
1063
1064 txok = (ds->ds_txstat.ts_status == 0);
1065
1066 spin_unlock_bh(&txq->axq_lock);
1067
1068 if (bf_held) {
1069 list_del(&bf_held->list);
1070 spin_lock_bh(&sc->tx.txbuflock);
1071 list_add_tail(&bf_held->list, &sc->tx.txbuf);
1072 spin_unlock_bh(&sc->tx.txbuflock);
1073 }
1074
1075 if (!bf_isampdu(bf)) {
1076 /*
1077 * This frame is sent out as a single frame.
1078 * Use hardware retry status for this frame.
1079 */
1080 bf->bf_retries = ds->ds_txstat.ts_longretry;
1081 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1082 bf->bf_state.bf_type |= BUF_XRETRY;
1083 nbad = 0;
1084 } else {
1085 nbad = ath_tx_num_badfrms(sc, bf, txok);
1086 }
1087
1088 ath_tx_rc_status(bf, ds, nbad);
1089
1090 /*
1091 * Complete this transmit unit
1092 */
1093 if (bf_isampdu(bf))
1094 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1095 else
1096 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
1097
1098 /* Wake up mac80211 queue */
1099
1100 spin_lock_bh(&txq->axq_lock);
1101 if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
1102 (ATH_TXBUF - 20)) {
1103 int qnum;
1104 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1105 if (qnum != -1) {
1106 ieee80211_wake_queue(sc->hw, qnum);
1107 txq->stopped = 0;
1108 }
1109
1110 }
1111
1112 /*
1113 * schedule any pending packets if aggregation is enabled
1114 */
1115 if (sc->sc_flags & SC_OP_TXAGGR)
1116 ath_txq_schedule(sc, txq);
1117 spin_unlock_bh(&txq->axq_lock);
1118 }
1119}
1120
1121static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1122{
1123 struct ath_hal *ah = sc->sc_ah;
1124
1125 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1126 DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
1127 txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
1128 txq->axq_link);
1129}
1130
1131/* Drain only the data queues */
1132
1133static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1134{
1135 struct ath_hal *ah = sc->sc_ah;
1136 int i, status, npend = 0;
1137
1138 if (!(sc->sc_flags & SC_OP_INVALID)) {
1139 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1140 if (ATH_TXQ_SETUP(sc, i)) {
1141 ath_tx_stopdma(sc, &sc->tx.txq[i]);
1142 /* The TxDMA may not really be stopped.
1143 * Double check the hal tx pending count */
1144 npend += ath9k_hw_numtxpending(ah,
1145 sc->tx.txq[i].axq_qnum);
1146 }
1147 }
1148 }
1149
1150 if (npend) {
1151 /* TxDMA not stopped, reset the hal */
1152 DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
1153
1154 spin_lock_bh(&sc->sc_resetlock);
1155 if (!ath9k_hw_reset(ah,
1156 sc->sc_ah->ah_curchan,
1157 sc->tx_chan_width,
1158 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1159 sc->sc_ht_extprotspacing, true, &status)) {
1160
1161 DPRINTF(sc, ATH_DBG_FATAL,
1162 "Unable to reset hardware; hal status %u\n",
1163 status);
1164 }
1165 spin_unlock_bh(&sc->sc_resetlock);
1166 }
1167
1168 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1169 if (ATH_TXQ_SETUP(sc, i))
1170 ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
1171 }
1172}
1173
1174/* Add a sub-frame to block ack window */
1175
1176static void ath_tx_addto_baw(struct ath_softc *sc,
1177 struct ath_atx_tid *tid,
1178 struct ath_buf *bf)
1179{
1180 int index, cindex;
1181
1182 if (bf_isretried(bf))
1183 return;
1184
1185 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1186 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1187
1188 ASSERT(tid->tx_buf[cindex] == NULL);
1189 tid->tx_buf[cindex] = bf;
1190
1191 if (index >= ((tid->baw_tail - tid->baw_head) &
1192 (ATH_TID_MAX_BUFS - 1))) {
1193 tid->baw_tail = cindex;
1194 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1195 }
1196} 417}
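The removed ath_tx_addto_baw() above tracks each subframe in a circular block-ack window. A minimal standalone sketch of the same bookkeeping follows, assuming ATH_BA_INDEX() is a modular offset from the window start, a 64-slot window, and the 12-bit 802.11 sequence space; the constants and names are illustrative, not the driver's.

/*
 * Sketch of the block-ack window index math: offset from the window
 * start, then a wrap into a circular array of tracking slots.
 */
#include <stdio.h>

#define SEQ_MAX   4096          /* 12-bit 802.11 sequence space */
#define WIN_SIZE  64            /* assumed number of block-ack window slots */

static int ba_index(int seq_start, int seqno)
{
	return (seqno - seq_start) & (SEQ_MAX - 1);
}

int main(void)
{
	int seq_start = 4090, baw_head = 10;
	int seqno = 3;                          /* wrapped past SEQ_MAX */
	int index = ba_index(seq_start, seqno);
	int cindex = (baw_head + index) & (WIN_SIZE - 1);

	printf("index=%d cindex=%d\n", index, cindex);  /* index=9 cindex=19 */
	return 0;
}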
1197 418
1198/* 419static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
1199 * Function to send an A-MPDU
1200 * NB: must be called with txq lock held
1201 */
1202static int ath_tx_send_ampdu(struct ath_softc *sc,
1203 struct ath_atx_tid *tid,
1204 struct list_head *bf_head,
1205 struct ath_tx_control *txctl)
1206{
1207 struct ath_buf *bf;
1208
1209 BUG_ON(list_empty(bf_head));
1210
1211 bf = list_first_entry(bf_head, struct ath_buf, list);
1212 bf->bf_state.bf_type |= BUF_AMPDU;
1213
1214 /*
1215 * Do not queue to h/w when any of the following conditions is true:
1216 * - there are pending frames in software queue
1217 * - the TID is currently paused for ADDBA/BAR request
1218 * - seqno is not within block-ack window
1219 * - h/w queue depth exceeds low water mark
1220 */
1221 if (!list_empty(&tid->buf_q) || tid->paused ||
1222 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1223 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1224 /*
1225 * Add this frame to software queue for scheduling later
1226 * for aggregation.
1227 */
1228 list_splice_tail_init(bf_head, &tid->buf_q);
1229 ath_tx_queue_tid(txctl->txq, tid);
1230 return 0;
1231 }
1232
1233 /* Add sub-frame to BAW */
1234 ath_tx_addto_baw(sc, tid, bf);
1235
1236 /* Queue to h/w without aggregation */
1237 bf->bf_nframes = 1;
1238 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1239 ath_buf_set_rate(sc, bf);
1240 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
1241
1242 return 0;
1243}
1244
1245/*
1246 * looks up the rate
1247 * returns aggr limit based on lowest of the rates
1248 */
1249static u32 ath_lookup_rate(struct ath_softc *sc,
1250 struct ath_buf *bf,
1251 struct ath_atx_tid *tid) 420 struct ath_atx_tid *tid)
1252{ 421{
1253 struct ath_rate_table *rate_table = sc->cur_rate_table; 422 struct ath_rate_table *rate_table = sc->cur_rate_table;
@@ -1255,15 +424,14 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1255 struct ieee80211_tx_info *tx_info; 424 struct ieee80211_tx_info *tx_info;
1256 struct ieee80211_tx_rate *rates; 425 struct ieee80211_tx_rate *rates;
1257 struct ath_tx_info_priv *tx_info_priv; 426 struct ath_tx_info_priv *tx_info_priv;
1258 u32 max_4ms_framelen, frame_length; 427 u32 max_4ms_framelen, frmlen;
1259 u16 aggr_limit, legacy = 0, maxampdu; 428 u16 aggr_limit, legacy = 0, maxampdu;
1260 int i; 429 int i;
1261 430
1262 skb = (struct sk_buff *)bf->bf_mpdu; 431 skb = (struct sk_buff *)bf->bf_mpdu;
1263 tx_info = IEEE80211_SKB_CB(skb); 432 tx_info = IEEE80211_SKB_CB(skb);
1264 rates = tx_info->control.rates; 433 rates = tx_info->control.rates;
1265 tx_info_priv = 434 tx_info_priv = (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
1266 (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
1267 435
1268 /* 436 /*
1269 * Find the lowest frame length among the rate series that will have a 437 * Find the lowest frame length among the rate series that will have a
@@ -1279,9 +447,8 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1279 break; 447 break;
1280 } 448 }
1281 449
1282 frame_length = 450 frmlen = rate_table->info[rates[i].idx].max_4ms_framelen;
1283 rate_table->info[rates[i].idx].max_4ms_framelen; 451 max_4ms_framelen = min(max_4ms_framelen, frmlen);
1284 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1285 } 452 }
1286 } 453 }
1287 454
@@ -1293,8 +460,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1293 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) 460 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1294 return 0; 461 return 0;
1295 462
1296 aggr_limit = min(max_4ms_framelen, 463 aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_DEFAULT);
1297 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1298 464
1299 /* 465 /*
1300 * h/w can accept aggregates up to 16 bit lengths (65535). 466
@@ -1309,14 +475,12 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1309} 475}
1310 476
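ath_lookup_rate() above derives the aggregate size limit from the slowest rate in the configured series and then caps it by a default A-MPDU limit. A rough standalone sketch of that clamp, with made-up per-rate "bytes in 4 ms" values and an assumed default cap:

#include <stdio.h>

#define AMPDU_LIMIT_DEFAULT 32768   /* illustrative cap, not the driver's constant */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* per-rate "max bytes transmittable in 4 ms" for the rate series */
	unsigned int max_4ms[4] = { 65532, 43690, 29127, 21845 };
	unsigned int limit = ~0u;
	int i;

	for (i = 0; i < 4; i++)
		limit = min_u32(limit, max_4ms[i]);

	limit = min_u32(limit, AMPDU_LIMIT_DEFAULT);
	printf("aggr_limit = %u bytes\n", limit);   /* 21845 */
	return 0;
}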
1311/* 477/*
1312 * returns the number of delimiters to be added to 478 * Returns the number of delimiters to be added to
1313 * meet the minimum required mpdudensity. 479 * meet the minimum required mpdudensity.
1314 * caller should make sure that the rate is an HT rate. 480 * caller should make sure that the rate is an HT rate.
1315 */ 481 */
1316static int ath_compute_num_delims(struct ath_softc *sc, 482static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
1317 struct ath_atx_tid *tid, 483 struct ath_buf *bf, u16 frmlen)
1318 struct ath_buf *bf,
1319 u16 frmlen)
1320{ 484{
1321 struct ath_rate_table *rt = sc->cur_rate_table; 485 struct ath_rate_table *rt = sc->cur_rate_table;
1322 struct sk_buff *skb = bf->bf_mpdu; 486 struct sk_buff *skb = bf->bf_mpdu;
@@ -1370,9 +534,7 @@ static int ath_compute_num_delims(struct ath_softc *sc,
1370 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; 534 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1371 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; 535 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
1372 536
1373 /* Is frame shorter than required minimum length? */
1374 if (frmlen < minlen) { 537 if (frmlen < minlen) {
1375 /* Get the minimum number of delimiters required. */
1376 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; 538 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1377 ndelim = max(mindelim, ndelim); 539 ndelim = max(mindelim, ndelim);
1378 } 540 }
@@ -1380,37 +542,23 @@ static int ath_compute_num_delims(struct ath_softc *sc,
1380 return ndelim; 542 return ndelim;
1381} 543}
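ath_compute_num_delims() pads short subframes with 4-byte MPDU delimiters so the receiver's minimum MPDU density is honoured. A simplified sketch of the final padding step, with the density-to-minimum-length conversion replaced by an assumed value:

#include <stdio.h>

#define DELIM_SZ 4      /* an A-MPDU delimiter is 4 bytes */

int main(void)
{
	unsigned int minlen = 256;  /* assumed min bytes implied by MPDU density */
	unsigned int frmlen = 90;   /* actual subframe length */
	unsigned int ndelim = 0;

	if (frmlen < minlen) {
		unsigned int mindelim = (minlen - frmlen) / DELIM_SZ;
		if (mindelim > ndelim)
			ndelim = mindelim;
	}
	printf("ndelim = %u\n", ndelim);    /* 41 */
	return 0;
}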
1382 544
1383/*
1384 * For aggregation from software buffer queue.
1385 * NB: must be called with txq lock held
1386 */
1387static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 545static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1388 struct ath_atx_tid *tid, 546 struct ath_atx_tid *tid,
1389 struct list_head *bf_q, 547 struct list_head *bf_q)
1390 struct ath_buf **bf_last,
1391 struct aggr_rifs_param *param,
1392 int *prev_frames)
1393{ 548{
1394#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 549#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
1395 struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL; 550 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
1396 struct list_head bf_head; 551 int rl = 0, nframes = 0, ndelim, prev_al = 0;
1397 int rl = 0, nframes = 0, ndelim;
1398 u16 aggr_limit = 0, al = 0, bpad = 0, 552 u16 aggr_limit = 0, al = 0, bpad = 0,
1399 al_delta, h_baw = tid->baw_size / 2; 553 al_delta, h_baw = tid->baw_size / 2;
1400 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 554 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1401 int prev_al = 0;
1402 INIT_LIST_HEAD(&bf_head);
1403
1404 BUG_ON(list_empty(&tid->buf_q));
1405 555
1406 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); 556 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
1407 557
1408 do { 558 do {
1409 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 559 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1410 560
1411 /* 561 /* do not step over block-ack window */
1412 * do not step over block-ack window
1413 */
1414 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { 562 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
1415 status = ATH_AGGR_BAW_CLOSED; 563 status = ATH_AGGR_BAW_CLOSED;
1416 break; 564 break;
@@ -1421,29 +569,23 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1421 rl = 1; 569 rl = 1;
1422 } 570 }
1423 571
1424 /* 572 /* do not exceed aggregation limit */
1425 * do not exceed aggregation limit
1426 */
1427 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; 573 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
1428 574
1429 if (nframes && (aggr_limit < 575 if (nframes &&
1430 (al + bpad + al_delta + prev_al))) { 576 (aggr_limit < (al + bpad + al_delta + prev_al))) {
1431 status = ATH_AGGR_LIMITED; 577 status = ATH_AGGR_LIMITED;
1432 break; 578 break;
1433 } 579 }
1434 580
1435 /* 581 /* do not exceed subframe limit */
1436 * do not exceed subframe limit 582 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1437 */
1438 if ((nframes + *prev_frames) >=
1439 min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1440 status = ATH_AGGR_LIMITED; 583 status = ATH_AGGR_LIMITED;
1441 break; 584 break;
1442 } 585 }
586 nframes++;
1443 587
1444 /* 588 /* add padding for previous frame to aggregation length */
1445 * add padding for previous frame to aggregation length
1446 */
1447 al += bpad + al_delta; 589 al += bpad + al_delta;
1448 590
1449 /* 591 /*
@@ -1451,69 +593,35 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1451 * density for this node. 593 * density for this node.
1452 */ 594 */
1453 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen); 595 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
1454
1455 bpad = PADBYTES(al_delta) + (ndelim << 2); 596 bpad = PADBYTES(al_delta) + (ndelim << 2);
1456 597
1457 bf->bf_next = NULL; 598 bf->bf_next = NULL;
1458 bf->bf_lastfrm->bf_desc->ds_link = 0; 599 bf->bf_desc->ds_link = 0;
1459 600
1460 /* 601 /* link buffers of this frame to the aggregate */
1461 * this packet is part of an aggregate
1462 * - remove all descriptors belonging to this frame from
1463 * software queue
1464 * - add it to block ack window
1465 * - set up descriptors for aggregation
1466 */
1467 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1468 ath_tx_addto_baw(sc, tid, bf); 602 ath_tx_addto_baw(sc, tid, bf);
1469 603 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
1470 list_for_each_entry(tbf, &bf_head, list) { 604 list_move_tail(&bf->list, bf_q);
1471 ath9k_hw_set11n_aggr_middle(sc->sc_ah,
1472 tbf->bf_desc, ndelim);
1473 }
1474
1475 /*
1476 * link buffers of this frame to the aggregate
1477 */
1478 list_splice_tail_init(&bf_head, bf_q);
1479 nframes++;
1480
1481 if (bf_prev) { 605 if (bf_prev) {
1482 bf_prev->bf_next = bf; 606 bf_prev->bf_next = bf;
1483 bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr; 607 bf_prev->bf_desc->ds_link = bf->bf_daddr;
1484 } 608 }
1485 bf_prev = bf; 609 bf_prev = bf;
1486
1487#ifdef AGGR_NOSHORT
1488 /*
1489 * terminate aggregation on a small packet boundary
1490 */
1491 if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
1492 status = ATH_AGGR_SHORTPKT;
1493 break;
1494 }
1495#endif
1496 } while (!list_empty(&tid->buf_q)); 610 } while (!list_empty(&tid->buf_q));
1497 611
1498 bf_first->bf_al = al; 612 bf_first->bf_al = al;
1499 bf_first->bf_nframes = nframes; 613 bf_first->bf_nframes = nframes;
1500 *bf_last = bf_prev; 614
1501 return status; 615 return status;
1502#undef PADBYTES 616#undef PADBYTES
1503} 617}
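While forming an aggregate, ath_tx_form_aggr() pads each subframe to a 4-byte boundary and adds four bytes per required delimiter (the PADBYTES() macro and the bpad computation above). A tiny sketch of that arithmetic with example lengths:

#include <stdio.h>

#define PADBYTES(len) ((4 - ((len) % 4)) % 4)

int main(void)
{
	unsigned int al_delta = 1538;   /* delimiter overhead + subframe length */
	unsigned int ndelim = 3;        /* delimiters required by MPDU density */
	unsigned int bpad = PADBYTES(al_delta) + (ndelim << 2);

	printf("pad before next subframe = %u bytes\n", bpad);  /* 2 + 12 = 14 */
	return 0;
}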
1504 618
1505/* 619static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1506 * process pending frames possibly doing a-mpdu aggregation 620 struct ath_atx_tid *tid)
1507 * NB: must be called with txq lock held
1508 */
1509static void ath_tx_sched_aggr(struct ath_softc *sc,
1510 struct ath_txq *txq, struct ath_atx_tid *tid)
1511{ 621{
1512 struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL; 622 struct ath_buf *bf;
1513 enum ATH_AGGR_STATUS status; 623 enum ATH_AGGR_STATUS status;
1514 struct list_head bf_q; 624 struct list_head bf_q;
1515 struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
1516 int prev_frames = 0;
1517 625
1518 do { 626 do {
1519 if (list_empty(&tid->buf_q)) 627 if (list_empty(&tid->buf_q))
@@ -1521,382 +629,187 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
1521 629
1522 INIT_LIST_HEAD(&bf_q); 630 INIT_LIST_HEAD(&bf_q);
1523 631
1524 status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param, 632 status = ath_tx_form_aggr(sc, tid, &bf_q);
1525 &prev_frames);
1526 633
1527 /* 634 /*
1528 * no frames picked up to be aggregated; block-ack 635 * no frames picked up to be aggregated;
1529 * window is not open 636 * block-ack window is not open.
1530 */ 637 */
1531 if (list_empty(&bf_q)) 638 if (list_empty(&bf_q))
1532 break; 639 break;
1533 640
1534 bf = list_first_entry(&bf_q, struct ath_buf, list); 641 bf = list_first_entry(&bf_q, struct ath_buf, list);
1535 bf_last = list_entry(bf_q.prev, struct ath_buf, list); 642 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
1536 bf->bf_lastbf = bf_last;
1537 643
1538 /* 644 /* if only one frame, send as non-aggregate */
1539 * if only one frame, send as non-aggregate
1540 */
1541 if (bf->bf_nframes == 1) { 645 if (bf->bf_nframes == 1) {
1542 ASSERT(bf->bf_lastfrm == bf_last);
1543
1544 bf->bf_state.bf_type &= ~BUF_AGGR; 646 bf->bf_state.bf_type &= ~BUF_AGGR;
1545 /* 647 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
1546 * clear aggr bits for every descriptor
1547 * XXX TODO: is there a way to optimize it?
1548 */
1549 list_for_each_entry(tbf, &bf_q, list) {
1550 ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
1551 }
1552
1553 ath_buf_set_rate(sc, bf); 648 ath_buf_set_rate(sc, bf);
1554 ath_tx_txqaddbuf(sc, txq, &bf_q); 649 ath_tx_txqaddbuf(sc, txq, &bf_q);
1555 continue; 650 continue;
1556 } 651 }
1557 652
1558 /* 653 /* setup first desc of aggregate */
1559 * setup first desc with rate and aggr info
1560 */
1561 bf->bf_state.bf_type |= BUF_AGGR; 654 bf->bf_state.bf_type |= BUF_AGGR;
1562 ath_buf_set_rate(sc, bf); 655 ath_buf_set_rate(sc, bf);
1563 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); 656 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1564 657
1565 /* 658 /* anchor last desc of aggregate */
1566 * anchor last frame of aggregate correctly 659 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
1567 */
1568 ASSERT(bf_lastaggr);
1569 ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
1570 tbf = bf_lastaggr;
1571 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1572
1573 /* XXX: We don't enter into this loop, consider removing this */
1574 while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
1575 tbf = list_entry(tbf->list.next, struct ath_buf, list);
1576 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1577 }
1578 660
1579 txq->axq_aggr_depth++; 661 txq->axq_aggr_depth++;
1580
1581 /*
1582 * Normal aggregate, queue to hardware
1583 */
1584 ath_tx_txqaddbuf(sc, txq, &bf_q); 662 ath_tx_txqaddbuf(sc, txq, &bf_q);
1585 663
1586 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && 664 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1587 status != ATH_AGGR_BAW_CLOSED); 665 status != ATH_AGGR_BAW_CLOSED);
1588} 666}
1589 667
1590/* Called with txq lock held */ 668int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1591 669 u16 tid, u16 *ssn)
1592static void ath_tid_drain(struct ath_softc *sc,
1593 struct ath_txq *txq,
1594 struct ath_atx_tid *tid)
1595
1596{
1597 struct ath_buf *bf;
1598 struct list_head bf_head;
1599 INIT_LIST_HEAD(&bf_head);
1600
1601 for (;;) {
1602 if (list_empty(&tid->buf_q))
1603 break;
1604 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1605
1606 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1607
1608 /* update baw for software retried frame */
1609 if (bf_isretried(bf))
1610 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1611
1612 /*
1613 * do not indicate packets while holding txq spinlock.
1614 * unlock is intentional here
1615 */
1616 spin_unlock(&txq->axq_lock);
1617
1618 /* complete this sub-frame */
1619 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
1620
1621 spin_lock(&txq->axq_lock);
1622 }
1623
1624 /*
1625 * TODO: For frame(s) that are in the retry state, we will reuse the
1626 * sequence number(s) without setting the retry bit. The
1627 * alternative is to give up on these and BAR the receiver's window
1628 * forward.
1629 */
1630 tid->seq_next = tid->seq_start;
1631 tid->baw_tail = tid->baw_head;
1632}
1633
1634/*
1635 * Drain all pending buffers
1636 * NB: must be called with txq lock held
1637 */
1638static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1639 struct ath_txq *txq)
1640{
1641 struct ath_atx_ac *ac, *ac_tmp;
1642 struct ath_atx_tid *tid, *tid_tmp;
1643
1644 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1645 list_del(&ac->list);
1646 ac->sched = false;
1647 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1648 list_del(&tid->list);
1649 tid->sched = false;
1650 ath_tid_drain(sc, txq, tid);
1651 }
1652 }
1653}
1654
1655static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
1656 struct sk_buff *skb,
1657 struct ath_tx_control *txctl)
1658{ 670{
1659 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 671 struct ath_atx_tid *txtid;
1660 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 672 struct ath_node *an;
1661 struct ath_tx_info_priv *tx_info_priv;
1662 int hdrlen;
1663 __le16 fc;
1664
1665 tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
1666 if (unlikely(!tx_info_priv))
1667 return -ENOMEM;
1668 tx_info->rate_driver_data[0] = tx_info_priv;
1669 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1670 fc = hdr->frame_control;
1671
1672 ATH_TXBUF_RESET(bf);
1673
1674 /* Frame type */
1675
1676 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
1677
1678 ieee80211_is_data(fc) ?
1679 (bf->bf_state.bf_type |= BUF_DATA) :
1680 (bf->bf_state.bf_type &= ~BUF_DATA);
1681 ieee80211_is_back_req(fc) ?
1682 (bf->bf_state.bf_type |= BUF_BAR) :
1683 (bf->bf_state.bf_type &= ~BUF_BAR);
1684 ieee80211_is_pspoll(fc) ?
1685 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1686 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1687 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
1688 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1689 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1690 (sc->hw->conf.ht.enabled && !is_pae(skb) &&
1691 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
1692 (bf->bf_state.bf_type |= BUF_HT) :
1693 (bf->bf_state.bf_type &= ~BUF_HT);
1694
1695 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
1696
1697 /* Crypto */
1698
1699 bf->bf_keytype = get_hw_crypto_keytype(skb);
1700
1701 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1702 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1703 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1704 } else {
1705 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1706 }
1707
1708 /* Assign seqno, tidno */
1709
1710 if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
1711 assign_aggr_tid_seqno(skb, bf);
1712 673
1713 /* DMA setup */ 674 an = (struct ath_node *)sta->drv_priv;
1714 bf->bf_mpdu = skb;
1715 675
1716 bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data, 676 if (sc->sc_flags & SC_OP_TXAGGR) {
1717 skb->len, PCI_DMA_TODEVICE); 677 txtid = ATH_AN_2_TID(an, tid);
1718 if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_dmacontext))) { 678 txtid->state |= AGGR_ADDBA_PROGRESS;
1719 bf->bf_mpdu = NULL; 679 ath_tx_pause_tid(sc, txtid);
1720 DPRINTF(sc, ATH_DBG_CONFIG, 680 *ssn = txtid->seq_start;
1721 "pci_dma_mapping_error() on TX\n");
1722 return -ENOMEM;
1723 } 681 }
1724 682
1725 bf->bf_buf_addr = bf->bf_dmacontext;
1726 return 0; 683 return 0;
1727} 684}
1728 685
1729/* FIXME: tx power */ 686int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1730static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1731 struct ath_tx_control *txctl)
1732{ 687{
1733 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 688 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1734 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 689 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
1735 struct ath_node *an = NULL; 690 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
691 struct ath_buf *bf;
1736 struct list_head bf_head; 692 struct list_head bf_head;
1737 struct ath_desc *ds;
1738 struct ath_atx_tid *tid;
1739 struct ath_hal *ah = sc->sc_ah;
1740 int frm_type;
1741
1742 frm_type = get_hw_packet_type(skb);
1743
1744 INIT_LIST_HEAD(&bf_head); 693 INIT_LIST_HEAD(&bf_head);
1745 list_add_tail(&bf->list, &bf_head);
1746
1747 /* setup descriptor */
1748
1749 ds = bf->bf_desc;
1750 ds->ds_link = 0;
1751 ds->ds_data = bf->bf_buf_addr;
1752
1753 /* Formulate first tx descriptor with tx controls */
1754
1755 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1756 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1757
1758 ath9k_hw_filltxdesc(ah, ds,
1759 skb->len, /* segment length */
1760 true, /* first segment */
1761 true, /* last segment */
1762 ds); /* first descriptor */
1763 694
1764 bf->bf_lastfrm = bf; 695 if (txtid->state & AGGR_CLEANUP)
696 return 0;
1765 697
1766 spin_lock_bh(&txctl->txq->axq_lock); 698 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
699 txtid->addba_exchangeattempts = 0;
700 return 0;
701 }
1767 702
1768 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && 703 ath_tx_pause_tid(sc, txtid);
1769 tx_info->control.sta) {
1770 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1771 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1772 704
1773 if (ath_aggr_query(sc, an, bf->bf_tidno)) { 705 /* drop all software retried frames and mark this TID */
1774 /* 706 spin_lock_bh(&txq->axq_lock);
1775 * Try aggregation if it's a unicast data frame 707 while (!list_empty(&txtid->buf_q)) {
1776 * and the destination is HT capable. 708 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
1777 */ 709 if (!bf_isretried(bf)) {
1778 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1779 } else {
1780 /* 710 /*
1781 * Send this frame as regular when ADDBA 711 * NB: it's based on the assumption that
1782 * exchange is neither complete nor pending. 712 * software retried frame will always stay
713 * at the head of software queue.
1783 */ 714 */
1784 ath_tx_send_normal(sc, txctl->txq, 715 break;
1785 tid, &bf_head);
1786 } 716 }
1787 } else { 717 list_move_tail(&bf->list, &bf_head);
1788 bf->bf_lastbf = bf; 718 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
1789 bf->bf_nframes = 1; 719 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
720 }
721 spin_unlock_bh(&txq->axq_lock);
1790 722
1791 ath_buf_set_rate(sc, bf); 723 if (txtid->baw_head != txtid->baw_tail) {
1792 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); 724 txtid->state |= AGGR_CLEANUP;
725 } else {
726 txtid->state &= ~AGGR_ADDBA_COMPLETE;
727 txtid->addba_exchangeattempts = 0;
728 ath_tx_flush_tid(sc, txtid);
1793 } 729 }
1794 730
1795 spin_unlock_bh(&txctl->txq->axq_lock); 731 return 0;
1796} 732}
1797 733
1798/* Upon failure caller should free skb */ 734void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1799int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
1800 struct ath_tx_control *txctl)
1801{ 735{
1802 struct ath_buf *bf; 736 struct ath_atx_tid *txtid;
1803 int r; 737 struct ath_node *an;
1804 738
1805 /* Check if a tx buffer is available */ 739 an = (struct ath_node *)sta->drv_priv;
1806 740
1807 bf = ath_tx_get_buffer(sc); 741 if (sc->sc_flags & SC_OP_TXAGGR) {
1808 if (!bf) { 742 txtid = ATH_AN_2_TID(an, tid);
1809 DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n"); 743 txtid->baw_size =
1810 return -1; 744 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
745 txtid->state |= AGGR_ADDBA_COMPLETE;
746 txtid->state &= ~AGGR_ADDBA_PROGRESS;
747 ath_tx_resume_tid(sc, txtid);
1811 } 748 }
749}
1812 750
1813 r = ath_tx_setup_buffer(sc, bf, skb, txctl); 751bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
1814 if (unlikely(r)) { 752{
1815 struct ath_txq *txq = txctl->txq; 753 struct ath_atx_tid *txtid;
1816
1817 DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");
1818 754
1819 /* upon ath_tx_processq() this TX queue will be resumed, we 755 if (!(sc->sc_flags & SC_OP_TXAGGR))
1820 * guarantee this will happen by knowing beforehand that 756 return false;
1821 * we will at least have to run TX completion on one buffer
1822 * on the queue */
1823 spin_lock_bh(&txq->axq_lock);
1824 if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
1825 ieee80211_stop_queue(sc->hw,
1826 skb_get_queue_mapping(skb));
1827 txq->stopped = 1;
1828 }
1829 spin_unlock_bh(&txq->axq_lock);
1830 757
1831 spin_lock_bh(&sc->tx.txbuflock); 758 txtid = ATH_AN_2_TID(an, tidno);
1832 list_add_tail(&bf->list, &sc->tx.txbuf);
1833 spin_unlock_bh(&sc->tx.txbuflock);
1834 759
1835 return r; 760 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
761 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
762 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
763 txtid->addba_exchangeattempts++;
764 return true;
765 }
1836 } 766 }
1837 767
1838 ath_tx_start_dma(sc, bf, txctl); 768 return false;
1839
1840 return 0;
1841} 769}
1842 770
1843/* Initialize TX queue and h/w */ 771/********************/
772/* Queue Management */
773/********************/
1844 774
1845int ath_tx_init(struct ath_softc *sc, int nbufs) 775static u32 ath_txq_depth(struct ath_softc *sc, int qnum)
1846{ 776{
1847 int error = 0; 777 return sc->tx.txq[qnum].axq_depth;
1848 778}
1849 do {
1850 spin_lock_init(&sc->tx.txbuflock);
1851
1852 /* Setup tx descriptors */
1853 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
1854 "tx", nbufs, 1);
1855 if (error != 0) {
1856 DPRINTF(sc, ATH_DBG_FATAL,
1857 "Failed to allocate tx descriptors: %d\n",
1858 error);
1859 break;
1860 }
1861
1862 /* XXX allocate beacon state together with vap */
1863 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
1864 "beacon", ATH_BCBUF, 1);
1865 if (error != 0) {
1866 DPRINTF(sc, ATH_DBG_FATAL,
1867 "Failed to allocate beacon descriptors: %d\n",
1868 error);
1869 break;
1870 }
1871 779
1872 } while (0); 780static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
781 struct ath_beacon_config *conf)
782{
783 struct ieee80211_hw *hw = sc->hw;
1873 784
1874 if (error != 0) 785 /* fill in beacon config data */
1875 ath_tx_cleanup(sc);
1876 786
1877 return error; 787 conf->beacon_interval = hw->conf.beacon_int;
788 conf->listen_interval = 100;
789 conf->dtim_count = 1;
790 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
1878} 791}
1879 792
1880/* Reclaim all tx queue resources */ 793static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1881 794 struct ath_txq *txq)
1882int ath_tx_cleanup(struct ath_softc *sc)
1883{ 795{
1884 /* cleanup beacon descriptors */ 796 struct ath_atx_ac *ac, *ac_tmp;
1885 if (sc->beacon.bdma.dd_desc_len != 0) 797 struct ath_atx_tid *tid, *tid_tmp;
1886 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
1887
1888 /* cleanup tx descriptors */
1889 if (sc->tx.txdma.dd_desc_len != 0)
1890 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
1891 798
1892 return 0; 799 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
800 list_del(&ac->list);
801 ac->sched = false;
802 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
803 list_del(&tid->list);
804 tid->sched = false;
805 ath_tid_drain(sc, txq, tid);
806 }
807 }
1893} 808}
1894 809
1895/* Setup a h/w transmit queue */
1896
1897struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 810struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1898{ 811{
1899 struct ath_hal *ah = sc->sc_ah; 812 struct ath_hw *ah = sc->sc_ah;
1900 struct ath9k_tx_queue_info qi; 813 struct ath9k_tx_queue_info qi;
1901 int qnum; 814 int qnum;
1902 815
@@ -1959,43 +872,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1959 return &sc->tx.txq[qnum]; 872 return &sc->tx.txq[qnum];
1960} 873}
1961 874
1962/* Reclaim resources for a setup queue */ 875static int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
1963
1964void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1965{
1966 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1967 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1968}
1969
1970/*
1971 * Setup a hardware data transmit queue for the specified
1972 * access control. The hal may not support all requested
1973 * queues in which case it will return a reference to a
1974 * previously setup queue. We record the mapping from ac's
1975 * to h/w queues for use by ath_tx_start and also track
1976 * the set of h/w queues being used to optimize work in the
1977 * transmit interrupt handler and related routines.
1978 */
1979
1980int ath_tx_setup(struct ath_softc *sc, int haltype)
1981{
1982 struct ath_txq *txq;
1983
1984 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1985 DPRINTF(sc, ATH_DBG_FATAL,
1986 "HAL AC %u out of range, max %zu!\n",
1987 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1988 return 0;
1989 }
1990 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1991 if (txq != NULL) {
1992 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1993 return 1;
1994 } else
1995 return 0;
1996}
1997
1998int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
1999{ 876{
2000 int qnum; 877 int qnum;
2001 878
@@ -2021,8 +898,6 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2021 return qnum; 898 return qnum;
2022} 899}
2023 900
2024/* Get a transmit queue, if available */
2025
2026struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) 901struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
2027{ 902{
2028 struct ath_txq *txq = NULL; 903 struct ath_txq *txq = NULL;
@@ -2033,7 +908,6 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
2033 908
2034 spin_lock_bh(&txq->axq_lock); 909 spin_lock_bh(&txq->axq_lock);
2035 910
2036 /* Try to avoid running out of descriptors */
2037 if (txq->axq_depth >= (ATH_TXBUF - 20)) { 911 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
2038 DPRINTF(sc, ATH_DBG_FATAL, 912 DPRINTF(sc, ATH_DBG_FATAL,
2039 "TX queue: %d is full, depth: %d\n", 913 "TX queue: %d is full, depth: %d\n",
@@ -2049,12 +923,10 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
2049 return txq; 923 return txq;
2050} 924}
2051 925
2052/* Update parameters for a transmit queue */
2053
2054int ath_txq_update(struct ath_softc *sc, int qnum, 926int ath_txq_update(struct ath_softc *sc, int qnum,
2055 struct ath9k_tx_queue_info *qinfo) 927 struct ath9k_tx_queue_info *qinfo)
2056{ 928{
2057 struct ath_hal *ah = sc->sc_ah; 929 struct ath_hw *ah = sc->sc_ah;
2058 int error = 0; 930 int error = 0;
2059 struct ath9k_tx_queue_info qi; 931 struct ath9k_tx_queue_info qi;
2060 932
@@ -2082,7 +954,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
2082 "Unable to update hardware queue %u!\n", qnum); 954 "Unable to update hardware queue %u!\n", qnum);
2083 error = -EIO; 955 error = -EIO;
2084 } else { 956 } else {
2085 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */ 957 ath9k_hw_resettxqueue(ah, qnum);
2086 } 958 }
2087 959
2088 return error; 960 return error;
@@ -2098,49 +970,32 @@ int ath_cabq_update(struct ath_softc *sc)
2098 /* 970 /*
2099 * Ensure the readytime % is within the bounds. 971 * Ensure the readytime % is within the bounds.
2100 */ 972 */
2101 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) 973 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2102 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; 974 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2103 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 975 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2104 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 976 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2105 977
2106 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf); 978 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2107 qi.tqi_readyTime = 979 qi.tqi_readyTime =
2108 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100; 980 (conf.beacon_interval * sc->config.cabqReadytime) / 100;
2109 ath_txq_update(sc, qnum, &qi); 981 ath_txq_update(sc, qnum, &qi);
2110 982
2111 return 0; 983 return 0;
2112} 984}
2113 985
2114/* Deferred processing of transmit interrupt */ 986/*
2115 987 * Drain a given TX queue (could be Beacon or Data)
2116void ath_tx_tasklet(struct ath_softc *sc) 988 *
2117{ 989 * This assumes output has been stopped and
2118 int i; 990 * we do not need to block ath_tx_tasklet.
2119 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); 991 */
2120 992void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
2121 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2122
2123 /*
2124 * Process each active queue.
2125 */
2126 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2127 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2128 ath_tx_processq(sc, &sc->tx.txq[i]);
2129 }
2130}
2131
2132void ath_tx_draintxq(struct ath_softc *sc,
2133 struct ath_txq *txq, bool retry_tx)
2134{ 993{
2135 struct ath_buf *bf, *lastbf; 994 struct ath_buf *bf, *lastbf;
2136 struct list_head bf_head; 995 struct list_head bf_head;
2137 996
2138 INIT_LIST_HEAD(&bf_head); 997 INIT_LIST_HEAD(&bf_head);
2139 998
2140 /*
2141 * NB: this assumes output has been stopped and
2142 * we do not need to block ath_tx_tasklet
2143 */
2144 for (;;) { 999 for (;;) {
2145 spin_lock_bh(&txq->axq_lock); 1000 spin_lock_bh(&txq->axq_lock);
2146 1001
@@ -2175,7 +1030,7 @@ void ath_tx_draintxq(struct ath_softc *sc,
2175 spin_unlock_bh(&txq->axq_lock); 1030 spin_unlock_bh(&txq->axq_lock);
2176 1031
2177 if (bf_isampdu(bf)) 1032 if (bf_isampdu(bf))
2178 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0); 1033 ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
2179 else 1034 else
2180 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); 1035 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2181 } 1036 }
@@ -2190,44 +1045,272 @@ void ath_tx_draintxq(struct ath_softc *sc,
2190 } 1045 }
2191} 1046}
2192 1047
2193/* Drain the transmit queues and reclaim resources */ 1048void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1049{
1050 struct ath_hw *ah = sc->sc_ah;
1051 struct ath_txq *txq;
1052 int i, npend = 0;
2194 1053
2195void ath_draintxq(struct ath_softc *sc, bool retry_tx) 1054 if (sc->sc_flags & SC_OP_INVALID)
1055 return;
1056
1057 /* Stop beacon queue */
1058 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1059
1060 /* Stop data queues */
1061 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1062 if (ATH_TXQ_SETUP(sc, i)) {
1063 txq = &sc->tx.txq[i];
1064 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1065 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1066 }
1067 }
1068
1069 if (npend) {
1070 int r;
1071
1072 DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
1073
1074 spin_lock_bh(&sc->sc_resetlock);
1075 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
1076 if (r)
1077 DPRINTF(sc, ATH_DBG_FATAL,
1078 "Unable to reset hardware; reset status %u\n",
1079 r);
1080 spin_unlock_bh(&sc->sc_resetlock);
1081 }
1082
1083 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1084 if (ATH_TXQ_SETUP(sc, i))
1085 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1086 }
1087}
1088
1089void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1090{
1091 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1092 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1093}
1094
1095void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2196{ 1096{
2197 /* stop beacon queue. The beacon will be freed when 1097 struct ath_atx_ac *ac;
2198 * we go to INIT state */ 1098 struct ath_atx_tid *tid;
2199 if (!(sc->sc_flags & SC_OP_INVALID)) { 1099
2200 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1100 if (list_empty(&txq->axq_acq))
2201 DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n", 1101 return;
2202 ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq)); 1102
1103 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1104 list_del(&ac->list);
1105 ac->sched = false;
1106
1107 do {
1108 if (list_empty(&ac->tid_q))
1109 return;
1110
1111 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1112 list_del(&tid->list);
1113 tid->sched = false;
1114
1115 if (tid->paused)
1116 continue;
1117
1118 if ((txq->axq_depth % 2) == 0)
1119 ath_tx_sched_aggr(sc, txq, tid);
1120
1121 /*
1122 * add tid to round-robin queue if more frames
1123 * are pending for the tid
1124 */
1125 if (!list_empty(&tid->buf_q))
1126 ath_tx_queue_tid(txq, tid);
1127
1128 break;
1129 } while (!list_empty(&ac->tid_q));
1130
1131 if (!list_empty(&ac->tid_q)) {
1132 if (!ac->sched) {
1133 ac->sched = true;
1134 list_add_tail(&ac->list, &txq->axq_acq);
1135 }
2203 } 1136 }
1137}
1138
1139int ath_tx_setup(struct ath_softc *sc, int haltype)
1140{
1141 struct ath_txq *txq;
2204 1142
2205 ath_drain_txdataq(sc, retry_tx); 1143 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1144 DPRINTF(sc, ATH_DBG_FATAL,
1145 "HAL AC %u out of range, max %zu!\n",
1146 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1147 return 0;
1148 }
1149 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1150 if (txq != NULL) {
1151 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1152 return 1;
1153 } else
1154 return 0;
2206} 1155}
2207 1156
2208u32 ath_txq_depth(struct ath_softc *sc, int qnum) 1157/***********/
1158/* TX, DMA */
1159/***********/
1160
1161/*
1162 * Insert a chain of ath_buf (descriptors) on a txq and
1163 * assume the descriptors are already chained together by caller.
1164 */
1165static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1166 struct list_head *head)
2209{ 1167{
2210 return sc->tx.txq[qnum].axq_depth; 1168 struct ath_hw *ah = sc->sc_ah;
1169 struct ath_buf *bf;
1170
1171 /*
1172 * Insert the frame on the outbound list and
1173 * pass it on to the hardware.
1174 */
1175
1176 if (list_empty(head))
1177 return;
1178
1179 bf = list_first_entry(head, struct ath_buf, list);
1180
1181 list_splice_tail_init(head, &txq->axq_q);
1182 txq->axq_depth++;
1183 txq->axq_totalqueued++;
1184 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
1185
1186 DPRINTF(sc, ATH_DBG_QUEUE,
1187 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
1188
1189 if (txq->axq_link == NULL) {
1190 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1191 DPRINTF(sc, ATH_DBG_XMIT,
1192 "TXDP[%u] = %llx (%p)\n",
1193 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1194 } else {
1195 *txq->axq_link = bf->bf_daddr;
1196 DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1197 txq->axq_qnum, txq->axq_link,
1198 ito64(bf->bf_daddr), bf->bf_desc);
1199 }
1200 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1201 ath9k_hw_txstart(ah, txq->axq_qnum);
2211} 1202}
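ath_tx_txqaddbuf() either programs the queue's TXDP register (when the link pointer is empty) or patches the previous tail descriptor's ds_link so the DMA engine picks up the new chain. A simplified, non-driver sketch of that chaining pattern; the types here are stand-ins, not the driver's structures:

#include <stdio.h>
#include <stdint.h>

struct desc {
	uint32_t ds_link;       /* bus address of the next descriptor */
};

int main(void)
{
	struct desc d0 = { 0 }, d1 = { 0 };
	uint32_t d0_addr = 0x1000, d1_addr = 0x2000;
	uint32_t txdp = 0;              /* stand-in for the hardware TXDP register */
	uint32_t *axq_link = NULL;      /* where the next buffer gets chained */

	/* queue first buffer: link pointer empty -> program TXDP */
	if (axq_link == NULL)
		txdp = d0_addr;
	axq_link = &d0.ds_link;

	/* queue second buffer: patch the previous tail instead */
	*axq_link = d1_addr;
	axq_link = &d1.ds_link;

	printf("TXDP=0x%x d0.ds_link=0x%x\n", txdp, d0.ds_link);
	return 0;
}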
2212 1203
2213u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) 1204static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
2214{ 1205{
2215 return sc->tx.txq[qnum].axq_aggr_depth; 1206 struct ath_buf *bf = NULL;
1207
1208 spin_lock_bh(&sc->tx.txbuflock);
1209
1210 if (unlikely(list_empty(&sc->tx.txbuf))) {
1211 spin_unlock_bh(&sc->tx.txbuflock);
1212 return NULL;
1213 }
1214
1215 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
1216 list_del(&bf->list);
1217
1218 spin_unlock_bh(&sc->tx.txbuflock);
1219
1220 return bf;
2216} 1221}
2217 1222
2218bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) 1223static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1224 struct list_head *bf_head,
1225 struct ath_tx_control *txctl)
2219{ 1226{
2220 struct ath_atx_tid *txtid; 1227 struct ath_buf *bf;
2221 1228
2222 if (!(sc->sc_flags & SC_OP_TXAGGR)) 1229 bf = list_first_entry(bf_head, struct ath_buf, list);
2223 return false; 1230 bf->bf_state.bf_type |= BUF_AMPDU;
2224 1231
2225 txtid = ATH_AN_2_TID(an, tidno); 1232 /*
1233 * Do not queue to h/w when any of the following conditions is true:
1234 * - there are pending frames in software queue
1235 * - the TID is currently paused for ADDBA/BAR request
1236 * - seqno is not within block-ack window
1237 * - h/w queue depth exceeds low water mark
1238 */
1239 if (!list_empty(&tid->buf_q) || tid->paused ||
1240 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1241 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1242 /*
1243 * Add this frame to software queue for scheduling later
1244 * for aggregation.
1245 */
1246 list_move_tail(&bf->list, &tid->buf_q);
1247 ath_tx_queue_tid(txctl->txq, tid);
1248 return;
1249 }
2226 1250
2227 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 1251 /* Add sub-frame to BAW */
2228 if (!(txtid->state & AGGR_ADDBA_PROGRESS) && 1252 ath_tx_addto_baw(sc, tid, bf);
2229 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) { 1253
2230 txtid->addba_exchangeattempts++; 1254 /* Queue to h/w without aggregation */
1255 bf->bf_nframes = 1;
1256 bf->bf_lastbf = bf;
1257 ath_buf_set_rate(sc, bf);
1258 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
1259}
1260
1261static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1262 struct ath_atx_tid *tid,
1263 struct list_head *bf_head)
1264{
1265 struct ath_buf *bf;
1266
1267 bf = list_first_entry(bf_head, struct ath_buf, list);
1268 bf->bf_state.bf_type &= ~BUF_AMPDU;
1269
1270 /* update starting sequence number for subsequent ADDBA request */
1271 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1272
1273 bf->bf_nframes = 1;
1274 bf->bf_lastbf = bf;
1275 ath_buf_set_rate(sc, bf);
1276 ath_tx_txqaddbuf(sc, txq, bf_head);
1277}
1278
1279static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1280{
1281 struct ieee80211_hdr *hdr;
1282 enum ath9k_pkt_type htype;
1283 __le16 fc;
1284
1285 hdr = (struct ieee80211_hdr *)skb->data;
1286 fc = hdr->frame_control;
1287
1288 if (ieee80211_is_beacon(fc))
1289 htype = ATH9K_PKT_TYPE_BEACON;
1290 else if (ieee80211_is_probe_resp(fc))
1291 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1292 else if (ieee80211_is_atim(fc))
1293 htype = ATH9K_PKT_TYPE_ATIM;
1294 else if (ieee80211_is_pspoll(fc))
1295 htype = ATH9K_PKT_TYPE_PSPOLL;
1296 else
1297 htype = ATH9K_PKT_TYPE_NORMAL;
1298
1299 return htype;
1300}
1301
1302static bool is_pae(struct sk_buff *skb)
1303{
1304 struct ieee80211_hdr *hdr;
1305 __le16 fc;
1306
1307 hdr = (struct ieee80211_hdr *)skb->data;
1308 fc = hdr->frame_control;
1309
1310 if (ieee80211_is_data(fc)) {
1311 if (ieee80211_is_nullfunc(fc) ||
1312 /* Port Access Entity (IEEE 802.1X) */
1313 (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
2231 return true; 1314 return true;
2232 } 1315 }
2233 } 1316 }
@@ -2235,175 +1318,747 @@ bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
2235 return false; 1318 return false;
2236} 1319}
2237 1320
2238/* Start TX aggregation */ 1321static int get_hw_crypto_keytype(struct sk_buff *skb)
1322{
1323 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2239 1324
2240int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 1325 if (tx_info->control.hw_key) {
2241 u16 tid, u16 *ssn) 1326 if (tx_info->control.hw_key->alg == ALG_WEP)
1327 return ATH9K_KEY_TYPE_WEP;
1328 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1329 return ATH9K_KEY_TYPE_TKIP;
1330 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1331 return ATH9K_KEY_TYPE_AES;
1332 }
1333
1334 return ATH9K_KEY_TYPE_CLEAR;
1335}
1336
1337static void assign_aggr_tid_seqno(struct sk_buff *skb,
1338 struct ath_buf *bf)
2242{ 1339{
2243 struct ath_atx_tid *txtid; 1340 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1341 struct ieee80211_hdr *hdr;
2244 struct ath_node *an; 1342 struct ath_node *an;
1343 struct ath_atx_tid *tid;
1344 __le16 fc;
1345 u8 *qc;
2245 1346
2246 an = (struct ath_node *)sta->drv_priv; 1347 if (!tx_info->control.sta)
1348 return;
2247 1349
2248 if (sc->sc_flags & SC_OP_TXAGGR) { 1350 an = (struct ath_node *)tx_info->control.sta->drv_priv;
2249 txtid = ATH_AN_2_TID(an, tid); 1351 hdr = (struct ieee80211_hdr *)skb->data;
2250 txtid->state |= AGGR_ADDBA_PROGRESS; 1352 fc = hdr->frame_control;
2251 ath_tx_pause_tid(sc, txtid); 1353
1354 if (ieee80211_is_data_qos(fc)) {
1355 qc = ieee80211_get_qos_ctl(hdr);
1356 bf->bf_tidno = qc[0] & 0xf;
2252 } 1357 }
2253 1358
2254 return 0; 1359 /*
1360 * For HT capable stations, we save tidno for later use.
1361 * We also override seqno set by upper layer with the one
1362 * in tx aggregation state.
1363 *
1364 * If fragmentation is on, the sequence number is
1365 * not overridden, since it has been
1366 * incremented by the fragmentation routine.
1367 *
1368 * FIXME: check if the fragmentation threshold exceeds
1369 * IEEE80211 max.
1370 */
1371 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1372 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
1373 IEEE80211_SEQ_SEQ_SHIFT);
1374 bf->bf_seqno = tid->seq_next;
1375 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
2255} 1376}
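assign_aggr_tid_seqno() above stamps the driver's per-TID counter into the QoS sequence-control field and advances it with INCR() modulo the sequence space. A small sketch of that step; the 4-bit shift (fragment number in the low bits) and the 12-bit wrap are standard 802.11 conventions, used here as assumptions:

#include <stdio.h>
#include <stdint.h>

#define SEQ_MAX        4096
#define SEQ_SEQ_SHIFT  4        /* fragment number occupies the low 4 bits */

int main(void)
{
	uint16_t seq_next = 4095;   /* about to wrap */
	uint16_t seq_ctrl = (uint16_t)(seq_next << SEQ_SEQ_SHIFT);

	seq_next = (seq_next + 1) % SEQ_MAX;    /* INCR() equivalent */
	printf("seq_ctrl=0x%04x next=%u\n", seq_ctrl, seq_next);
	return 0;
}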
2256 1377
2257/* Stop tx aggregation */ 1378static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
2258 1379 struct ath_txq *txq)
2259int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
2260{ 1380{
2261 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1381 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1382 int flags = 0;
2262 1383
2263 ath_tx_aggr_teardown(sc, an, tid); 1384 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
2264 return 0; 1385 flags |= ATH9K_TXDESC_INTREQ;
1386
1387 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1388 flags |= ATH9K_TXDESC_NOACK;
1389
1390 return flags;
2265} 1391}
2266 1392
2267/* Resume tx aggregation */ 1393/*
1394 * rix - rate index
1395 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1396 * width - 0 for 20 MHz, 1 for 40 MHz
1397 * half_gi - to use 4us v/s 3.6 us for symbol time
1398 */
1399static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1400 int width, int half_gi, bool shortPreamble)
1401{
1402 struct ath_rate_table *rate_table = sc->cur_rate_table;
1403 u32 nbits, nsymbits, duration, nsymbols;
1404 u8 rc;
1405 int streams, pktlen;
1406
1407 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1408 rc = rate_table->info[rix].ratecode;
2268 1409
2269void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1410 /* for legacy rates, use old function to compute packet duration */
1411 if (!IS_HT_RATE(rc))
1412 return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1413 rix, shortPreamble);
1414
1415 /* find number of symbols: PLCP + data */
1416 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1417 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1418 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1419
1420 if (!half_gi)
1421 duration = SYMBOL_TIME(nsymbols);
1422 else
1423 duration = SYMBOL_TIME_HALFGI(nsymbols);
1424
1425 * add up duration for legacy/ht training and signal fields
1426 streams = HT_RC_2_STREAMS(rc);
1427 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1428
1429 return duration;
1430}
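For HT rates, ath_pkt_duration() counts OFDM symbols with a ceiling division and then adds the training/signal field overhead. A standalone sketch with assumed, typical 802.11n constants (symbol time, PLCP bits, and single-stream preamble length are illustrative):

#include <stdio.h>

#define OFDM_PLCP_BITS   22
#define SYMBOL_TIME_US    4      /* 4 us per symbol, long guard interval */
#define PREAMBLE_US      36      /* L-STF+L-LTF+L-SIG+HT-SIG+HT-STF+HT-LTF, 1 stream */

int main(void)
{
	unsigned int pktlen = 1500;     /* bytes; includes delims/pads for aggregates */
	unsigned int nsymbits = 260;    /* bits per symbol for the chosen MCS/width */
	unsigned int nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	unsigned int nsymbols = (nbits + nsymbits - 1) / nsymbits;
	unsigned int duration = nsymbols * SYMBOL_TIME_US + PREAMBLE_US;

	printf("%u symbols, ~%u us on air\n", nsymbols, duration);
	return 0;
}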
1431
1432static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
2270{ 1433{
2271 struct ath_atx_tid *txtid; 1434 struct ath_rate_table *rt = sc->cur_rate_table;
2272 struct ath_node *an; 1435 struct ath9k_11n_rate_series series[4];
1436 struct sk_buff *skb;
1437 struct ieee80211_tx_info *tx_info;
1438 struct ieee80211_tx_rate *rates;
1439 struct ieee80211_hdr *hdr;
1440 int i, flags = 0;
1441 u8 rix = 0, ctsrate = 0;
1442 bool is_pspoll;
2273 1443
2274 an = (struct ath_node *)sta->drv_priv; 1444 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
2275 1445
2276 if (sc->sc_flags & SC_OP_TXAGGR) { 1446 skb = (struct sk_buff *)bf->bf_mpdu;
2277 txtid = ATH_AN_2_TID(an, tid); 1447 tx_info = IEEE80211_SKB_CB(skb);
2278 txtid->baw_size = 1448 rates = tx_info->control.rates;
2279 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1449 hdr = (struct ieee80211_hdr *)skb->data;
2280 txtid->state |= AGGR_ADDBA_COMPLETE; 1450 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
2281 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1451
2282 ath_tx_resume_tid(sc, txtid); 1452 /*
1453 * We check if Short Preamble is needed for the CTS rate by
1454 * checking the BSS's global flag.
1455 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1456 */
1457 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
1458 ctsrate = rt->info[tx_info->control.rts_cts_rate_idx].ratecode |
1459 rt->info[tx_info->control.rts_cts_rate_idx].short_preamble;
1460 else
1461 ctsrate = rt->info[tx_info->control.rts_cts_rate_idx].ratecode;
1462
1463 /*
1464 * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
1465 * Check the first rate in the series to decide whether RTS/CTS
1466 * or CTS-to-self has to be used.
1467 */
1468 if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
1469 flags = ATH9K_TXDESC_CTSENA;
1470 else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
1471 flags = ATH9K_TXDESC_RTSENA;
1472
1473 /* FIXME: Handle aggregation protection */
1474 if (sc->config.ath_aggr_prot &&
1475 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
1476 flags = ATH9K_TXDESC_RTSENA;
2283 } 1477 }
1478
1479 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1480 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1481 flags &= ~(ATH9K_TXDESC_RTSENA);
1482
1483 for (i = 0; i < 4; i++) {
1484 if (!rates[i].count || (rates[i].idx < 0))
1485 continue;
1486
1487 rix = rates[i].idx;
1488 series[i].Tries = rates[i].count;
1489 series[i].ChSel = sc->tx_chainmask;
1490
1491 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1492 series[i].Rate = rt->info[rix].ratecode |
1493 rt->info[rix].short_preamble;
1494 else
1495 series[i].Rate = rt->info[rix].ratecode;
1496
1497 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
1498 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1499 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1500 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1501 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1502 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
1503
1504 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1505 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
1506 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
1507 (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE));
1508 }
1509
1510 /* set dur_update_en for l-sig computation except for PS-Poll frames */
1511 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1512 bf->bf_lastbf->bf_desc,
1513 !is_pspoll, ctsrate,
1514 0, series, 4, flags);
1515
1516 if (sc->config.ath_aggr_prot && flags)
1517 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
2284} 1518}
2285 1519
2286/* 1520static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
2287 * Performs transmit side cleanup when TID changes from aggregated to 1521 struct sk_buff *skb,
2288 * unaggregated. 1522 struct ath_tx_control *txctl)
2289 * - Pause the TID and mark cleanup in progress 1523{
2290 * - Discard all retry frames from the s/w queue. 1524 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2291 */ 1525 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1526 struct ath_tx_info_priv *tx_info_priv;
1527 int hdrlen;
1528 __le16 fc;
1529
1530 tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
1531 if (unlikely(!tx_info_priv))
1532 return -ENOMEM;
1533 tx_info->rate_driver_data[0] = tx_info_priv;
1534 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1535 fc = hdr->frame_control;
1536
1537 ATH_TXBUF_RESET(bf);
1538
1539 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
1540
1541 if ((conf_is_ht(&sc->hw->conf) && !is_pae(skb) &&
1542 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)))
1543 bf->bf_state.bf_type |= BUF_HT;
1544
1545 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
1546
1547 bf->bf_keytype = get_hw_crypto_keytype(skb);
1548 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1549 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1550 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1551 } else {
1552 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1553 }
1554
1555 if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
1556 assign_aggr_tid_seqno(skb, bf);
1557
1558 bf->bf_mpdu = skb;
1559
1560 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1561 skb->len, DMA_TO_DEVICE);
1562 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1563 bf->bf_mpdu = NULL;
1564 DPRINTF(sc, ATH_DBG_CONFIG,
1565 "dma_mapping_error() on TX\n");
1566 return -ENOMEM;
1567 }
1568
1569 bf->bf_buf_addr = bf->bf_dmacontext;
1570 return 0;
1571}
2292 1572
2293void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid) 1573/* FIXME: tx power */
1574static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1575 struct ath_tx_control *txctl)
2294{ 1576{
2295 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 1577 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
2296 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 1578 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2297 struct ath_buf *bf; 1579 struct ath_node *an = NULL;
2298 struct list_head bf_head; 1580 struct list_head bf_head;
1581 struct ath_desc *ds;
1582 struct ath_atx_tid *tid;
1583 struct ath_hw *ah = sc->sc_ah;
1584 int frm_type;
1585
1586 frm_type = get_hw_packet_type(skb);
1587
2299 INIT_LIST_HEAD(&bf_head); 1588 INIT_LIST_HEAD(&bf_head);
1589 list_add_tail(&bf->list, &bf_head);
2300 1590
2301 if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */ 1591 ds = bf->bf_desc;
2302 return; 1592 ds->ds_link = 0;
1593 ds->ds_data = bf->bf_buf_addr;
2303 1594
2304 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 1595 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
2305 txtid->addba_exchangeattempts = 0; 1596 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
2306 return;
2307 }
2308 1597
2309 /* TID must be paused first */ 1598 ath9k_hw_filltxdesc(ah, ds,
2310 ath_tx_pause_tid(sc, txtid); 1599 skb->len, /* segment length */
1600 true, /* first segment */
1601 true, /* last segment */
1602 ds); /* first descriptor */
2311 1603
2312 /* drop all software retried frames and mark this TID */ 1604 spin_lock_bh(&txctl->txq->axq_lock);
2313 spin_lock_bh(&txq->axq_lock); 1605
2314 while (!list_empty(&txtid->buf_q)) { 1606 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
2315 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); 1607 tx_info->control.sta) {
2316 if (!bf_isretried(bf)) { 1608 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1609 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1610
1611 if (ath_aggr_query(sc, an, bf->bf_tidno)) {
2317 /* 1612 /*
2318 * NB: it's based on the assumption that 1613 * Try aggregation if it's a unicast data frame
2319 * software retried frame will always stay 1614 * and the destination is HT capable.
2320 * at the head of software queue.
2321 */ 1615 */
2322 break; 1616 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1617 } else {
1618 /*
1619 * Send this frame as regular when ADDBA
1620 * exchange is neither complete nor pending.
1621 */
1622 ath_tx_send_normal(sc, txctl->txq,
1623 tid, &bf_head);
2323 } 1624 }
2324 list_cut_position(&bf_head, 1625 } else {
2325 &txtid->buf_q, &bf->bf_lastfrm->list); 1626 bf->bf_lastbf = bf;
2326 ath_tx_update_baw(sc, txtid, bf->bf_seqno); 1627 bf->bf_nframes = 1;
2327 1628
2328 /* complete this sub-frame */ 1629 ath_buf_set_rate(sc, bf);
2329 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); 1630 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
2330 } 1631 }
2331 1632
2332 if (txtid->baw_head != txtid->baw_tail) { 1633 spin_unlock_bh(&txctl->txq->axq_lock);
2333 spin_unlock_bh(&txq->axq_lock); 1634}
2334 txtid->state |= AGGR_CLEANUP; 1635
2335 } else { 1636/* Upon failure caller should free skb */
2336 txtid->state &= ~AGGR_ADDBA_COMPLETE; 1637int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
2337 txtid->addba_exchangeattempts = 0; 1638 struct ath_tx_control *txctl)
1639{
1640 struct ath_buf *bf;
1641 int r;
1642
1643 bf = ath_tx_get_buffer(sc);
1644 if (!bf) {
1645 DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
1646 return -1;
1647 }
1648
1649 r = ath_tx_setup_buffer(sc, bf, skb, txctl);
1650 if (unlikely(r)) {
1651 struct ath_txq *txq = txctl->txq;
1652
1653 DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");
1654
1655 /* upon ath_tx_processq() this TX queue will be resumed, we
1656 * guarantee this will happen by knowing beforehand that
 1657 * we will at least have to run TX completion on one buffer
1658 * on the queue */
1659 spin_lock_bh(&txq->axq_lock);
1660 if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
1661 ieee80211_stop_queue(sc->hw,
1662 skb_get_queue_mapping(skb));
1663 txq->stopped = 1;
1664 }
2338 spin_unlock_bh(&txq->axq_lock); 1665 spin_unlock_bh(&txq->axq_lock);
2339 ath_tx_flush_tid(sc, txtid); 1666
1667 spin_lock_bh(&sc->tx.txbuflock);
1668 list_add_tail(&bf->list, &sc->tx.txbuf);
1669 spin_unlock_bh(&sc->tx.txbuflock);
1670
1671 return r;
2340 } 1672 }
2341}
2342 1673
2343/* 1674 ath_tx_start_dma(sc, bf, txctl);
2344 * Tx scheduling logic
2345 * NB: must be called with txq lock held
2346 */
2347 1675
2348void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1676 return 0;
1677}
1678
1679void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2349{ 1680{
2350 struct ath_atx_ac *ac; 1681 int hdrlen, padsize;
2351 struct ath_atx_tid *tid; 1682 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1683 struct ath_tx_control txctl;
2352 1684
2353 /* nothing to schedule */ 1685 memset(&txctl, 0, sizeof(struct ath_tx_control));
2354 if (list_empty(&txq->axq_acq))
2355 return;
2356 /*
2357 * get the first node/ac pair on the queue
2358 */
2359 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2360 list_del(&ac->list);
2361 ac->sched = false;
2362 1686
2363 /* 1687 /*
2364 * process a single tid per destination 1688 * As a temporary workaround, assign seq# here; this will likely need
1689 * to be cleaned up to work better with Beacon transmission and virtual
1690 * BSSes.
2365 */ 1691 */
2366 do { 1692 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2367 /* nothing to schedule */ 1693 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2368 if (list_empty(&ac->tid_q)) 1694 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1695 sc->tx.seq_no += 0x10;
1696 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1697 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1698 }
1699
1700 /* Add the padding after the header if this is not already done */
1701 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1702 if (hdrlen & 3) {
1703 padsize = hdrlen % 4;
1704 if (skb_headroom(skb) < padsize) {
1705 DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
1706 dev_kfree_skb_any(skb);
2369 return; 1707 return;
1708 }
1709 skb_push(skb, padsize);
1710 memmove(skb->data, skb->data + padsize, hdrlen);
1711 }
2370 1712
2371 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); 1713 txctl.txq = sc->beacon.cabq;
2372 list_del(&tid->list);
2373 tid->sched = false;
2374 1714
2375 if (tid->paused) /* check next tid to keep h/w busy */ 1715 DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
2376 continue;
2377 1716
2378 if ((txq->axq_depth % 2) == 0) 1717 if (ath_tx_start(sc, skb, &txctl) != 0) {
2379 ath_tx_sched_aggr(sc, txq, tid); 1718 DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
1719 goto exit;
1720 }
1721
1722 return;
1723exit:
1724 dev_kfree_skb_any(skb);
1725}
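As a worked example of the 4-byte header alignment used above, here is a sketch on a plain buffer rather than an skb (pad_header and its arguments are invented for illustration): a 26-byte QoS header gives padsize 2, the header is pushed forward into the headroom, and the payload then starts at a 4-byte-aligned offset; ath_tx_complete() strips the same pad again before returning the frame to mac80211.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch of the alignment trick on a raw buffer. 'data' points at the
 * 802.11 header and has 'headroom' spare bytes in front of it. Returns
 * the new header pointer, moved forward by the pad size. */
static uint8_t *pad_header(uint8_t *data, size_t hdrlen, size_t headroom)
{
        size_t padsize = hdrlen & 3;    /* a 26-byte QoS header needs 2 bytes */

        if (!padsize || headroom < padsize)
                return data;            /* already aligned, or no room: give up */

        /* skb_push() + memmove() equivalent: claim the pad in front and
         * slide the header into it so the payload start becomes aligned. */
        memmove(data - padsize, data, hdrlen);
        return data - padsize;
}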
1726
1727/*****************/
1728/* TX Completion */
1729/*****************/
1730
1731static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1732 struct ath_xmit_status *tx_status)
1733{
1734 struct ieee80211_hw *hw = sc->hw;
1735 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1736 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1737 int hdrlen, padsize;
1738
1739 DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
2380 1740
1741 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
1742 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1743 kfree(tx_info_priv);
1744 tx_info->rate_driver_data[0] = NULL;
1745 }
1746
1747 if (tx_status->flags & ATH_TX_BAR) {
1748 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1749 tx_status->flags &= ~ATH_TX_BAR;
1750 }
1751
1752 if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
1753 /* Frame was ACKed */
1754 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1755 }
1756
1757 tx_info->status.rates[0].count = tx_status->retries + 1;
1758
1759 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1760 padsize = hdrlen & 3;
1761 if (padsize && hdrlen >= 24) {
2381 /* 1762 /*
2382 * add tid to round-robin queue if more frames 1763 * Remove MAC header padding before giving the frame back to
2383 * are pending for the tid 1764 * mac80211.
2384 */ 1765 */
2385 if (!list_empty(&tid->buf_q)) 1766 memmove(skb->data + padsize, skb->data, hdrlen);
2386 ath_tx_queue_tid(txq, tid); 1767 skb_pull(skb, padsize);
1768 }
2387 1769
2388 /* only schedule one TID at a time */ 1770 ieee80211_tx_status(hw, skb);
2389 break; 1771}
2390 } while (!list_empty(&ac->tid_q)); 1772
1773static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1774 struct list_head *bf_q,
1775 int txok, int sendbar)
1776{
1777 struct sk_buff *skb = bf->bf_mpdu;
1778 struct ath_xmit_status tx_status;
1779 unsigned long flags;
2391 1780
2392 /* 1781 /*
2393 * schedule AC if more TIDs need processing 1782 * Set retry information.
1783 * NB: Don't use the information in the descriptor, because the frame
1784 * could be software retried.
2394 */ 1785 */
2395 if (!list_empty(&ac->tid_q)) { 1786 tx_status.retries = bf->bf_retries;
1787 tx_status.flags = 0;
1788
1789 if (sendbar)
1790 tx_status.flags = ATH_TX_BAR;
1791
1792 if (!txok) {
1793 tx_status.flags |= ATH_TX_ERROR;
1794
1795 if (bf_isxretried(bf))
1796 tx_status.flags |= ATH_TX_XRETRY;
1797 }
1798
1799 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1800 ath_tx_complete(sc, skb, &tx_status);
1801
1802 /*
1803 * Return the list of ath_buf of this mpdu to free queue
1804 */
1805 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1806 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1807 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1808}
1809
1810static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1811 int txok)
1812{
1813 struct ath_buf *bf_last = bf->bf_lastbf;
1814 struct ath_desc *ds = bf_last->bf_desc;
1815 u16 seq_st = 0;
1816 u32 ba[WME_BA_BMP_SIZE >> 5];
1817 int ba_index;
1818 int nbad = 0;
1819 int isaggr = 0;
1820
1821 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
1822 return 0;
1823
1824 isaggr = bf_isaggr(bf);
1825 if (isaggr) {
1826 seq_st = ATH_DS_BA_SEQ(ds);
1827 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
1828 }
1829
1830 while (bf) {
1831 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1832 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1833 nbad++;
1834
1835 bf = bf->bf_next;
1836 }
1837
1838 return nbad;
1839}
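A rough standalone illustration of the block-ack bitmap walk above (the 64-entry window, the byte-wise bitmap and both helpers are assumptions that mirror ATH_BA_INDEX/ATH_BA_ISSET, not the driver macros): each sub-frame's sequence number becomes an offset from the BA starting sequence, and a cleared bit means the peer never acknowledged it.

#include <stdint.h>

#define BA_WINDOW 64    /* assumed block-ack window size (WME_BA_BMP_SIZE) */

/* Offset of sequence 'seq' inside a BA that starts at 'seq_start';
 * 802.11 sequence numbers wrap at 4096. */
static unsigned int ba_index(uint16_t seq_start, uint16_t seq)
{
        return (seq - seq_start) & 4095;
}

/* Count sub-frames of an aggregate that were not acknowledged. 'seqs'
 * holds the per-sub-frame sequence numbers, 'bitmap' is the block-ack
 * bitmap reported by the peer (bit set == acked). */
static int count_bad_subframes(const uint16_t *seqs, int nframes,
                               const uint8_t *bitmap, uint16_t seq_start)
{
        int i, nbad = 0;

        for (i = 0; i < nframes; i++) {
                unsigned int idx = ba_index(seq_start, seqs[i]);

                if (idx >= BA_WINDOW || !(bitmap[idx >> 3] & (1 << (idx & 7))))
                        nbad++;
        }
        return nbad;
}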
1840
1841static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
1842{
1843 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
1844 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1845 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1846 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1847
1848 tx_info_priv->update_rc = false;
1849 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1850 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1851
1852 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1853 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
1854 if (ieee80211_is_data(hdr->frame_control)) {
1855 memcpy(&tx_info_priv->tx, &ds->ds_txstat,
1856 sizeof(tx_info_priv->tx));
1857 tx_info_priv->n_frames = bf->bf_nframes;
1858 tx_info_priv->n_bad_frames = nbad;
1859 tx_info_priv->update_rc = true;
1860 }
1861 }
1862}
1863
1864static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
1865{
1866 int qnum;
1867
1868 spin_lock_bh(&txq->axq_lock);
1869 if (txq->stopped &&
1870 ath_txq_depth(sc, txq->axq_qnum) <= (ATH_TXBUF - 20)) {
1871 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1872 if (qnum != -1) {
1873 ieee80211_wake_queue(sc->hw, qnum);
1874 txq->stopped = 0;
1875 }
1876 }
1877 spin_unlock_bh(&txq->axq_lock);
1878}
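Together with the error path in ath_tx_start(), the wake helper above forms simple flow control with hysteresis: stop the mac80211 queue only while a completion is still guaranteed to arrive, and wake it again once the backlog has drained well below the pool size. A minimal sketch of that pattern, where TOTAL_BUFS and the margin of 20 are stand-ins for ATH_TXBUF and the driver's threshold:

#include <stdbool.h>

#define TOTAL_BUFS      512     /* assumed buffer pool size, stands in for ATH_TXBUF */
#define WAKE_MARGIN     20      /* wake only once well below the limit */

struct txq_state {
        int depth;              /* frames currently queued to hardware */
        bool stopped;           /* mac80211 queue stopped? */
};

/* Called when buffer setup fails: stop the queue only if a completion
 * is still pending, otherwise nothing would ever wake it again. */
static void flow_stop(struct txq_state *q)
{
        if (q->depth > 1)
                q->stopped = true;
}

/* Called from TX completion: wake once the backlog has drained. */
static void flow_wake(struct txq_state *q)
{
        if (q->stopped && q->depth <= TOTAL_BUFS - WAKE_MARGIN)
                q->stopped = false;
}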
1879
1880static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1881{
1882 struct ath_hw *ah = sc->sc_ah;
1883 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1884 struct list_head bf_head;
1885 struct ath_desc *ds;
1886 int txok, nbad = 0;
1887 int status;
1888
1889 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
1890 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1891 txq->axq_link);
1892
1893 for (;;) {
1894 spin_lock_bh(&txq->axq_lock);
1895 if (list_empty(&txq->axq_q)) {
1896 txq->axq_link = NULL;
1897 txq->axq_linkbuf = NULL;
1898 spin_unlock_bh(&txq->axq_lock);
1899 break;
1900 }
1901 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1902
2396 /* 1903 /*
2397 * add dest ac to txq if not already added 1904 * There is a race condition that a BH gets scheduled
1905 * after sw writes TxE and before hw re-load the last
1906 * descriptor to get the newly chained one.
1907 * Software must keep the last DONE descriptor as a
1908 * holding descriptor - software does so by marking
1909 * it with the STALE flag.
2398 */ 1910 */
2399 if (!ac->sched) { 1911 bf_held = NULL;
2400 ac->sched = true; 1912 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2401 list_add_tail(&ac->list, &txq->axq_acq); 1913 bf_held = bf;
1914 if (list_is_last(&bf_held->list, &txq->axq_q)) {
1915 txq->axq_link = NULL;
1916 txq->axq_linkbuf = NULL;
1917 spin_unlock_bh(&txq->axq_lock);
1918
1919 /*
1920 * The holding descriptor is the last
1921 * descriptor in queue. It's safe to remove
1922 * the last holding descriptor in BH context.
1923 */
1924 spin_lock_bh(&sc->tx.txbuflock);
1925 list_move_tail(&bf_held->list, &sc->tx.txbuf);
1926 spin_unlock_bh(&sc->tx.txbuflock);
1927
1928 break;
1929 } else {
1930 bf = list_entry(bf_held->list.next,
1931 struct ath_buf, list);
1932 }
1933 }
1934
1935 lastbf = bf->bf_lastbf;
1936 ds = lastbf->bf_desc;
1937
1938 status = ath9k_hw_txprocdesc(ah, ds);
1939 if (status == -EINPROGRESS) {
1940 spin_unlock_bh(&txq->axq_lock);
1941 break;
1942 }
1943 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1944 txq->axq_lastdsWithCTS = NULL;
1945 if (ds == txq->axq_gatingds)
1946 txq->axq_gatingds = NULL;
1947
1948 /*
1949 * Remove ath_buf's of the same transmit unit from txq,
1950 * however leave the last descriptor back as the holding
1951 * descriptor for hw.
1952 */
1953 lastbf->bf_status |= ATH_BUFSTATUS_STALE;
1954 INIT_LIST_HEAD(&bf_head);
1955 if (!list_is_singular(&lastbf->list))
1956 list_cut_position(&bf_head,
1957 &txq->axq_q, lastbf->list.prev);
1958
1959 txq->axq_depth--;
1960 if (bf_isaggr(bf))
1961 txq->axq_aggr_depth--;
1962
1963 txok = (ds->ds_txstat.ts_status == 0);
1964 spin_unlock_bh(&txq->axq_lock);
1965
1966 if (bf_held) {
1967 spin_lock_bh(&sc->tx.txbuflock);
1968 list_move_tail(&bf_held->list, &sc->tx.txbuf);
1969 spin_unlock_bh(&sc->tx.txbuflock);
1970 }
1971
1972 if (!bf_isampdu(bf)) {
1973 /*
1974 * This frame is sent out as a single frame.
1975 * Use hardware retry status for this frame.
1976 */
1977 bf->bf_retries = ds->ds_txstat.ts_longretry;
1978 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1979 bf->bf_state.bf_type |= BUF_XRETRY;
1980 nbad = 0;
1981 } else {
1982 nbad = ath_tx_num_badfrms(sc, bf, txok);
2402 } 1983 }
1984
1985 ath_tx_rc_status(bf, ds, nbad);
1986
1987 if (bf_isampdu(bf))
1988 ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
1989 else
1990 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
1991
1992 ath_wake_mac80211_queue(sc, txq);
1993
1994 spin_lock_bh(&txq->axq_lock);
1995 if (sc->sc_flags & SC_OP_TXAGGR)
1996 ath_txq_schedule(sc, txq);
1997 spin_unlock_bh(&txq->axq_lock);
1998 }
1999}
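The comment in ath_tx_processq() about the STALE flag describes the classic holding-descriptor trick: the most recently completed descriptor stays on the queue so the DMA engine always has a valid descriptor to re-read while new frames are chained behind it. A simplified, lock-free sketch of that reclaim loop (one frame per descriptor, invented names; not the driver's list handling):

#include <stdbool.h>
#include <stddef.h>

struct desc {
        struct desc *next;
        bool done;      /* hardware finished this descriptor */
        bool stale;     /* already reported, kept only as the hardware's anchor */
};

/* One reclaim pass. Finished descriptors are reported via complete() and
 * recycled via recycle(); the newest finished one stays queued, marked
 * stale, until the hardware has demonstrably moved past it. */
static struct desc *reclaim(struct desc *head,
                            void (*complete)(struct desc *),
                            void (*recycle)(struct desc *))
{
        while (head) {
                struct desc *anchor = NULL;
                struct desc *d = head;

                if (d->stale) {
                        anchor = d;
                        if (!d->next) {         /* anchor is last in queue: safe to drop */
                                recycle(anchor);
                                return NULL;
                        }
                        d = d->next;            /* examine the real head */
                }
                if (!d->done)                   /* hardware still owns the rest; keep
                                                 * the anchor in place for re-reads */
                        break;

                if (anchor) {                   /* hardware moved past it, free it now */
                        head = d;
                        recycle(anchor);
                }
                complete(d);                    /* report TX status upward */
                d->stale = true;                /* d becomes the new anchor */
        }
        return head;
}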
2000
2001
2002void ath_tx_tasklet(struct ath_softc *sc)
2003{
2004 int i;
2005 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2006
2007 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2008
2009 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2010 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2011 ath_tx_processq(sc, &sc->tx.txq[i]);
2403 } 2012 }
2404} 2013}
2405 2014
2406/* Initialize per-node transmit state */ 2015/*****************/
2016/* Init, Cleanup */
2017/*****************/
2018
2019int ath_tx_init(struct ath_softc *sc, int nbufs)
2020{
2021 int error = 0;
2022
2023 do {
2024 spin_lock_init(&sc->tx.txbuflock);
2025
2026 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2027 "tx", nbufs, 1);
2028 if (error != 0) {
2029 DPRINTF(sc, ATH_DBG_FATAL,
2030 "Failed to allocate tx descriptors: %d\n",
2031 error);
2032 break;
2033 }
2034
2035 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2036 "beacon", ATH_BCBUF, 1);
2037 if (error != 0) {
2038 DPRINTF(sc, ATH_DBG_FATAL,
2039 "Failed to allocate beacon descriptors: %d\n",
2040 error);
2041 break;
2042 }
2043
2044 } while (0);
2045
2046 if (error != 0)
2047 ath_tx_cleanup(sc);
2048
2049 return error;
2050}
2051
2052int ath_tx_cleanup(struct ath_softc *sc)
2053{
2054 if (sc->beacon.bdma.dd_desc_len != 0)
2055 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2056
2057 if (sc->tx.txdma.dd_desc_len != 0)
2058 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2059
2060 return 0;
2061}
2407 2062
2408void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2063void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2409{ 2064{
@@ -2411,9 +2066,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2411 struct ath_atx_ac *ac; 2066 struct ath_atx_ac *ac;
2412 int tidno, acno; 2067 int tidno, acno;
2413 2068
2414 /*
2415 * Init per tid tx state
2416 */
2417 for (tidno = 0, tid = &an->tid[tidno]; 2069 for (tidno = 0, tid = &an->tid[tidno];
2418 tidno < WME_NUM_TID; 2070 tidno < WME_NUM_TID;
2419 tidno++, tid++) { 2071 tidno++, tid++) {
@@ -2423,22 +2075,16 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2423 tid->baw_size = WME_MAX_BA; 2075 tid->baw_size = WME_MAX_BA;
2424 tid->baw_head = tid->baw_tail = 0; 2076 tid->baw_head = tid->baw_tail = 0;
2425 tid->sched = false; 2077 tid->sched = false;
2426 tid->paused = false; 2078 tid->paused = false;
2427 tid->state &= ~AGGR_CLEANUP; 2079 tid->state &= ~AGGR_CLEANUP;
2428 INIT_LIST_HEAD(&tid->buf_q); 2080 INIT_LIST_HEAD(&tid->buf_q);
2429
2430 acno = TID_TO_WME_AC(tidno); 2081 acno = TID_TO_WME_AC(tidno);
2431 tid->ac = &an->ac[acno]; 2082 tid->ac = &an->ac[acno];
2432
2433 /* ADDBA state */
2434 tid->state &= ~AGGR_ADDBA_COMPLETE; 2083 tid->state &= ~AGGR_ADDBA_COMPLETE;
2435 tid->state &= ~AGGR_ADDBA_PROGRESS; 2084 tid->state &= ~AGGR_ADDBA_PROGRESS;
2436 tid->addba_exchangeattempts = 0; 2085 tid->addba_exchangeattempts = 0;
2437 } 2086 }
2438 2087
2439 /*
2440 * Init per ac tx state
2441 */
2442 for (acno = 0, ac = &an->ac[acno]; 2088 for (acno = 0, ac = &an->ac[acno];
2443 acno < WME_NUM_AC; acno++, ac++) { 2089 acno < WME_NUM_AC; acno++, ac++) {
2444 ac->sched = false; 2090 ac->sched = false;
@@ -2465,14 +2111,13 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2465 } 2111 }
2466} 2112}
2467 2113
2468/* Cleanupthe pending buffers for the node. */
2469
2470void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) 2114void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2471{ 2115{
2472 int i; 2116 int i;
2473 struct ath_atx_ac *ac, *ac_tmp; 2117 struct ath_atx_ac *ac, *ac_tmp;
2474 struct ath_atx_tid *tid, *tid_tmp; 2118 struct ath_atx_tid *tid, *tid_tmp;
2475 struct ath_txq *txq; 2119 struct ath_txq *txq;
2120
2476 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2121 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2477 if (ATH_TXQ_SETUP(sc, i)) { 2122 if (ATH_TXQ_SETUP(sc, i)) {
2478 txq = &sc->tx.txq[i]; 2123 txq = &sc->tx.txq[i];
@@ -2503,51 +2148,3 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2503 } 2148 }
2504 } 2149 }
2505} 2150}
2506
2507void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2508{
2509 int hdrlen, padsize;
2510 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2511 struct ath_tx_control txctl;
2512
2513 memset(&txctl, 0, sizeof(struct ath_tx_control));
2514
2515 /*
2516 * As a temporary workaround, assign seq# here; this will likely need
2517 * to be cleaned up to work better with Beacon transmission and virtual
2518 * BSSes.
2519 */
2520 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2521 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2522 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2523 sc->tx.seq_no += 0x10;
2524 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2525 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2526 }
2527
2528 /* Add the padding after the header if this is not already done */
2529 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2530 if (hdrlen & 3) {
2531 padsize = hdrlen % 4;
2532 if (skb_headroom(skb) < padsize) {
2533 DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
2534 dev_kfree_skb_any(skb);
2535 return;
2536 }
2537 skb_push(skb, padsize);
2538 memmove(skb->data, skb->data + padsize, hdrlen);
2539 }
2540
2541 txctl.txq = sc->beacon.cabq;
2542
2543 DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
2544
2545 if (ath_tx_start(sc, skb, &txctl) != 0) {
2546 DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
2547 goto exit;
2548 }
2549
2550 return;
2551exit:
2552 dev_kfree_skb_any(skb);
2553}
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 4223672c443..91930a2c3c6 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2204,9 +2204,6 @@ static int atmel_get_frag(struct net_device *dev,
2204 return 0; 2204 return 0;
2205} 2205}
2206 2206
2207static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2208 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
2209
2210static int atmel_set_freq(struct net_device *dev, 2207static int atmel_set_freq(struct net_device *dev,
2211 struct iw_request_info *info, 2208 struct iw_request_info *info,
2212 struct iw_freq *fwrq, 2209 struct iw_freq *fwrq,
@@ -2216,16 +2213,12 @@ static int atmel_set_freq(struct net_device *dev,
2216 int rc = -EINPROGRESS; /* Call commit handler */ 2213 int rc = -EINPROGRESS; /* Call commit handler */
2217 2214
2218 /* If setting by frequency, convert to a channel */ 2215 /* If setting by frequency, convert to a channel */
2219 if ((fwrq->e == 1) && 2216 if (fwrq->e == 1) {
2220 (fwrq->m >= (int) 241200000) &&
2221 (fwrq->m <= (int) 248700000)) {
2222 int f = fwrq->m / 100000; 2217 int f = fwrq->m / 100000;
2223 int c = 0; 2218
2224 while ((c < 14) && (f != frequency_list[c]))
2225 c++;
2226 /* Hack to fall through... */ 2219 /* Hack to fall through... */
2227 fwrq->e = 0; 2220 fwrq->e = 0;
2228 fwrq->m = c + 1; 2221 fwrq->m = ieee80211_freq_to_dsss_chan(f);
2229 } 2222 }
2230 /* Setting by channel number */ 2223 /* Setting by channel number */
2231 if ((fwrq->m > 1000) || (fwrq->e > 0)) 2224 if ((fwrq->m > 1000) || (fwrq->e > 0))
@@ -2384,8 +2377,11 @@ static int atmel_get_range(struct net_device *dev,
2384 if (range->num_channels != 0) { 2377 if (range->num_channels != 0) {
2385 for (k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) { 2378 for (k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) {
2386 range->freq[k].i = i; /* List index */ 2379 range->freq[k].i = i; /* List index */
2387 range->freq[k].m = frequency_list[i - 1] * 100000; 2380
2388 range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */ 2381 /* Values in MHz -> * 10^5 * 10 */
2382 range->freq[k].m = (ieee80211_dsss_chan_to_freq(i) *
2383 100000);
2384 range->freq[k++].e = 1;
2389 } 2385 }
2390 range->num_frequency = k; 2386 range->num_frequency = k;
2391 } 2387 }
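The atmel change above replaces the driver-local frequency table with the shared DSSS channel helpers; the underlying 2.4 GHz mapping is simple enough to show directly (a reference sketch, not the mac80211 functions): channels 1-13 are 5 MHz apart starting at 2412 MHz, and channel 14 sits alone at 2484 MHz.

/* 2.4 GHz (DSSS) channel <-> frequency relationship, for reference only. */
static int dsss_chan_to_freq(int chan)
{
        if (chan == 14)
                return 2484;                    /* channel 14 is the outlier */
        if (chan >= 1 && chan <= 13)
                return 2407 + chan * 5;         /* 1 -> 2412, 13 -> 2472 */
        return -1;
}

static int dsss_freq_to_chan(int freq)
{
        if (freq == 2484)
                return 14;
        if (freq >= 2412 && freq <= 2472 && (freq - 2407) % 5 == 0)
                return (freq - 2407) / 5;
        return -1;
}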
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 1f81d36f87c..aab71a70ba7 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -110,10 +110,18 @@ config B43_DEBUG
110 bool "Broadcom 43xx debugging" 110 bool "Broadcom 43xx debugging"
111 depends on B43 111 depends on B43
112 ---help--- 112 ---help---
113 Broadcom 43xx debugging messages. 113 Broadcom 43xx debugging.
114 114
115 Say Y, if you want to find out why the driver does not 115 This adds additional runtime sanity checks and statistics to the driver.
 116 work for you. 116 These checks and statistics might be expensive and hurt runtime performance
117 of your system.
118 This also adds the b43 debugfs interface.
119
120 Do not enable this, unless you are debugging the driver.
121
122 Say N, if you are a distributor or user building a release kernel
123 for production use.
 124 Only say Y, if you are debugging a problem in the b43 driver source code.
117 125
118config B43_FORCE_PIO 126config B43_FORCE_PIO
119 bool "Force usage of PIO instead of DMA" 127 bool "Force usage of PIO instead of DMA"
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 14a02b3aea5..281ef831035 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -6,6 +6,7 @@ b43-y += phy_g.o
6b43-y += phy_a.o 6b43-y += phy_a.o
7b43-$(CONFIG_B43_NPHY) += phy_n.o 7b43-$(CONFIG_B43_NPHY) += phy_n.o
8b43-$(CONFIG_B43_PHY_LP) += phy_lp.o 8b43-$(CONFIG_B43_PHY_LP) += phy_lp.o
9b43-$(CONFIG_B43_PHY_LP) += tables_lpphy.o
9b43-y += sysfs.o 10b43-y += sysfs.o
10b43-y += xmit.o 11b43-y += xmit.o
11b43-y += lo.o 12b43-y += lo.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index a53c378e748..b4573101278 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -120,6 +120,9 @@
120#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ 120#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
121#define B43_MMIO_IFSCTL_USE_EDCF 0x0004 121#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
122#define B43_MMIO_POWERUP_DELAY 0x6A8 122#define B43_MMIO_POWERUP_DELAY 0x6A8
123#define B43_MMIO_BTCOEX_CTL 0x6B4 /* Bluetooth Coexistence Control */
124#define B43_MMIO_BTCOEX_STAT 0x6B6 /* Bluetooth Coexistence Status */
125#define B43_MMIO_BTCOEX_TXCTL 0x6B8 /* Bluetooth Coexistence Transmit Control */
123 126
124/* SPROM boardflags_lo values */ 127/* SPROM boardflags_lo values */
125#define B43_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */ 128#define B43_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */
@@ -655,10 +658,39 @@ struct b43_wl {
655 struct work_struct txpower_adjust_work; 658 struct work_struct txpower_adjust_work;
656}; 659};
657 660
661/* The type of the firmware file. */
662enum b43_firmware_file_type {
663 B43_FWTYPE_PROPRIETARY,
664 B43_FWTYPE_OPENSOURCE,
665 B43_NR_FWTYPES,
666};
667
668/* Context data for fetching firmware. */
669struct b43_request_fw_context {
670 /* The device we are requesting the fw for. */
671 struct b43_wldev *dev;
672 /* The type of firmware to request. */
673 enum b43_firmware_file_type req_type;
674 /* Error messages for each firmware type. */
675 char errors[B43_NR_FWTYPES][128];
676 /* Temporary buffer for storing the firmware name. */
677 char fwname[64];
 678 /* A fatal error occurred while requesting. Firmware requests
 679 * cannot continue, as any other request will also fail. */
680 int fatal_failure;
681};
682
658/* In-memory representation of a cached microcode file. */ 683/* In-memory representation of a cached microcode file. */
659struct b43_firmware_file { 684struct b43_firmware_file {
660 const char *filename; 685 const char *filename;
661 const struct firmware *data; 686 const struct firmware *data;
 687 /* Type of the firmware file name. Note that this only indicates
 688 * the type implied by the firmware name, NOT the file contents.
689 * If you want to check for proprietary vs opensource, use (struct b43_firmware)->opensource
690 * instead! The (struct b43_firmware)->opensource flag is derived from the actual firmware
691 * binary code, not just the filename.
692 */
693 enum b43_firmware_file_type type;
662}; 694};
663 695
664/* Pointers to the firmware data and meta information about it. */ 696/* Pointers to the firmware data and meta information about it. */
@@ -677,7 +709,8 @@ struct b43_firmware {
677 /* Firmware patchlevel */ 709 /* Firmware patchlevel */
678 u16 patch; 710 u16 patch;
679 711
680 /* Set to true, if we are using an opensource firmware. */ 712 /* Set to true, if we are using an opensource firmware.
713 * Use this to check for proprietary vs opensource. */
681 bool opensource; 714 bool opensource;
682 /* Set to true, if the core needs a PCM firmware, but 715 /* Set to true, if the core needs a PCM firmware, but
683 * we failed to load one. This is always false for 716 * we failed to load one. This is always false for
@@ -848,12 +881,9 @@ void b43err(struct b43_wl *wl, const char *fmt, ...)
848 __attribute__ ((format(printf, 2, 3))); 881 __attribute__ ((format(printf, 2, 3)));
849void b43warn(struct b43_wl *wl, const char *fmt, ...) 882void b43warn(struct b43_wl *wl, const char *fmt, ...)
850 __attribute__ ((format(printf, 2, 3))); 883 __attribute__ ((format(printf, 2, 3)));
851#if B43_DEBUG
852void b43dbg(struct b43_wl *wl, const char *fmt, ...) 884void b43dbg(struct b43_wl *wl, const char *fmt, ...)
853 __attribute__ ((format(printf, 2, 3))); 885 __attribute__ ((format(printf, 2, 3)));
854#else /* DEBUG */ 886
855# define b43dbg(wl, fmt...) do { /* nothing */ } while (0)
856#endif /* DEBUG */
857 887
858/* A WARN_ON variant that vanishes when b43 debugging is disabled. 888/* A WARN_ON variant that vanishes when b43 debugging is disabled.
859 * This _also_ evaluates the arg with debugging disabled. */ 889 * This _also_ evaluates the arg with debugging disabled. */
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index e04fc91f569..bc2767da46e 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -367,34 +367,6 @@ static int mmio32write__write_file(struct b43_wldev *dev,
367 return 0; 367 return 0;
368} 368}
369 369
370/* wl->irq_lock is locked */
371static ssize_t tsf_read_file(struct b43_wldev *dev,
372 char *buf, size_t bufsize)
373{
374 ssize_t count = 0;
375 u64 tsf;
376
377 b43_tsf_read(dev, &tsf);
378 fappend("0x%08x%08x\n",
379 (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32),
380 (unsigned int)(tsf & 0xFFFFFFFFULL));
381
382 return count;
383}
384
385/* wl->irq_lock is locked */
386static int tsf_write_file(struct b43_wldev *dev,
387 const char *buf, size_t count)
388{
389 u64 tsf;
390
391 if (sscanf(buf, "%llu", (unsigned long long *)(&tsf)) != 1)
392 return -EINVAL;
393 b43_tsf_write(dev, tsf);
394
395 return 0;
396}
397
398static ssize_t txstat_read_file(struct b43_wldev *dev, 370static ssize_t txstat_read_file(struct b43_wldev *dev,
399 char *buf, size_t bufsize) 371 char *buf, size_t bufsize)
400{ 372{
@@ -691,15 +663,23 @@ B43_DEBUGFS_FOPS(mmio16read, mmio16read__read_file, mmio16read__write_file, 1);
691B43_DEBUGFS_FOPS(mmio16write, NULL, mmio16write__write_file, 1); 663B43_DEBUGFS_FOPS(mmio16write, NULL, mmio16write__write_file, 1);
692B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file, 1); 664B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file, 1);
693B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1); 665B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1);
694B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1);
695B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); 666B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0);
696B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); 667B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1);
697B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0); 668B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0);
698 669
699 670
700int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) 671bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
701{ 672{
702 return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]); 673 bool enabled;
674
675 enabled = (dev->dfsentry && dev->dfsentry->dyn_debug[feature]);
676 if (unlikely(enabled)) {
677 /* Force full debugging messages, if the user enabled
678 * some dynamic debugging feature. */
679 b43_modparam_verbose = B43_VERBOSITY_MAX;
680 }
681
682 return enabled;
703} 683}
704 684
705static void b43_remove_dynamic_debug(struct b43_wldev *dev) 685static void b43_remove_dynamic_debug(struct b43_wldev *dev)
@@ -805,7 +785,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
805 ADD_FILE(mmio16write, 0200); 785 ADD_FILE(mmio16write, 0200);
806 ADD_FILE(mmio32read, 0600); 786 ADD_FILE(mmio32read, 0600);
807 ADD_FILE(mmio32write, 0200); 787 ADD_FILE(mmio32write, 0200);
808 ADD_FILE(tsf, 0600);
809 ADD_FILE(txstat, 0400); 788 ADD_FILE(txstat, 0400);
810 ADD_FILE(restart, 0200); 789 ADD_FILE(restart, 0200);
811 ADD_FILE(loctls, 0400); 790 ADD_FILE(loctls, 0400);
@@ -834,7 +813,6 @@ void b43_debugfs_remove_device(struct b43_wldev *dev)
834 debugfs_remove(e->file_mmio16write.dentry); 813 debugfs_remove(e->file_mmio16write.dentry);
835 debugfs_remove(e->file_mmio32read.dentry); 814 debugfs_remove(e->file_mmio32read.dentry);
836 debugfs_remove(e->file_mmio32write.dentry); 815 debugfs_remove(e->file_mmio32write.dentry);
837 debugfs_remove(e->file_tsf.dentry);
838 debugfs_remove(e->file_txstat.dentry); 816 debugfs_remove(e->file_txstat.dentry);
839 debugfs_remove(e->file_restart.dentry); 817 debugfs_remove(e->file_restart.dentry);
840 debugfs_remove(e->file_loctls.dentry); 818 debugfs_remove(e->file_loctls.dentry);
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index 7886cbe2d1d..b9d4de4a979 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -46,7 +46,6 @@ struct b43_dfsentry {
46 struct b43_dfs_file file_mmio16write; 46 struct b43_dfs_file file_mmio16write;
47 struct b43_dfs_file file_mmio32read; 47 struct b43_dfs_file file_mmio32read;
48 struct b43_dfs_file file_mmio32write; 48 struct b43_dfs_file file_mmio32write;
49 struct b43_dfs_file file_tsf;
50 struct b43_dfs_file file_txstat; 49 struct b43_dfs_file file_txstat;
51 struct b43_dfs_file file_txpower_g; 50 struct b43_dfs_file file_txpower_g;
52 struct b43_dfs_file file_restart; 51 struct b43_dfs_file file_restart;
@@ -72,7 +71,7 @@ struct b43_dfsentry {
72 struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG]; 71 struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG];
73}; 72};
74 73
75int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature); 74bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature);
76 75
77void b43_debugfs_init(void); 76void b43_debugfs_init(void);
78void b43_debugfs_exit(void); 77void b43_debugfs_exit(void);
@@ -83,7 +82,7 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
83 82
84#else /* CONFIG_B43_DEBUG */ 83#else /* CONFIG_B43_DEBUG */
85 84
86static inline int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) 85static inline bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
87{ 86{
88 return 0; 87 return 0;
89} 88}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c788bad1066..dbb8765506e 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4,7 +4,7 @@
4 4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de> 5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>
6 Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it> 6 Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
7 Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> 7 Copyright (c) 2005-2009 Michael Buesch <mb@bu3sch.de>
8 Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> 8 Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
9 Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> 9 Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10 10
@@ -88,6 +88,10 @@ static int modparam_btcoex = 1;
88module_param_named(btcoex, modparam_btcoex, int, 0444); 88module_param_named(btcoex, modparam_btcoex, int, 0444);
89MODULE_PARM_DESC(btcoex, "Enable Bluetooth coexistance (default on)"); 89MODULE_PARM_DESC(btcoex, "Enable Bluetooth coexistance (default on)");
90 90
91int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
92module_param_named(verbose, b43_modparam_verbose, int, 0644);
93MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
94
91 95
92static const struct ssb_device_id b43_ssb_tbl[] = { 96static const struct ssb_device_id b43_ssb_tbl[] = {
93 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), 97 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -97,6 +101,8 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
97 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), 101 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
98 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11), 102 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
99 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), 103 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
104 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
105 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
100 SSB_DEVTABLE_END 106 SSB_DEVTABLE_END
101}; 107};
102 108
@@ -298,6 +304,8 @@ void b43info(struct b43_wl *wl, const char *fmt, ...)
298{ 304{
299 va_list args; 305 va_list args;
300 306
307 if (b43_modparam_verbose < B43_VERBOSITY_INFO)
308 return;
301 if (!b43_ratelimit(wl)) 309 if (!b43_ratelimit(wl))
302 return; 310 return;
303 va_start(args, fmt); 311 va_start(args, fmt);
@@ -311,6 +319,8 @@ void b43err(struct b43_wl *wl, const char *fmt, ...)
311{ 319{
312 va_list args; 320 va_list args;
313 321
322 if (b43_modparam_verbose < B43_VERBOSITY_ERROR)
323 return;
314 if (!b43_ratelimit(wl)) 324 if (!b43_ratelimit(wl))
315 return; 325 return;
316 va_start(args, fmt); 326 va_start(args, fmt);
@@ -324,6 +334,8 @@ void b43warn(struct b43_wl *wl, const char *fmt, ...)
324{ 334{
325 va_list args; 335 va_list args;
326 336
337 if (b43_modparam_verbose < B43_VERBOSITY_WARN)
338 return;
327 if (!b43_ratelimit(wl)) 339 if (!b43_ratelimit(wl))
328 return; 340 return;
329 va_start(args, fmt); 341 va_start(args, fmt);
@@ -333,18 +345,18 @@ void b43warn(struct b43_wl *wl, const char *fmt, ...)
333 va_end(args); 345 va_end(args);
334} 346}
335 347
336#if B43_DEBUG
337void b43dbg(struct b43_wl *wl, const char *fmt, ...) 348void b43dbg(struct b43_wl *wl, const char *fmt, ...)
338{ 349{
339 va_list args; 350 va_list args;
340 351
352 if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
353 return;
341 va_start(args, fmt); 354 va_start(args, fmt);
342 printk(KERN_DEBUG "b43-%s debug: ", 355 printk(KERN_DEBUG "b43-%s debug: ",
343 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 356 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
344 vprintk(fmt, args); 357 vprintk(fmt, args);
345 va_end(args); 358 va_end(args);
346} 359}
347#endif /* DEBUG */
348 360
349static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val) 361static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val)
350{ 362{
@@ -526,52 +538,20 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
526 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); 538 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
527} 539}
528 540
529void b43_tsf_read(struct b43_wldev *dev, u64 * tsf) 541void b43_tsf_read(struct b43_wldev *dev, u64 *tsf)
530{ 542{
531 /* We need to be careful. As we read the TSF from multiple 543 u32 low, high;
532 * registers, we should take care of register overflows.
533 * In theory, the whole tsf read process should be atomic.
534 * We try to be atomic here, by restaring the read process,
535 * if any of the high registers changed (overflew).
536 */
537 if (dev->dev->id.revision >= 3) {
538 u32 low, high, high2;
539 544
540 do { 545 B43_WARN_ON(dev->dev->id.revision < 3);
541 high = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH);
542 low = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_LOW);
543 high2 = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH);
544 } while (unlikely(high != high2));
545 546
546 *tsf = high; 547 /* The hardware guarantees us an atomic read, if we
547 *tsf <<= 32; 548 * read the low register first. */
548 *tsf |= low; 549 low = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_LOW);
549 } else { 550 high = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH);
550 u64 tmp;
551 u16 v0, v1, v2, v3;
552 u16 test1, test2, test3;
553
554 do {
555 v3 = b43_read16(dev, B43_MMIO_TSF_3);
556 v2 = b43_read16(dev, B43_MMIO_TSF_2);
557 v1 = b43_read16(dev, B43_MMIO_TSF_1);
558 v0 = b43_read16(dev, B43_MMIO_TSF_0);
559 551
560 test3 = b43_read16(dev, B43_MMIO_TSF_3); 552 *tsf = high;
561 test2 = b43_read16(dev, B43_MMIO_TSF_2); 553 *tsf <<= 32;
562 test1 = b43_read16(dev, B43_MMIO_TSF_1); 554 *tsf |= low;
563 } while (v3 != test3 || v2 != test2 || v1 != test1);
564
565 *tsf = v3;
566 *tsf <<= 48;
567 tmp = v2;
568 tmp <<= 32;
569 *tsf |= tmp;
570 tmp = v1;
571 tmp <<= 16;
572 *tsf |= tmp;
573 *tsf |= v0;
574 }
575} 555}
576 556
577static void b43_time_lock(struct b43_wldev *dev) 557static void b43_time_lock(struct b43_wldev *dev)
@@ -598,35 +578,18 @@ static void b43_time_unlock(struct b43_wldev *dev)
598 578
599static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) 579static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf)
600{ 580{
601 /* Be careful with the in-progress timer. 581 u32 low, high;
602 * First zero out the low register, so we have a full
603 * register-overflow duration to complete the operation.
604 */
605 if (dev->dev->id.revision >= 3) {
606 u32 lo = (tsf & 0x00000000FFFFFFFFULL);
607 u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32;
608 582
609 b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, 0); 583 B43_WARN_ON(dev->dev->id.revision < 3);
610 mmiowb();
611 b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, hi);
612 mmiowb();
613 b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, lo);
614 } else {
615 u16 v0 = (tsf & 0x000000000000FFFFULL);
616 u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16;
617 u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32;
618 u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48;
619 584
620 b43_write16(dev, B43_MMIO_TSF_0, 0); 585 low = tsf;
621 mmiowb(); 586 high = (tsf >> 32);
622 b43_write16(dev, B43_MMIO_TSF_3, v3); 587 /* The hardware guarantees us an atomic write, if we
623 mmiowb(); 588 * write the low register first. */
624 b43_write16(dev, B43_MMIO_TSF_2, v2); 589 b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low);
625 mmiowb(); 590 mmiowb();
626 b43_write16(dev, B43_MMIO_TSF_1, v1); 591 b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high);
627 mmiowb(); 592 mmiowb();
628 b43_write16(dev, B43_MMIO_TSF_0, v0);
629 }
630} 593}
631 594
632void b43_tsf_write(struct b43_wldev *dev, u64 tsf) 595void b43_tsf_write(struct b43_wldev *dev, u64 tsf)
@@ -937,8 +900,7 @@ static int b43_key_write(struct b43_wldev *dev,
937 B43_WARN_ON(dev->key[i].keyconf == keyconf); 900 B43_WARN_ON(dev->key[i].keyconf == keyconf);
938 } 901 }
939 if (index < 0) { 902 if (index < 0) {
940 /* Either pairwise key or address is 00:00:00:00:00:00 903 /* Pairwise key. Get an empty slot for the key. */
941 * for transmit-only keys. Search the index. */
942 if (b43_new_kidx_api(dev)) 904 if (b43_new_kidx_api(dev))
943 sta_keys_start = 4; 905 sta_keys_start = 4;
944 else 906 else
@@ -951,7 +913,7 @@ static int b43_key_write(struct b43_wldev *dev,
951 } 913 }
952 } 914 }
953 if (index < 0) { 915 if (index < 0) {
954 b43err(dev->wl, "Out of hardware key memory\n"); 916 b43warn(dev->wl, "Out of hardware key memory\n");
955 return -ENOSPC; 917 return -ENOSPC;
956 } 918 }
957 } else 919 } else
@@ -1982,7 +1944,7 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
1982 return ret; 1944 return ret;
1983} 1945}
1984 1946
1985static void do_release_fw(struct b43_firmware_file *fw) 1947void b43_do_release_fw(struct b43_firmware_file *fw)
1986{ 1948{
1987 release_firmware(fw->data); 1949 release_firmware(fw->data);
1988 fw->data = NULL; 1950 fw->data = NULL;
@@ -1991,10 +1953,10 @@ static void do_release_fw(struct b43_firmware_file *fw)
1991 1953
1992static void b43_release_firmware(struct b43_wldev *dev) 1954static void b43_release_firmware(struct b43_wldev *dev)
1993{ 1955{
1994 do_release_fw(&dev->fw.ucode); 1956 b43_do_release_fw(&dev->fw.ucode);
1995 do_release_fw(&dev->fw.pcm); 1957 b43_do_release_fw(&dev->fw.pcm);
1996 do_release_fw(&dev->fw.initvals); 1958 b43_do_release_fw(&dev->fw.initvals);
1997 do_release_fw(&dev->fw.initvals_band); 1959 b43_do_release_fw(&dev->fw.initvals_band);
1998} 1960}
1999 1961
2000static void b43_print_fw_helptext(struct b43_wl *wl, bool error) 1962static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
@@ -2002,20 +1964,19 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
2002 const char *text; 1964 const char *text;
2003 1965
2004 text = "You must go to " 1966 text = "You must go to "
2005 "http://linuxwireless.org/en/users/Drivers/b43#devicefirmware " 1967 "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware "
2006 "and download the latest firmware (version 4).\n"; 1968 "and download the correct firmware for this driver version. "
1969 "Please carefully read all instructions on this website.\n";
2007 if (error) 1970 if (error)
2008 b43err(wl, text); 1971 b43err(wl, text);
2009 else 1972 else
2010 b43warn(wl, text); 1973 b43warn(wl, text);
2011} 1974}
2012 1975
2013static int do_request_fw(struct b43_wldev *dev, 1976int b43_do_request_fw(struct b43_request_fw_context *ctx,
2014 const char *name, 1977 const char *name,
2015 struct b43_firmware_file *fw, 1978 struct b43_firmware_file *fw)
2016 bool silent)
2017{ 1979{
2018 char path[sizeof(modparam_fwpostfix) + 32];
2019 const struct firmware *blob; 1980 const struct firmware *blob;
2020 struct b43_fw_header *hdr; 1981 struct b43_fw_header *hdr;
2021 u32 size; 1982 u32 size;
@@ -2023,29 +1984,49 @@ static int do_request_fw(struct b43_wldev *dev,
2023 1984
2024 if (!name) { 1985 if (!name) {
2025 /* Don't fetch anything. Free possibly cached firmware. */ 1986 /* Don't fetch anything. Free possibly cached firmware. */
2026 do_release_fw(fw); 1987 /* FIXME: We should probably keep it anyway, to save some headache
1988 * on suspend/resume with multiband devices. */
1989 b43_do_release_fw(fw);
2027 return 0; 1990 return 0;
2028 } 1991 }
2029 if (fw->filename) { 1992 if (fw->filename) {
2030 if (strcmp(fw->filename, name) == 0) 1993 if ((fw->type == ctx->req_type) &&
1994 (strcmp(fw->filename, name) == 0))
2031 return 0; /* Already have this fw. */ 1995 return 0; /* Already have this fw. */
2032 /* Free the cached firmware first. */ 1996 /* Free the cached firmware first. */
2033 do_release_fw(fw); 1997 /* FIXME: We should probably do this later after we successfully
1998 * got the new fw. This could reduce headache with multiband devices.
1999 * We could also redesign this to cache the firmware for all possible
2000 * bands all the time. */
2001 b43_do_release_fw(fw);
2002 }
2003
2004 switch (ctx->req_type) {
2005 case B43_FWTYPE_PROPRIETARY:
2006 snprintf(ctx->fwname, sizeof(ctx->fwname),
2007 "b43%s/%s.fw",
2008 modparam_fwpostfix, name);
2009 break;
2010 case B43_FWTYPE_OPENSOURCE:
2011 snprintf(ctx->fwname, sizeof(ctx->fwname),
2012 "b43-open%s/%s.fw",
2013 modparam_fwpostfix, name);
2014 break;
2015 default:
2016 B43_WARN_ON(1);
2017 return -ENOSYS;
2034 } 2018 }
2035 2019 err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
2036 snprintf(path, ARRAY_SIZE(path),
2037 "b43%s/%s.fw",
2038 modparam_fwpostfix, name);
2039 err = request_firmware(&blob, path, dev->dev->dev);
2040 if (err == -ENOENT) { 2020 if (err == -ENOENT) {
2041 if (!silent) { 2021 snprintf(ctx->errors[ctx->req_type],
2042 b43err(dev->wl, "Firmware file \"%s\" not found\n", 2022 sizeof(ctx->errors[ctx->req_type]),
2043 path); 2023 "Firmware file \"%s\" not found\n", ctx->fwname);
2044 }
2045 return err; 2024 return err;
2046 } else if (err) { 2025 } else if (err) {
2047 b43err(dev->wl, "Firmware file \"%s\" request failed (err=%d)\n", 2026 snprintf(ctx->errors[ctx->req_type],
2048 path, err); 2027 sizeof(ctx->errors[ctx->req_type]),
2028 "Firmware file \"%s\" request failed (err=%d)\n",
2029 ctx->fwname, err);
2049 return err; 2030 return err;
2050 } 2031 }
2051 if (blob->size < sizeof(struct b43_fw_header)) 2032 if (blob->size < sizeof(struct b43_fw_header))
@@ -2068,20 +2049,24 @@ static int do_request_fw(struct b43_wldev *dev,
2068 2049
2069 fw->data = blob; 2050 fw->data = blob;
2070 fw->filename = name; 2051 fw->filename = name;
2052 fw->type = ctx->req_type;
2071 2053
2072 return 0; 2054 return 0;
2073 2055
2074err_format: 2056err_format:
2075 b43err(dev->wl, "Firmware file \"%s\" format error.\n", path); 2057 snprintf(ctx->errors[ctx->req_type],
2058 sizeof(ctx->errors[ctx->req_type]),
2059 "Firmware file \"%s\" format error.\n", ctx->fwname);
2076 release_firmware(blob); 2060 release_firmware(blob);
2077 2061
2078 return -EPROTO; 2062 return -EPROTO;
2079} 2063}
2080 2064
2081static int b43_request_firmware(struct b43_wldev *dev) 2065static int b43_try_request_fw(struct b43_request_fw_context *ctx)
2082{ 2066{
2083 struct b43_firmware *fw = &dev->fw; 2067 struct b43_wldev *dev = ctx->dev;
2084 const u8 rev = dev->dev->id.revision; 2068 struct b43_firmware *fw = &ctx->dev->fw;
2069 const u8 rev = ctx->dev->dev->id.revision;
2085 const char *filename; 2070 const char *filename;
2086 u32 tmshigh; 2071 u32 tmshigh;
2087 int err; 2072 int err;
@@ -2096,7 +2081,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
2096 filename = "ucode13"; 2081 filename = "ucode13";
2097 else 2082 else
2098 goto err_no_ucode; 2083 goto err_no_ucode;
2099 err = do_request_fw(dev, filename, &fw->ucode, 0); 2084 err = b43_do_request_fw(ctx, filename, &fw->ucode);
2100 if (err) 2085 if (err)
2101 goto err_load; 2086 goto err_load;
2102 2087
@@ -2108,7 +2093,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
2108 else 2093 else
2109 goto err_no_pcm; 2094 goto err_no_pcm;
2110 fw->pcm_request_failed = 0; 2095 fw->pcm_request_failed = 0;
2111 err = do_request_fw(dev, filename, &fw->pcm, 1); 2096 err = b43_do_request_fw(ctx, filename, &fw->pcm);
2112 if (err == -ENOENT) { 2097 if (err == -ENOENT) {
2113 /* We did not find a PCM file? Not fatal, but 2098 /* We did not find a PCM file? Not fatal, but
2114 * core rev <= 10 must do without hwcrypto then. */ 2099 * core rev <= 10 must do without hwcrypto then. */
@@ -2144,7 +2129,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
2144 default: 2129 default:
2145 goto err_no_initvals; 2130 goto err_no_initvals;
2146 } 2131 }
2147 err = do_request_fw(dev, filename, &fw->initvals, 0); 2132 err = b43_do_request_fw(ctx, filename, &fw->initvals);
2148 if (err) 2133 if (err)
2149 goto err_load; 2134 goto err_load;
2150 2135
@@ -2178,30 +2163,34 @@ static int b43_request_firmware(struct b43_wldev *dev)
2178 default: 2163 default:
2179 goto err_no_initvals; 2164 goto err_no_initvals;
2180 } 2165 }
2181 err = do_request_fw(dev, filename, &fw->initvals_band, 0); 2166 err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
2182 if (err) 2167 if (err)
2183 goto err_load; 2168 goto err_load;
2184 2169
2185 return 0; 2170 return 0;
2186 2171
2187err_load:
2188 b43_print_fw_helptext(dev->wl, 1);
2189 goto error;
2190
2191err_no_ucode: 2172err_no_ucode:
2192 err = -ENODEV; 2173 err = ctx->fatal_failure = -EOPNOTSUPP;
2193 b43err(dev->wl, "No microcode available for core rev %u\n", rev); 2174 b43err(dev->wl, "The driver does not know which firmware (ucode) "
2175 "is required for your device (wl-core rev %u)\n", rev);
2194 goto error; 2176 goto error;
2195 2177
2196err_no_pcm: 2178err_no_pcm:
2197 err = -ENODEV; 2179 err = ctx->fatal_failure = -EOPNOTSUPP;
2198 b43err(dev->wl, "No PCM available for core rev %u\n", rev); 2180 b43err(dev->wl, "The driver does not know which firmware (PCM) "
2181 "is required for your device (wl-core rev %u)\n", rev);
2199 goto error; 2182 goto error;
2200 2183
2201err_no_initvals: 2184err_no_initvals:
2202 err = -ENODEV; 2185 err = ctx->fatal_failure = -EOPNOTSUPP;
2203 b43err(dev->wl, "No Initial Values firmware file for PHY %u, " 2186 b43err(dev->wl, "The driver does not know which firmware (initvals) "
2204 "core rev %u\n", dev->phy.type, rev); 2187 "is required for your device (wl-core rev %u)\n", rev);
2188 goto error;
2189
2190err_load:
2191 /* We failed to load this firmware image. The error message
2192 * already is in ctx->errors. Return and let our caller decide
2193 * what to do. */
2205 goto error; 2194 goto error;
2206 2195
2207error: 2196error:
@@ -2209,6 +2198,48 @@ error:
2209 return err; 2198 return err;
2210} 2199}
2211 2200
2201static int b43_request_firmware(struct b43_wldev *dev)
2202{
2203 struct b43_request_fw_context *ctx;
2204 unsigned int i;
2205 int err;
2206 const char *errmsg;
2207
2208 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2209 if (!ctx)
2210 return -ENOMEM;
2211 ctx->dev = dev;
2212
2213 ctx->req_type = B43_FWTYPE_PROPRIETARY;
2214 err = b43_try_request_fw(ctx);
2215 if (!err)
2216 goto out; /* Successfully loaded it. */
2217 err = ctx->fatal_failure;
2218 if (err)
2219 goto out;
2220
2221 ctx->req_type = B43_FWTYPE_OPENSOURCE;
2222 err = b43_try_request_fw(ctx);
2223 if (!err)
2224 goto out; /* Successfully loaded it. */
2225 err = ctx->fatal_failure;
2226 if (err)
2227 goto out;
2228
2229 /* Could not find a usable firmware. Print the errors. */
2230 for (i = 0; i < B43_NR_FWTYPES; i++) {
2231 errmsg = ctx->errors[i];
2232 if (strlen(errmsg))
2233 b43err(dev->wl, errmsg);
2234 }
2235 b43_print_fw_helptext(dev->wl, 1);
2236 err = -ENOENT;
2237
2238out:
2239 kfree(ctx);
2240 return err;
2241}
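The wrapper above is a plain two-pass fallback: try the proprietary firmware naming scheme, then the open-source one, and only dump the collected per-type error messages if both attempts fail. The same shape stripped of driver types (all names below are placeholders, not b43 symbols):

#include <string.h>

enum fw_type { FW_PROPRIETARY, FW_OPENSOURCE, FW_NR_TYPES };

struct fw_ctx {
        enum fw_type type;
        char errors[FW_NR_TYPES][128];  /* one message per attempted type */
        int fatal;                      /* set when retrying cannot help */
};

extern int try_one_type(struct fw_ctx *ctx);    /* fills ctx->errors[ctx->type] */
extern void report(const char *msg);

static int request_firmware_with_fallback(struct fw_ctx *ctx)
{
        static const enum fw_type order[] = { FW_PROPRIETARY, FW_OPENSOURCE };
        unsigned int i;
        int err = -1;

        for (i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
                ctx->type = order[i];
                err = try_one_type(ctx);
                if (!err)
                        return 0;               /* loaded successfully */
                if (ctx->fatal)
                        return ctx->fatal;      /* e.g. unknown core revision */
        }

        /* Neither flavour worked: report everything we collected. */
        for (i = 0; i < FW_NR_TYPES; i++)
                if (strlen(ctx->errors[i]))
                        report(ctx->errors[i]);
        return err;
}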
2242
2212static int b43_upload_microcode(struct b43_wldev *dev) 2243static int b43_upload_microcode(struct b43_wldev *dev)
2213{ 2244{
2214 const size_t hdr_len = sizeof(struct b43_fw_header); 2245 const size_t hdr_len = sizeof(struct b43_fw_header);
@@ -2319,8 +2350,11 @@ static int b43_upload_microcode(struct b43_wldev *dev)
2319 } 2350 }
2320 2351
2321 if (b43_is_old_txhdr_format(dev)) { 2352 if (b43_is_old_txhdr_format(dev)) {
2353 /* We're over the deadline, but we keep support for old fw
2354 * until it turns out to be in major conflict with something new. */
2322 b43warn(dev->wl, "You are using an old firmware image. " 2355 b43warn(dev->wl, "You are using an old firmware image. "
2323 "Support for old firmware will be removed in July 2008.\n"); 2356 "Support for old firmware will be removed soon "
2357 "(official deadline was July 2008).\n");
2324 b43_print_fw_helptext(dev->wl, 0); 2358 b43_print_fw_helptext(dev->wl, 0);
2325 } 2359 }
2326 2360
@@ -3221,6 +3255,43 @@ static int b43_op_get_stats(struct ieee80211_hw *hw,
3221 return 0; 3255 return 0;
3222} 3256}
3223 3257
3258static u64 b43_op_get_tsf(struct ieee80211_hw *hw)
3259{
3260 struct b43_wl *wl = hw_to_b43_wl(hw);
3261 struct b43_wldev *dev;
3262 u64 tsf;
3263
3264 mutex_lock(&wl->mutex);
3265 spin_lock_irq(&wl->irq_lock);
3266 dev = wl->current_dev;
3267
3268 if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED))
3269 b43_tsf_read(dev, &tsf);
3270 else
3271 tsf = 0;
3272
3273 spin_unlock_irq(&wl->irq_lock);
3274 mutex_unlock(&wl->mutex);
3275
3276 return tsf;
3277}
3278
3279static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
3280{
3281 struct b43_wl *wl = hw_to_b43_wl(hw);
3282 struct b43_wldev *dev;
3283
3284 mutex_lock(&wl->mutex);
3285 spin_lock_irq(&wl->irq_lock);
3286 dev = wl->current_dev;
3287
3288 if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED))
3289 b43_tsf_write(dev, tsf);
3290
3291 spin_unlock_irq(&wl->irq_lock);
3292 mutex_unlock(&wl->mutex);
3293}
3294
3224static void b43_put_phy_into_reset(struct b43_wldev *dev) 3295static void b43_put_phy_into_reset(struct b43_wldev *dev)
3225{ 3296{
3226 struct ssb_device *sdev = dev->dev; 3297 struct ssb_device *sdev = dev->dev;
@@ -3442,7 +3513,7 @@ out_unlock_mutex:
3442 return err; 3513 return err;
3443} 3514}
3444 3515
3445static void b43_update_basic_rates(struct b43_wldev *dev, u64 brates) 3516static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates)
3446{ 3517{
3447 struct ieee80211_supported_band *sband = 3518 struct ieee80211_supported_band *sband =
3448 dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)]; 3519 dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)];
@@ -3520,21 +3591,29 @@ out_unlock_mutex:
3520} 3591}
3521 3592
3522static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3593static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3523 const u8 *local_addr, const u8 *addr, 3594 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3524 struct ieee80211_key_conf *key) 3595 struct ieee80211_key_conf *key)
3525{ 3596{
3526 struct b43_wl *wl = hw_to_b43_wl(hw); 3597 struct b43_wl *wl = hw_to_b43_wl(hw);
3527 struct b43_wldev *dev; 3598 struct b43_wldev *dev;
3528 unsigned long flags;
3529 u8 algorithm; 3599 u8 algorithm;
3530 u8 index; 3600 u8 index;
3531 int err; 3601 int err;
3602 static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3532 3603
3533 if (modparam_nohwcrypt) 3604 if (modparam_nohwcrypt)
3534 return -ENOSPC; /* User disabled HW-crypto */ 3605 return -ENOSPC; /* User disabled HW-crypto */
3535 3606
3536 mutex_lock(&wl->mutex); 3607 mutex_lock(&wl->mutex);
3537 spin_lock_irqsave(&wl->irq_lock, flags);
3608 spin_lock_irq(&wl->irq_lock);
3609 write_lock(&wl->tx_lock);
3610 /* Why do we need all this locking here?
3611 * mutex -> Every config operation must take it.
3612 * irq_lock -> We modify the dev->key array, which is accessed
3613 * in the IRQ handlers.
3614 * tx_lock -> We modify the dev->key array, which is accessed
3615 * in the TX handler.
3616 */
3538 3617
3539 dev = wl->current_dev; 3618 dev = wl->current_dev;
3540 err = -ENODEV; 3619 err = -ENODEV;
@@ -3551,7 +3630,7 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3551 err = -EINVAL; 3630 err = -EINVAL;
3552 switch (key->alg) { 3631 switch (key->alg) {
3553 case ALG_WEP: 3632 case ALG_WEP:
3554 if (key->keylen == 5)
3633 if (key->keylen == LEN_WEP40)
3555 algorithm = B43_SEC_ALGO_WEP40; 3634 algorithm = B43_SEC_ALGO_WEP40;
3556 else 3635 else
3557 algorithm = B43_SEC_ALGO_WEP104; 3636 algorithm = B43_SEC_ALGO_WEP104;
@@ -3578,17 +3657,19 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3578 goto out_unlock; 3657 goto out_unlock;
3579 } 3658 }
3580 3659
3581 if (is_broadcast_ether_addr(addr)) {
3582 /* addr is FF:FF:FF:FF:FF:FF for default keys */
3660 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3661 if (WARN_ON(!sta)) {
3662 err = -EOPNOTSUPP;
3663 goto out_unlock;
3664 }
3665 /* Pairwise key with an assigned MAC address. */
3666 err = b43_key_write(dev, -1, algorithm,
3667 key->key, key->keylen,
3668 sta->addr, key);
3669 } else {
3670 /* Group key */
3583 err = b43_key_write(dev, index, algorithm, 3671 err = b43_key_write(dev, index, algorithm,
3584 key->key, key->keylen, NULL, key); 3672 key->key, key->keylen, NULL, key);
3585 } else {
3586 /*
3587 * either pairwise key or address is 00:00:00:00:00:00
3588 * for transmit-only keys
3589 */
3590 err = b43_key_write(dev, -1, algorithm,
3591 key->key, key->keylen, addr, key);
3592 } 3673 }
3593 if (err) 3674 if (err)
3594 goto out_unlock; 3675 goto out_unlock;
@@ -3617,10 +3698,11 @@ out_unlock:
3617 b43dbg(wl, "%s hardware based encryption for keyidx: %d, " 3698 b43dbg(wl, "%s hardware based encryption for keyidx: %d, "
3618 "mac: %pM\n", 3699 "mac: %pM\n",
3619 cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, 3700 cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
3620 addr);
3701 sta ? sta->addr : bcast_addr);
3621 b43_dump_keymemory(dev); 3702 b43_dump_keymemory(dev);
3622 } 3703 }
3623 spin_unlock_irqrestore(&wl->irq_lock, flags);
3704 write_unlock(&wl->tx_lock);
3705 spin_unlock_irq(&wl->irq_lock);
3624 mutex_unlock(&wl->mutex); 3706 mutex_unlock(&wl->mutex);
3625 3707
3626 return err; 3708 return err;
@@ -3796,6 +3878,12 @@ static int b43_phy_versioning(struct b43_wldev *dev)
3796 break; 3878 break;
3797#ifdef CONFIG_B43_NPHY 3879#ifdef CONFIG_B43_NPHY
3798 case B43_PHYTYPE_N: 3880 case B43_PHYTYPE_N:
3881 if (phy_rev > 4)
3882 unsupported = 1;
3883 break;
3884#endif
3885#ifdef CONFIG_B43_PHY_LP
3886 case B43_PHYTYPE_LP:
3799 if (phy_rev > 1) 3887 if (phy_rev > 1)
3800 unsupported = 1; 3888 unsupported = 1;
3801 break; 3889 break;
@@ -3849,7 +3937,11 @@ static int b43_phy_versioning(struct b43_wldev *dev)
3849 unsupported = 1; 3937 unsupported = 1;
3850 break; 3938 break;
3851 case B43_PHYTYPE_N: 3939 case B43_PHYTYPE_N:
3852 if (radio_ver != 0x2055)
3940 if (radio_ver != 0x2055 && radio_ver != 0x2056)
3941 unsupported = 1;
3942 break;
3943 case B43_PHYTYPE_LP:
3944 if (radio_ver != 0x2062)
3853 unsupported = 1; 3945 unsupported = 1;
3854 break; 3946 break;
3855 default: 3947 default:
@@ -4317,6 +4409,8 @@ static const struct ieee80211_ops b43_hw_ops = {
4317 .set_key = b43_op_set_key, 4409 .set_key = b43_op_set_key,
4318 .get_stats = b43_op_get_stats, 4410 .get_stats = b43_op_get_stats,
4319 .get_tx_stats = b43_op_get_tx_stats, 4411 .get_tx_stats = b43_op_get_tx_stats,
4412 .get_tsf = b43_op_get_tsf,
4413 .set_tsf = b43_op_set_tsf,
4320 .start = b43_op_start, 4414 .start = b43_op_start,
4321 .stop = b43_op_stop, 4415 .stop = b43_op_stop,
4322 .set_tim = b43_op_beacon_set_tim, 4416 .set_tim = b43_op_beacon_set_tim,
@@ -4446,6 +4540,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4446 break; 4540 break;
4447 case B43_PHYTYPE_G: 4541 case B43_PHYTYPE_G:
4448 case B43_PHYTYPE_N: 4542 case B43_PHYTYPE_N:
4543 case B43_PHYTYPE_LP:
4449 have_2ghz_phy = 1; 4544 have_2ghz_phy = 1;
4450 break; 4545 break;
4451 default: 4546 default:
@@ -4657,9 +4752,10 @@ static int b43_wireless_init(struct ssb_device *dev)
4657 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); 4752 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
4658 4753
4659 ssb_set_devtypedata(dev, wl); 4754 ssb_set_devtypedata(dev, wl);
4660 b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id);
4755 b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n",
4756 dev->bus->chip_id, dev->id.revision);
4661 err = 0; 4757 err = 0;
4662 out:
4758out:
4663 return err; 4759 return err;
4664} 4760}
4665 4761
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index f871a252cb5..40abcf5d1b4 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -40,6 +40,24 @@
40 40
41 41
42extern int b43_modparam_qos; 42extern int b43_modparam_qos;
43extern int b43_modparam_verbose;
44
45/* Logmessage verbosity levels. Update the b43_modparam_verbose helptext, if
46 * you add or remove levels. */
47enum b43_verbosity {
48 B43_VERBOSITY_ERROR,
49 B43_VERBOSITY_WARN,
50 B43_VERBOSITY_INFO,
51 B43_VERBOSITY_DEBUG,
52 __B43_VERBOSITY_AFTERLAST, /* keep last */
53
54 B43_VERBOSITY_MAX = __B43_VERBOSITY_AFTERLAST - 1,
55#if B43_DEBUG
56 B43_VERBOSITY_DEFAULT = B43_VERBOSITY_DEBUG,
57#else
58 B43_VERBOSITY_DEFAULT = B43_VERBOSITY_INFO,
59#endif
60};
43 61
44 62
45/* Lightweight function to convert a frequency (in Mhz) to a channel number. */ 63/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
@@ -121,4 +139,11 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags);
121void b43_mac_suspend(struct b43_wldev *dev); 139void b43_mac_suspend(struct b43_wldev *dev);
122void b43_mac_enable(struct b43_wldev *dev); 140void b43_mac_enable(struct b43_wldev *dev);
123 141
142
143struct b43_request_fw_context;
144int b43_do_request_fw(struct b43_request_fw_context *ctx,
145 const char *name,
146 struct b43_firmware_file *fw);
147void b43_do_release_fw(struct b43_firmware_file *fw);
148
124#endif /* B43_MAIN_H_ */ 149#endif /* B43_MAIN_H_ */
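
The b43_verbosity levels and the b43_modparam_verbose parameter added to main.h above imply threshold-style filtering of log output, but the helpers that consume the level are not part of this patch. The standalone sketch below only models one plausible gating scheme under that assumption; the b43_log() helper, its default threshold, and the demo main() are illustrative rather than taken from the driver.

#include <stdio.h>

/* Mirror of the b43_verbosity levels added to main.h. */
enum b43_verbosity {
	B43_VERBOSITY_ERROR,
	B43_VERBOSITY_WARN,
	B43_VERBOSITY_INFO,
	B43_VERBOSITY_DEBUG,
};

/* Stand-in for the b43_modparam_verbose module parameter (assumed default). */
static int b43_modparam_verbose = B43_VERBOSITY_INFO;

/* Emit a message only if its level does not exceed the configured threshold. */
static void b43_log(enum b43_verbosity level, const char *msg)
{
	if (b43_modparam_verbose < level)
		return;
	printf("b43: %s\n", msg);
}

int main(void)
{
	b43_log(B43_VERBOSITY_WARN, "printed: WARN is within the INFO threshold");
	b43_log(B43_VERBOSITY_DEBUG, "suppressed: DEBUG exceeds the INFO threshold");
	return 0;
}
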
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index caac4a45f0b..88bb303ae9d 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -3191,6 +3191,7 @@ static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
3191 * Baseband attenuation. Subtract it. */ 3191 * Baseband attenuation. Subtract it. */
3192 bbatt_delta -= 4 * rfatt_delta; 3192 bbatt_delta -= 4 * rfatt_delta;
3193 3193
3194#if B43_DEBUG
3194 if (b43_debug(dev, B43_DBG_XMITPOWER)) { 3195 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
3195 int dbm = pwr_adjust < 0 ? -pwr_adjust : pwr_adjust; 3196 int dbm = pwr_adjust < 0 ? -pwr_adjust : pwr_adjust;
3196 b43dbg(dev->wl, 3197 b43dbg(dev->wl,
@@ -3199,6 +3200,8 @@ static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
3199 (pwr_adjust < 0 ? "-" : ""), Q52_ARG(dbm), 3200 (pwr_adjust < 0 ? "-" : ""), Q52_ARG(dbm),
3200 bbatt_delta, rfatt_delta); 3201 bbatt_delta, rfatt_delta);
3201 } 3202 }
3203#endif /* DEBUG */
3204
3202 /* So do we finally need to adjust something in hardware? */ 3205 /* So do we finally need to adjust something in hardware? */
3203 if ((rfatt_delta == 0) && (bbatt_delta == 0)) 3206 if ((rfatt_delta == 0) && (bbatt_delta == 0))
3204 goto no_adjustment_needed; 3207 goto no_adjustment_needed;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index c5d9dc3667c..58e319d6b1e 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -3,7 +3,7 @@
3 Broadcom B43 wireless driver 3 Broadcom B43 wireless driver
4 IEEE 802.11g LP-PHY driver 4 IEEE 802.11g LP-PHY driver
5 5
6 Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
6 Copyright (c) 2008-2009 Michael Buesch <mb@bu3sch.de>
7 7
8 This program is free software; you can redistribute it and/or modify 8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by 9 it under the terms of the GNU General Public License as published by
@@ -23,8 +23,10 @@
23*/ 23*/
24 24
25#include "b43.h" 25#include "b43.h"
26#include "main.h"
26#include "phy_lp.h" 27#include "phy_lp.h"
27#include "phy_common.h" 28#include "phy_common.h"
29#include "tables_lpphy.h"
28 30
29 31
30static int b43_lpphy_op_allocate(struct b43_wldev *dev) 32static int b43_lpphy_op_allocate(struct b43_wldev *dev)
@@ -57,9 +59,394 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
57 dev->phy.lp = NULL; 59 dev->phy.lp = NULL;
58} 60}
59 61
60static int b43_lpphy_op_init(struct b43_wldev *dev)
62static void lpphy_table_init(struct b43_wldev *dev)
63{
64 //TODO
65}
66
67static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
68{
69 B43_WARN_ON(1);//TODO rev < 2 not supported, yet.
70}
71
72static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
73{
74 struct ssb_bus *bus = dev->dev->bus;
75 struct b43_phy_lp *lpphy = dev->phy.lp;
76
77 b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50);
78 b43_phy_write(dev, B43_LPPHY_AFE_CTL, 0x8800);
79 b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, 0);
80 b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0);
81 b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, 0);
82 b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0);
83 b43_phy_write(dev, B43_PHY_OFDM(0xF9), 0);
84 b43_phy_write(dev, B43_LPPHY_TR_LOOKUP_1, 0);
85 b43_phy_set(dev, B43_LPPHY_ADC_COMPENSATION_CTL, 0x10);
86 b43_phy_maskset(dev, B43_LPPHY_OFDMSYNCTHRESH0, 0xFF00, 0x78);
87 b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xF8FF, 0x200);
88 b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xFF00, 0x7F);
89 b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xFF0F, 0x40);
90 b43_phy_maskset(dev, B43_LPPHY_PREAMBLECONFIRMTO, 0xFF00, 0x2);
91 b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x4000);
92 b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x2000);
93 b43_phy_set(dev, B43_PHY_OFDM(0x10A), 0x1);
94 b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x10);
95 b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0xFF00, 0xF4);
96 b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0x00FF, 0xF100);
97 b43_phy_write(dev, B43_LPPHY_CLIPTHRESH, 0x48);
98 b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0xFF00, 0x46);
99 b43_phy_maskset(dev, B43_PHY_OFDM(0xE4), 0xFF00, 0x10);
100 b43_phy_maskset(dev, B43_LPPHY_PWR_THRESH1, 0xFFF0, 0x9);
101 b43_phy_mask(dev, B43_LPPHY_GAINDIRECTMISMATCH, ~0xF);
102 b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0x00FF, 0x5500);
103 b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xF81F, 0xA0);
104 b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xE0FF, 0x300);
105 b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2A00);
106 if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
107 b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100);
108 b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xA);
109 } else {
110 b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x1E00);
111 b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xD);
112 }
113 b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFFE0, 0x1F);
114 b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC);
115 b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0xFF00, 0x19);
116 b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0x03FF, 0x3C00);
117 b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFC1F, 0x3E0);
118 b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC);
119 b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0x00FF, 0x1900);
120 b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x5800);
121 b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x12);
122 b43_phy_maskset(dev, B43_LPPHY_GAINMISMATCH, 0x0FFF, 0x9000);
123
124 b43_lptab_write(dev, B43_LPTAB16(0x08, 0x14), 0);
125 b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
126
127 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
128 b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40);
129 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00);
130 b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6);
131 b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0x9D00);
132 b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0xFF00, 0xA1);
133 } else /* 5GHz */
134 b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x40);
135
136 b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0xFF00, 0xB3);
137 b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00);
138 b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, 0xFF00, lpphy->rx_pwr_offset);
139 b43_phy_set(dev, B43_LPPHY_RESET_CTL, 0x44);
140 b43_phy_write(dev, B43_LPPHY_RESET_CTL, 0x80);
141 b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, 0xA954);
142 b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_1,
143 0x2000 | ((u16)lpphy->rssi_gs << 10) |
144 ((u16)lpphy->rssi_vc << 4) | lpphy->rssi_vf);
145}
146
147static void lpphy_baseband_init(struct b43_wldev *dev)
148{
149 lpphy_table_init(dev);
150 if (dev->phy.rev >= 2)
151 lpphy_baseband_rev2plus_init(dev);
152 else
153 lpphy_baseband_rev0_1_init(dev);
154}
155
156struct b2062_freqdata {
157 u16 freq;
158 u8 data[6];
159};
160
161/* Initialize the 2062 radio. */
162static void lpphy_2062_init(struct b43_wldev *dev)
163{
164 struct ssb_bus *bus = dev->dev->bus;
165 u32 crystalfreq, pdiv, tmp, ref;
166 unsigned int i;
167 const struct b2062_freqdata *fd = NULL;
168
169 static const struct b2062_freqdata freqdata_tab[] = {
170 { .freq = 12000, .data[0] = 6, .data[1] = 6, .data[2] = 6,
171 .data[3] = 6, .data[4] = 10, .data[5] = 6, },
172 { .freq = 13000, .data[0] = 4, .data[1] = 4, .data[2] = 4,
173 .data[3] = 4, .data[4] = 11, .data[5] = 7, },
174 { .freq = 14400, .data[0] = 3, .data[1] = 3, .data[2] = 3,
175 .data[3] = 3, .data[4] = 12, .data[5] = 7, },
176 { .freq = 16200, .data[0] = 3, .data[1] = 3, .data[2] = 3,
177 .data[3] = 3, .data[4] = 13, .data[5] = 8, },
178 { .freq = 18000, .data[0] = 2, .data[1] = 2, .data[2] = 2,
179 .data[3] = 2, .data[4] = 14, .data[5] = 8, },
180 { .freq = 19200, .data[0] = 1, .data[1] = 1, .data[2] = 1,
181 .data[3] = 1, .data[4] = 14, .data[5] = 9, },
182 };
183
184 b2062_upload_init_table(dev);
185
186 b43_radio_write(dev, B2062_N_TX_CTL3, 0);
187 b43_radio_write(dev, B2062_N_TX_CTL4, 0);
188 b43_radio_write(dev, B2062_N_TX_CTL5, 0);
189 b43_radio_write(dev, B2062_N_PDN_CTL0, 0x40);
190 b43_radio_write(dev, B2062_N_PDN_CTL0, 0);
191 b43_radio_write(dev, B2062_N_CALIB_TS, 0x10);
192 b43_radio_write(dev, B2062_N_CALIB_TS, 0);
193 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
194 b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1);
195 else
196 b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1);
197
198 /* Get the crystal freq, in Hz. */
199 crystalfreq = bus->chipco.pmu.crystalfreq * 1000;
200
201 B43_WARN_ON(!(bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU));
202 B43_WARN_ON(crystalfreq == 0);
203
204 if (crystalfreq >= 30000000) {
205 pdiv = 1;
206 b43_radio_mask(dev, B2062_S_RFPLL_CTL1, 0xFFFB);
207 } else {
208 pdiv = 2;
209 b43_radio_set(dev, B2062_S_RFPLL_CTL1, 0x4);
210 }
211
212 tmp = (800000000 * pdiv + crystalfreq) / (32000000 * pdiv);
213 tmp = (tmp - 1) & 0xFF;
214 b43_radio_write(dev, B2062_S_RFPLL_CTL18, tmp);
215
216 tmp = (2 * crystalfreq + 1000000 * pdiv) / (2000000 * pdiv);
217 tmp = ((tmp & 0xFF) - 1) & 0xFFFF;
218 b43_radio_write(dev, B2062_S_RFPLL_CTL19, tmp);
219
220 ref = (1000 * pdiv + 2 * crystalfreq) / (2000 * pdiv);
221 ref &= 0xFFFF;
222 for (i = 0; i < ARRAY_SIZE(freqdata_tab); i++) {
223 if (ref < freqdata_tab[i].freq) {
224 fd = &freqdata_tab[i];
225 break;
226 }
227 }
228 if (!fd)
229 fd = &freqdata_tab[ARRAY_SIZE(freqdata_tab) - 1];
230 b43dbg(dev->wl, "b2062: Using crystal tab entry %u kHz.\n",
231 fd->freq); /* FIXME: Keep this printk until the code is fully debugged. */
232
233 b43_radio_write(dev, B2062_S_RFPLL_CTL8,
234 ((u16)(fd->data[1]) << 4) | fd->data[0]);
235 b43_radio_write(dev, B2062_S_RFPLL_CTL9,
236 ((u16)(fd->data[3]) << 4) | fd->data[2]);
237 b43_radio_write(dev, B2062_S_RFPLL_CTL10, fd->data[4]);
238 b43_radio_write(dev, B2062_S_RFPLL_CTL11, fd->data[5]);
239}
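
The RFPLL setup in lpphy_2062_init() is mostly integer divider arithmetic driven by the PMU crystal frequency. As a sanity check it helps to run the same math for one concrete clock; the standalone sketch below assumes a 20 MHz crystal (20000 kHz from chipco.pmu.crystalfreq), which is only an example value and not something stated in this patch, and it merely mirrors the calculation on the host without touching any hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed example: chipco.pmu.crystalfreq == 20000 (kHz), i.e. 20 MHz. */
	uint32_t crystalfreq = 20000 * 1000;
	uint32_t pdiv, tmp, ref;

	/* Same pdiv selection as lpphy_2062_init(). */
	pdiv = (crystalfreq >= 30000000) ? 1 : 2;		/* -> 2 */

	/* RFPLL CTL18: ((800 MHz * pdiv + f) / (32 MHz * pdiv)) - 1, truncated. */
	tmp = (800000000 * pdiv + crystalfreq) / (32000000 * pdiv);
	tmp = (tmp - 1) & 0xFF;
	printf("RFPLL_CTL18 = 0x%02X\n", (unsigned int)tmp);	/* 25 - 1 = 0x18 */

	/* RFPLL CTL19: ((2f + 1 MHz * pdiv) / (2 MHz * pdiv)) - 1, truncated. */
	tmp = (2 * crystalfreq + 1000000 * pdiv) / (2000000 * pdiv);
	tmp = ((tmp & 0xFF) - 1) & 0xFFFF;
	printf("RFPLL_CTL19 = 0x%02X\n", (unsigned int)tmp);	/* 10 - 1 = 0x09 */

	/* Reference in kHz; freqdata_tab picks the first entry with freq > ref. */
	ref = (1000 * pdiv + 2 * crystalfreq) / (2000 * pdiv);
	ref &= 0xFFFF;
	printf("ref = %u kHz -> selects the 12000 kHz table entry\n",
	       (unsigned int)ref);

	return 0;
}
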
240
241/* Initialize the 2063 radio. */
242static void lpphy_2063_init(struct b43_wldev *dev)
61{ 243{
62 //TODO 244 //TODO
245}
246
247static void lpphy_sync_stx(struct b43_wldev *dev)
248{
249 //TODO
250}
251
252static void lpphy_radio_init(struct b43_wldev *dev)
253{
254 /* The radio is attached through the 4wire bus. */
255 b43_phy_set(dev, B43_LPPHY_FOURWIRE_CTL, 0x2);
256 udelay(1);
257 b43_phy_mask(dev, B43_LPPHY_FOURWIRE_CTL, 0xFFFD);
258 udelay(1);
259
260 if (dev->phy.rev < 2) {
261 lpphy_2062_init(dev);
262 } else {
263 lpphy_2063_init(dev);
264 lpphy_sync_stx(dev);
265 b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80);
266 b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0);
267 //TODO Do something on the backplane
268 }
269}
270
271/* Read the TX power control mode from hardware. */
272static void lpphy_read_tx_pctl_mode_from_hardware(struct b43_wldev *dev)
273{
274 struct b43_phy_lp *lpphy = dev->phy.lp;
275 u16 ctl;
276
277 ctl = b43_phy_read(dev, B43_LPPHY_TX_PWR_CTL_CMD);
278 switch (ctl & B43_LPPHY_TX_PWR_CTL_CMD_MODE) {
279 case B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF:
280 lpphy->txpctl_mode = B43_LPPHY_TXPCTL_OFF;
281 break;
282 case B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW:
283 lpphy->txpctl_mode = B43_LPPHY_TXPCTL_SW;
284 break;
285 case B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW:
286 lpphy->txpctl_mode = B43_LPPHY_TXPCTL_HW;
287 break;
288 default:
289 lpphy->txpctl_mode = B43_LPPHY_TXPCTL_UNKNOWN;
290 B43_WARN_ON(1);
291 break;
292 }
293}
294
295/* Set the TX power control mode in hardware. */
296static void lpphy_write_tx_pctl_mode_to_hardware(struct b43_wldev *dev)
297{
298 struct b43_phy_lp *lpphy = dev->phy.lp;
299 u16 ctl;
300
301 switch (lpphy->txpctl_mode) {
302 case B43_LPPHY_TXPCTL_OFF:
303 ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF;
304 break;
305 case B43_LPPHY_TXPCTL_HW:
306 ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW;
307 break;
308 case B43_LPPHY_TXPCTL_SW:
309 ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW;
310 break;
311 default:
312 ctl = 0;
313 B43_WARN_ON(1);
314 }
315 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
316 (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, ctl);
317}
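
lpphy_write_tx_pctl_mode_to_hardware() updates only the mode field of the TX power control command register, which occupies the top three bits (B43_LPPHY_TX_PWR_CTL_CMD_MODE is 0xE000, with OFF/SW/HW encoded as 0x0000/0x8000/0xE000 in phy_lp.h further down in this patch). Assuming b43_phy_maskset() keeps the bits selected by the mask and ORs in the new value, as its other uses in this patch suggest, the bit arithmetic can be checked in isolation with the small program below; the starting register value 0x1234 is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

#define TX_PWR_CTL_CMD_MODE	0xE000	/* mode field mask */
#define TX_PWR_CTL_CMD_MODE_OFF	0x0000
#define TX_PWR_CTL_CMD_MODE_SW	0x8000
#define TX_PWR_CTL_CMD_MODE_HW	0xE000

/* Models the assumed b43_phy_maskset(): keep masked bits, OR in the new value. */
static uint16_t maskset(uint16_t reg, uint16_t mask, uint16_t set)
{
	return (reg & mask) | set;
}

int main(void)
{
	/* Pretend the command register currently reads 0x1234 (arbitrary). */
	uint16_t reg = 0x1234;

	/* Switch the mode field to HARDWARE; the low 13 bits are untouched. */
	reg = maskset(reg, (uint16_t)~TX_PWR_CTL_CMD_MODE, TX_PWR_CTL_CMD_MODE_HW);
	printf("after HW:  0x%04X\n", (unsigned int)reg);	/* 0xF234 */

	/* Switch it back to OFF: only the top three bits change again. */
	reg = maskset(reg, (uint16_t)~TX_PWR_CTL_CMD_MODE, TX_PWR_CTL_CMD_MODE_OFF);
	printf("after OFF: 0x%04X\n", (unsigned int)reg);	/* 0x1234 */

	return 0;
}
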
318
319static void lpphy_set_tx_power_control(struct b43_wldev *dev,
320 enum b43_lpphy_txpctl_mode mode)
321{
322 struct b43_phy_lp *lpphy = dev->phy.lp;
323 enum b43_lpphy_txpctl_mode oldmode;
324
325 oldmode = lpphy->txpctl_mode;
326 lpphy_read_tx_pctl_mode_from_hardware(dev);
327 if (lpphy->txpctl_mode == mode)
328 return;
329 lpphy->txpctl_mode = mode;
330
331 if (oldmode == B43_LPPHY_TXPCTL_HW) {
332 //TODO Update TX Power NPT
333 //TODO Clear all TX Power offsets
334 } else {
335 if (mode == B43_LPPHY_TXPCTL_HW) {
336 //TODO Recalculate target TX power
337 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
338 0xFF80, lpphy->tssi_idx);
339 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM,
340 0x8FFF, ((u16)lpphy->tssi_npt << 16));
341 //TODO Set "TSSI Transmit Count" variable to total transmitted frame count
342 //TODO Disable TX gain override
343 lpphy->tx_pwr_idx_over = -1;
344 }
345 }
346 if (dev->phy.rev >= 2) {
347 if (mode == B43_LPPHY_TXPCTL_HW)
348 b43_phy_maskset(dev, B43_PHY_OFDM(0xD0), 0xFD, 0x2);
349 else
350 b43_phy_maskset(dev, B43_PHY_OFDM(0xD0), 0xFD, 0);
351 }
352 lpphy_write_tx_pctl_mode_to_hardware(dev);
353}
354
355static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index)
356{
357 struct b43_phy_lp *lpphy = dev->phy.lp;
358
359 lpphy->tx_pwr_idx_over = index;
360 if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF)
361 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW);
362
363 //TODO
364}
365
366static void lpphy_btcoex_override(struct b43_wldev *dev)
367{
368 b43_write16(dev, B43_MMIO_BTCOEX_CTL, 0x3);
369 b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF);
370}
371
372static void lpphy_pr41573_workaround(struct b43_wldev *dev)
373{
374 struct b43_phy_lp *lpphy = dev->phy.lp;
375 u32 *saved_tab;
376 const unsigned int saved_tab_size = 256;
377 enum b43_lpphy_txpctl_mode txpctl_mode;
378 s8 tx_pwr_idx_over;
379 u16 tssi_npt, tssi_idx;
380
381 saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL);
382 if (!saved_tab) {
383 b43err(dev->wl, "PR41573 failed. Out of memory!\n");
384 return;
385 }
386
387 lpphy_read_tx_pctl_mode_from_hardware(dev);
388 txpctl_mode = lpphy->txpctl_mode;
389 tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
390 tssi_npt = lpphy->tssi_npt;
391 tssi_idx = lpphy->tssi_idx;
392
393 if (dev->phy.rev < 2) {
394 b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
395 saved_tab_size, saved_tab);
396 } else {
397 b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140),
398 saved_tab_size, saved_tab);
399 }
400 //TODO
401
402 kfree(saved_tab);
403}
404
405static void lpphy_calibration(struct b43_wldev *dev)
406{
407 struct b43_phy_lp *lpphy = dev->phy.lp;
408 enum b43_lpphy_txpctl_mode saved_pctl_mode;
409
410 b43_mac_suspend(dev);
411
412 lpphy_btcoex_override(dev);
413 lpphy_read_tx_pctl_mode_from_hardware(dev);
414 saved_pctl_mode = lpphy->txpctl_mode;
415 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
416 //TODO Perform transmit power table I/Q LO calibration
417 if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
418 lpphy_pr41573_workaround(dev);
419 //TODO If a full calibration has not been performed on this channel yet, perform PAPD TX-power calibration
420 lpphy_set_tx_power_control(dev, saved_pctl_mode);
421 //TODO Perform I/Q calibration with a single control value set
422
423 b43_mac_enable(dev);
424}
425
426/* Initialize TX power control */
427static void lpphy_tx_pctl_init(struct b43_wldev *dev)
428{
429 if (0/*FIXME HWPCTL capable */) {
430 //TODO
431 } else { /* This device is only software TX power control capable. */
432 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
433 //TODO
434 } else {
435 //TODO
436 }
437 //TODO set BB multiplier to 0x0096
438 }
439}
440
441static int b43_lpphy_op_init(struct b43_wldev *dev)
442{
443 /* TODO: band SPROM */
444 lpphy_baseband_init(dev);
445 lpphy_radio_init(dev);
446 //TODO calibrate RC
447 //TODO set channel
448 lpphy_tx_pctl_init(dev);
449 //TODO full calib
63 450
64 return 0; 451 return 0;
65} 452}
@@ -115,7 +502,9 @@ static int b43_lpphy_op_switch_channel(struct b43_wldev *dev,
115 502
116static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev) 503static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
117{ 504{
118 return 1; /* Default to channel 1 */
505 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
506 return 1;
507 return 36;
119} 508}
120 509
121static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna) 510static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h
index b0b5357abf9..18370b4ac38 100644
--- a/drivers/net/wireless/b43/phy_lp.h
+++ b/drivers/net/wireless/b43/phy_lp.h
@@ -4,8 +4,285 @@
4/* Definitions for the LP-PHY */ 4/* Definitions for the LP-PHY */
5 5
6 6
7/* The CCK PHY register range. */
8#define B43_LPPHY_B_VERSION B43_PHY_CCK(0x00) /* B PHY version */
9#define B43_LPPHY_B_BBCONFIG B43_PHY_CCK(0x01) /* B PHY BBConfig */
10#define B43_LPPHY_B_RX_STAT0 B43_PHY_CCK(0x04) /* B PHY RX Status0 */
11#define B43_LPPHY_B_RX_STAT1 B43_PHY_CCK(0x05) /* B PHY RX Status1 */
12#define B43_LPPHY_B_CRS_THRESH B43_PHY_CCK(0x06) /* B PHY CRS Thresh */
13#define B43_LPPHY_B_TXERROR B43_PHY_CCK(0x07) /* B PHY TxError */
14#define B43_LPPHY_B_CHANNEL B43_PHY_CCK(0x08) /* B PHY Channel */
15#define B43_LPPHY_B_WORKAROUND B43_PHY_CCK(0x09) /* B PHY workaround */
16#define B43_LPPHY_B_TEST B43_PHY_CCK(0x0A) /* B PHY Test */
17#define B43_LPPHY_B_FOURWIRE_ADDR B43_PHY_CCK(0x0B) /* B PHY Fourwire Address */
18#define B43_LPPHY_B_FOURWIRE_DATA_HI B43_PHY_CCK(0x0C) /* B PHY Fourwire Data Hi */
19#define B43_LPPHY_B_FOURWIRE_DATA_LO B43_PHY_CCK(0x0D) /* B PHY Fourwire Data Lo */
20#define B43_LPPHY_B_BIST_STAT B43_PHY_CCK(0x0E) /* B PHY Bist Status */
21#define B43_LPPHY_PA_RAMP_TX_TO B43_PHY_CCK(0x10) /* PA Ramp TX Timeout */
22#define B43_LPPHY_RF_SYNTH_DC_TIMER B43_PHY_CCK(0x11) /* RF Synth DC Timer */
23#define B43_LPPHY_PA_RAMP_TX_TIME_IN B43_PHY_CCK(0x12) /* PA ramp TX Time in */
24#define B43_LPPHY_RX_FILTER_TIME_IN B43_PHY_CCK(0x13) /* RX Filter Time in */
25#define B43_LPPHY_PLL_COEFF_S B43_PHY_CCK(0x18) /* PLL Coefficient(s) */
26#define B43_LPPHY_PLL_OUT B43_PHY_CCK(0x19) /* PLL Out */
27#define B43_LPPHY_RSSI_THRES B43_PHY_CCK(0x20) /* RSSI Threshold */
28#define B43_LPPHY_IQ_THRES_HH B43_PHY_CCK(0x21) /* IQ Threshold HH */
29#define B43_LPPHY_IQ_THRES_H B43_PHY_CCK(0x22) /* IQ Threshold H */
30#define B43_LPPHY_IQ_THRES_L B43_PHY_CCK(0x23) /* IQ Threshold L */
31#define B43_LPPHY_IQ_THRES_LL B43_PHY_CCK(0x24) /* IQ Threshold LL */
32#define B43_LPPHY_AGC_GAIN B43_PHY_CCK(0x25) /* AGC Gain */
33#define B43_LPPHY_LNA_GAIN_RANGE B43_PHY_CCK(0x26) /* LNA Gain Range */
34#define B43_LPPHY_JSSI B43_PHY_CCK(0x27) /* JSSI */
35#define B43_LPPHY_TSSI_CTL B43_PHY_CCK(0x28) /* TSSI Control */
36#define B43_LPPHY_TSSI B43_PHY_CCK(0x29) /* TSSI */
37#define B43_LPPHY_TR_LOSS B43_PHY_CCK(0x2A) /* TR Loss */
38#define B43_LPPHY_LO_LEAKAGE B43_PHY_CCK(0x2B) /* LO Leakage */
39#define B43_LPPHY_LO_RSSIACC B43_PHY_CCK(0x2C) /* LO RSSIAcc */
40#define B43_LPPHY_LO_IQ_MAG_ACC B43_PHY_CCK(0x2D) /* LO IQ Mag Acc */
41#define B43_LPPHY_TX_DCOFFSET1 B43_PHY_CCK(0x2E) /* TX DCOffset1 */
42#define B43_LPPHY_TX_DCOFFSET2 B43_PHY_CCK(0x2F) /* TX DCOffset2 */
43#define B43_LPPHY_SYNCPEAKCNT B43_PHY_CCK(0x30) /* SyncPeakCnt */
44#define B43_LPPHY_SYNCFREQ B43_PHY_CCK(0x31) /* SyncFreq */
45#define B43_LPPHY_SYNCDIVERSITYCTL B43_PHY_CCK(0x32) /* SyncDiversityControl */
46#define B43_LPPHY_PEAKENERGYL B43_PHY_CCK(0x33) /* PeakEnergyL */
47#define B43_LPPHY_PEAKENERGYH B43_PHY_CCK(0x34) /* PeakEnergyH */
48#define B43_LPPHY_SYNCCTL B43_PHY_CCK(0x35) /* SyncControl */
49#define B43_LPPHY_DSSSSTEP B43_PHY_CCK(0x38) /* DsssStep */
50#define B43_LPPHY_DSSSWARMUP B43_PHY_CCK(0x39) /* DsssWarmup */
51#define B43_LPPHY_DSSSSIGPOW B43_PHY_CCK(0x3D) /* DsssSigPow */
53#define B43_LPPHY_SFDDETECTBLOCKTIME B43_PHY_CCK(0x40) /* SfdDetectBlockTime */
53#define B43_LPPHY_SFDTO B43_PHY_CCK(0x41) /* SFDTimeOut */
54#define B43_LPPHY_SFDCTL B43_PHY_CCK(0x42) /* SFDControl */
55#define B43_LPPHY_RXDBG B43_PHY_CCK(0x43) /* rxDebug */
56#define B43_LPPHY_RX_DELAYCOMP B43_PHY_CCK(0x44) /* RX DelayComp */
57#define B43_LPPHY_CRSDROPOUTTO B43_PHY_CCK(0x45) /* CRSDropoutTimeout */
58#define B43_LPPHY_PSEUDOSHORTTO B43_PHY_CCK(0x46) /* PseudoShortTimeout */
59#define B43_LPPHY_PR3931 B43_PHY_CCK(0x47) /* PR3931 */
60#define B43_LPPHY_DSSSCOEFF1 B43_PHY_CCK(0x48) /* DSSSCoeff1 */
61#define B43_LPPHY_DSSSCOEFF2 B43_PHY_CCK(0x49) /* DSSSCoeff2 */
62#define B43_LPPHY_CCKCOEFF1 B43_PHY_CCK(0x4A) /* CCKCoeff1 */
63#define B43_LPPHY_CCKCOEFF2 B43_PHY_CCK(0x4B) /* CCKCoeff2 */
64#define B43_LPPHY_TRCORR B43_PHY_CCK(0x4C) /* TRCorr */
65#define B43_LPPHY_ANGLESCALE B43_PHY_CCK(0x4D) /* AngleScale */
66#define B43_LPPHY_OPTIONALMODES2 B43_PHY_CCK(0x4F) /* OptionalModes2 */
67#define B43_LPPHY_CCKLMSSTEPSIZE B43_PHY_CCK(0x50) /* CCKLMSStepSize */
68#define B43_LPPHY_DFEBYPASS B43_PHY_CCK(0x51) /* DFEBypass */
69#define B43_LPPHY_CCKSTARTDELAYLONG B43_PHY_CCK(0x52) /* CCKStartDelayLong */
70#define B43_LPPHY_CCKSTARTDELAYSHORT B43_PHY_CCK(0x53) /* CCKStartDelayShort */
71#define B43_LPPHY_PPROCCHDELAY B43_PHY_CCK(0x54) /* PprocChDelay */
72#define B43_LPPHY_PPROCONOFF B43_PHY_CCK(0x55) /* PProcOnOff */
73#define B43_LPPHY_LNAGAINTWOBIT10 B43_PHY_CCK(0x5B) /* LNAGainTwoBit10 */
74#define B43_LPPHY_LNAGAINTWOBIT32 B43_PHY_CCK(0x5C) /* LNAGainTwoBit32 */
75#define B43_LPPHY_OPTIONALMODES B43_PHY_CCK(0x5D) /* OptionalModes */
76#define B43_LPPHY_B_RX_STAT2 B43_PHY_CCK(0x5E) /* B PHY RX Status2 */
77#define B43_LPPHY_B_RX_STAT3 B43_PHY_CCK(0x5F) /* B PHY RX Status3 */
78#define B43_LPPHY_PWDNDACDELAY B43_PHY_CCK(0x63) /* pwdnDacDelay */
79#define B43_LPPHY_FINEDIGIGAIN_CTL B43_PHY_CCK(0x67) /* FineDigiGain Control */
80#define B43_LPPHY_LG2GAINTBLLNA8 B43_PHY_CCK(0x68) /* Lg2GainTblLNA8 */
81#define B43_LPPHY_LG2GAINTBLLNA28 B43_PHY_CCK(0x69) /* Lg2GainTblLNA28 */
82#define B43_LPPHY_GAINTBLLNATRSW B43_PHY_CCK(0x6A) /* GainTblLNATrSw */
83#define B43_LPPHY_PEAKENERGY B43_PHY_CCK(0x6B) /* PeakEnergy */
84#define B43_LPPHY_LG2INITGAIN B43_PHY_CCK(0x6C) /* lg2InitGain */
85#define B43_LPPHY_BLANKCOUNTLNAPGA B43_PHY_CCK(0x6D) /* BlankCountLnaPga */
86#define B43_LPPHY_LNAGAINTWOBIT54 B43_PHY_CCK(0x6E) /* LNAGainTwoBit54 */
87#define B43_LPPHY_LNAGAINTWOBIT76 B43_PHY_CCK(0x6F) /* LNAGainTwoBit76 */
88#define B43_LPPHY_JSSICTL B43_PHY_CCK(0x70) /* JSSIControl */
89#define B43_LPPHY_LG2GAINTBLLNA44 B43_PHY_CCK(0x71) /* Lg2GainTblLNA44 */
90#define B43_LPPHY_LG2GAINTBLLNA62 B43_PHY_CCK(0x72) /* Lg2GainTblLNA62 */
7 91
92/* The OFDM PHY register range. */
93#define B43_LPPHY_VERSION B43_PHY_OFDM(0x00) /* Version */
94#define B43_LPPHY_BBCONFIG B43_PHY_OFDM(0x01) /* BBConfig */
95#define B43_LPPHY_RX_STAT0 B43_PHY_OFDM(0x04) /* RX Status0 */
96#define B43_LPPHY_RX_STAT1 B43_PHY_OFDM(0x05) /* RX Status1 */
97#define B43_LPPHY_TX_ERROR B43_PHY_OFDM(0x07) /* TX Error */
98#define B43_LPPHY_CHANNEL B43_PHY_OFDM(0x08) /* Channel */
99#define B43_LPPHY_WORKAROUND B43_PHY_OFDM(0x09) /* workaround */
100#define B43_LPPHY_FOURWIRE_ADDR B43_PHY_OFDM(0x0B) /* Fourwire Address */
101#define B43_LPPHY_FOURWIREDATAHI B43_PHY_OFDM(0x0C) /* FourwireDataHi */
102#define B43_LPPHY_FOURWIREDATALO B43_PHY_OFDM(0x0D) /* FourwireDataLo */
103#define B43_LPPHY_BISTSTAT0 B43_PHY_OFDM(0x0E) /* BistStatus0 */
104#define B43_LPPHY_BISTSTAT1 B43_PHY_OFDM(0x0F) /* BistStatus1 */
105#define B43_LPPHY_CRSGAIN_CTL B43_PHY_OFDM(0x10) /* crsgain Control */
106#define B43_LPPHY_OFDMPWR_THRESH0 B43_PHY_OFDM(0x11) /* ofdmPower Thresh0 */
107#define B43_LPPHY_OFDMPWR_THRESH1 B43_PHY_OFDM(0x12) /* ofdmPower Thresh1 */
108#define B43_LPPHY_OFDMPWR_THRESH2 B43_PHY_OFDM(0x13) /* ofdmPower Thresh2 */
109#define B43_LPPHY_DSSSPWR_THRESH0 B43_PHY_OFDM(0x14) /* dsssPower Thresh0 */
110#define B43_LPPHY_DSSSPWR_THRESH1 B43_PHY_OFDM(0x15) /* dsssPower Thresh1 */
111#define B43_LPPHY_MINPWR_LEVEL B43_PHY_OFDM(0x16) /* MinPower Level */
112#define B43_LPPHY_OFDMSYNCTHRESH0 B43_PHY_OFDM(0x17) /* ofdmSyncThresh0 */
113#define B43_LPPHY_OFDMSYNCTHRESH1 B43_PHY_OFDM(0x18) /* ofdmSyncThresh1 */
114#define B43_LPPHY_FINEFREQEST B43_PHY_OFDM(0x19) /* FineFreqEst */
115#define B43_LPPHY_IDLEAFTERPKTRXTO B43_PHY_OFDM(0x1A) /* IDLEafterPktRXTimeout */
116#define B43_LPPHY_LTRN_CTL B43_PHY_OFDM(0x1B) /* LTRN Control */
117#define B43_LPPHY_DCOFFSETTRANSIENT B43_PHY_OFDM(0x1C) /* DCOffsetTransient */
118#define B43_LPPHY_PREAMBLEINTO B43_PHY_OFDM(0x1D) /* PreambleInTimeout */
119#define B43_LPPHY_PREAMBLECONFIRMTO B43_PHY_OFDM(0x1E) /* PreambleConfirmTimeout */
120#define B43_LPPHY_CLIPTHRESH B43_PHY_OFDM(0x1F) /* ClipThresh */
121#define B43_LPPHY_CLIPCTRTHRESH B43_PHY_OFDM(0x20) /* ClipCtrThresh */
122#define B43_LPPHY_OFDMSYNCTIMER_CTL B43_PHY_OFDM(0x21) /* ofdmSyncTimer Control */
123#define B43_LPPHY_WAITFORPHYSELTO B43_PHY_OFDM(0x22) /* WaitforPHYSelTimeout */
124#define B43_LPPHY_HIGAINDB B43_PHY_OFDM(0x23) /* HiGainDB */
125#define B43_LPPHY_LOWGAINDB B43_PHY_OFDM(0x24) /* LowGainDB */
126#define B43_LPPHY_VERYLOWGAINDB B43_PHY_OFDM(0x25) /* VeryLowGainDB */
127#define B43_LPPHY_GAINMISMATCH B43_PHY_OFDM(0x26) /* gainMismatch */
128#define B43_LPPHY_GAINDIRECTMISMATCH B43_PHY_OFDM(0x27) /* gaindirectMismatch */
129#define B43_LPPHY_PWR_THRESH0 B43_PHY_OFDM(0x28) /* Power Thresh0 */
130#define B43_LPPHY_PWR_THRESH1 B43_PHY_OFDM(0x29) /* Power Thresh1 */
131#define B43_LPPHY_DETECTOR_DELAY_ADJUST B43_PHY_OFDM(0x2A) /* Detector Delay Adjust */
132#define B43_LPPHY_REDUCED_DETECTOR_DELAY B43_PHY_OFDM(0x2B) /* Reduced Detector Delay */
133#define B43_LPPHY_DATA_TO B43_PHY_OFDM(0x2C) /* data Timeout */
134#define B43_LPPHY_CORRELATOR_DIS_DELAY B43_PHY_OFDM(0x2D) /* correlator Dis Delay */
135#define B43_LPPHY_DIVERSITY_GAINBACK B43_PHY_OFDM(0x2E) /* Diversity GainBack */
136#define B43_LPPHY_DSSS_CONFIRM_CNT B43_PHY_OFDM(0x2F) /* DSSS Confirm Cnt */
137#define B43_LPPHY_DC_BLANK_INT B43_PHY_OFDM(0x30) /* DC Blank Interval */
138#define B43_LPPHY_GAIN_MISMATCH_LIMIT B43_PHY_OFDM(0x31) /* gain Mismatch Limit */
139#define B43_LPPHY_CRS_ED_THRESH B43_PHY_OFDM(0x32) /* crs ed thresh */
140#define B43_LPPHY_PHASE_SHIFT_CTL B43_PHY_OFDM(0x33) /* phase shift Control */
141#define B43_LPPHY_INPUT_PWRDB B43_PHY_OFDM(0x34) /* Input PowerDB */
142#define B43_LPPHY_OFDM_SYNC_CTL B43_PHY_OFDM(0x35) /* ofdm sync Control */
143#define B43_LPPHY_AFE_ADC_CTL_0 B43_PHY_OFDM(0x36) /* Afe ADC Control 0 */
144#define B43_LPPHY_AFE_ADC_CTL_1 B43_PHY_OFDM(0x37) /* Afe ADC Control 1 */
145#define B43_LPPHY_AFE_ADC_CTL_2 B43_PHY_OFDM(0x38) /* Afe ADC Control 2 */
146#define B43_LPPHY_AFE_DAC_CTL B43_PHY_OFDM(0x39) /* Afe DAC Control */
147#define B43_LPPHY_AFE_CTL B43_PHY_OFDM(0x3A) /* Afe Control */
148#define B43_LPPHY_AFE_CTL_OVR B43_PHY_OFDM(0x3B) /* Afe Control Ovr */
149#define B43_LPPHY_AFE_CTL_OVRVAL B43_PHY_OFDM(0x3C) /* Afe Control OvrVal */
150#define B43_LPPHY_AFE_RSSI_CTL_0 B43_PHY_OFDM(0x3D) /* Afe RSSI Control 0 */
151#define B43_LPPHY_AFE_RSSI_CTL_1 B43_PHY_OFDM(0x3E) /* Afe RSSI Control 1 */
152#define B43_LPPHY_AFE_RSSI_SEL B43_PHY_OFDM(0x3F) /* Afe RSSI Sel */
153#define B43_LPPHY_RADAR_THRESH B43_PHY_OFDM(0x40) /* Radar Thresh */
154#define B43_LPPHY_RADAR_BLANK_INT B43_PHY_OFDM(0x41) /* Radar blank Interval */
155#define B43_LPPHY_RADAR_MIN_FM_INT B43_PHY_OFDM(0x42) /* Radar min fm Interval */
156#define B43_LPPHY_RADAR_GAIN_TO B43_PHY_OFDM(0x43) /* Radar gain timeout */
157#define B43_LPPHY_RADAR_PULSE_TO B43_PHY_OFDM(0x44) /* Radar pulse timeout */
158#define B43_LPPHY_RADAR_DETECT_FM_CTL B43_PHY_OFDM(0x45) /* Radar detect FM Control */
159#define B43_LPPHY_RADAR_DETECT_EN B43_PHY_OFDM(0x46) /* Radar detect En */
160#define B43_LPPHY_RADAR_RD_DATA_REG B43_PHY_OFDM(0x47) /* Radar Rd Data Reg */
161#define B43_LPPHY_LP_PHY_CTL B43_PHY_OFDM(0x48) /* LP PHY Control */
162#define B43_LPPHY_CLASSIFIER_CTL B43_PHY_OFDM(0x49) /* classifier Control */
163#define B43_LPPHY_RESET_CTL B43_PHY_OFDM(0x4A) /* reset Control */
164#define B43_LPPHY_CLKEN_CTL B43_PHY_OFDM(0x4B) /* ClkEn Control */
165#define B43_LPPHY_RF_OVERRIDE_0 B43_PHY_OFDM(0x4C) /* RF Override 0 */
166#define B43_LPPHY_RF_OVERRIDE_VAL_0 B43_PHY_OFDM(0x4D) /* RF Override Val 0 */
167#define B43_LPPHY_TR_LOOKUP_1 B43_PHY_OFDM(0x4E) /* TR Lookup 1 */
168#define B43_LPPHY_TR_LOOKUP_2 B43_PHY_OFDM(0x4F) /* TR Lookup 2 */
169#define B43_LPPHY_RSSISELLOOKUP1 B43_PHY_OFDM(0x50) /* RssiSelLookup1 */
170#define B43_LPPHY_IQLO_CAL_CMD B43_PHY_OFDM(0x51) /* iqlo Cal Cmd */
171#define B43_LPPHY_IQLO_CAL_CMD_N_NUM B43_PHY_OFDM(0x52) /* iqlo Cal Cmd N num */
172#define B43_LPPHY_IQLO_CAL_CMD_G_CTL B43_PHY_OFDM(0x53) /* iqlo Cal Cmd G control */
173#define B43_LPPHY_MACINT_DBG_REGISTER B43_PHY_OFDM(0x54) /* macint Debug Register */
174#define B43_LPPHY_TABLE_ADDR B43_PHY_OFDM(0x55) /* Table Address */
175#define B43_LPPHY_TABLEDATALO B43_PHY_OFDM(0x56) /* TabledataLo */
176#define B43_LPPHY_TABLEDATAHI B43_PHY_OFDM(0x57) /* TabledataHi */
177#define B43_LPPHY_PHY_CRS_ENABLE_ADDR B43_PHY_OFDM(0x58) /* phy CRS Enable Address */
178#define B43_LPPHY_IDLETIME_CTL B43_PHY_OFDM(0x59) /* Idletime Control */
179#define B43_LPPHY_IDLETIME_CRS_ON_LO B43_PHY_OFDM(0x5A) /* Idletime CRS On Lo */
180#define B43_LPPHY_IDLETIME_CRS_ON_HI B43_PHY_OFDM(0x5B) /* Idletime CRS On Hi */
181#define B43_LPPHY_IDLETIME_MEAS_TIME_LO B43_PHY_OFDM(0x5C) /* Idletime Meas Time Lo */
182#define B43_LPPHY_IDLETIME_MEAS_TIME_HI B43_PHY_OFDM(0x5D) /* Idletime Meas Time Hi */
183#define B43_LPPHY_RESET_LEN_OFDM_TX_ADDR B43_PHY_OFDM(0x5E) /* Reset len Ofdm TX Address */
184#define B43_LPPHY_RESET_LEN_OFDM_RX_ADDR B43_PHY_OFDM(0x5F) /* Reset len Ofdm RX Address */
185#define B43_LPPHY_REG_CRS_ENABLE B43_PHY_OFDM(0x60) /* reg crs enable */
186#define B43_LPPHY_PLCP_TMT_STR0_CTR_MIN B43_PHY_OFDM(0x61) /* PLCP Tmt Str0 Ctr Min */
187#define B43_LPPHY_PKT_FSM_RESET_LEN_VAL B43_PHY_OFDM(0x62) /* Pkt fsm Reset Len Value */
188#define B43_LPPHY_READSYM2RESET_CTL B43_PHY_OFDM(0x63) /* readsym2reset Control */
189#define B43_LPPHY_DC_FILTER_DELAY1 B43_PHY_OFDM(0x64) /* Dc filter delay1 */
190#define B43_LPPHY_PACKET_RX_ACTIVE_TO B43_PHY_OFDM(0x65) /* packet rx Active timeout */
191#define B43_LPPHY_ED_TOVAL B43_PHY_OFDM(0x66) /* ed timeoutValue */
192#define B43_LPPHY_HOLD_CRS_ON_VAL B43_PHY_OFDM(0x67) /* hold CRS On Value */
193#define B43_LPPHY_OFDM_TX_PHY_CRS_DELAY_VAL B43_PHY_OFDM(0x69) /* ofdm tx phy CRS Delay Value */
194#define B43_LPPHY_CCK_TX_PHY_CRS_DELAY_VAL B43_PHY_OFDM(0x6A) /* cck tx phy CRS Delay Value */
195#define B43_LPPHY_ED_ON_CONFIRM_TIMER_VAL B43_PHY_OFDM(0x6B) /* Ed on confirm Timer Value */
196#define B43_LPPHY_ED_OFFSET_CONFIRM_TIMER_VAL B43_PHY_OFDM(0x6C) /* Ed offset confirm Timer Value */
197#define B43_LPPHY_PHY_CRS_OFFSET_TIMER_VAL B43_PHY_OFDM(0x6D) /* phy CRS offset Timer Value */
198#define B43_LPPHY_ADC_COMPENSATION_CTL B43_PHY_OFDM(0x70) /* ADC Compensation Control */
199#define B43_LPPHY_LOG2_RBPSK_ADDR B43_PHY_OFDM(0x71) /* log2 RBPSK Address */
200#define B43_LPPHY_LOG2_RQPSK_ADDR B43_PHY_OFDM(0x72) /* log2 RQPSK Address */
201#define B43_LPPHY_LOG2_R16QAM_ADDR B43_PHY_OFDM(0x73) /* log2 R16QAM Address */
202#define B43_LPPHY_LOG2_R64QAM_ADDR B43_PHY_OFDM(0x74) /* log2 R64QAM Address */
203#define B43_LPPHY_OFFSET_BPSK_ADDR B43_PHY_OFDM(0x75) /* offset BPSK Address */
204#define B43_LPPHY_OFFSET_QPSK_ADDR B43_PHY_OFDM(0x76) /* offset QPSK Address */
205#define B43_LPPHY_OFFSET_16QAM_ADDR B43_PHY_OFDM(0x77) /* offset 16QAM Address */
206#define B43_LPPHY_OFFSET_64QAM_ADDR B43_PHY_OFDM(0x78) /* offset 64QAM Address */
207#define B43_LPPHY_ALPHA1 B43_PHY_OFDM(0x79) /* Alpha1 */
208#define B43_LPPHY_ALPHA2 B43_PHY_OFDM(0x7A) /* Alpha2 */
209#define B43_LPPHY_BETA1 B43_PHY_OFDM(0x7B) /* Beta1 */
210#define B43_LPPHY_BETA2 B43_PHY_OFDM(0x7C) /* Beta2 */
211#define B43_LPPHY_LOOP_NUM_ADDR B43_PHY_OFDM(0x7D) /* Loop Num Address */
212#define B43_LPPHY_STR_COLLMAX_SMPL_ADDR B43_PHY_OFDM(0x7E) /* Str Collmax Sample Address */
213#define B43_LPPHY_MAX_SMPL_COARSE_FINE_ADDR B43_PHY_OFDM(0x7F) /* Max Sample Coarse/Fine Address */
214#define B43_LPPHY_MAX_SMPL_COARSE_STR0CTR_ADDR B43_PHY_OFDM(0x80) /* Max Sample Coarse/Str0Ctr Address */
215#define B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR B43_PHY_OFDM(0x81) /* IQ Enable Wait Time Address */
216#define B43_LPPHY_IQ_NUM_SMPLS_ADDR B43_PHY_OFDM(0x82) /* IQ Num Samples Address */
217#define B43_LPPHY_IQ_ACC_HI_ADDR B43_PHY_OFDM(0x83) /* IQ Acc Hi Address */
218#define B43_LPPHY_IQ_ACC_LO_ADDR B43_PHY_OFDM(0x84) /* IQ Acc Lo Address */
219#define B43_LPPHY_IQ_I_PWR_ACC_HI_ADDR B43_PHY_OFDM(0x85) /* IQ I PWR Acc Hi Address */
220#define B43_LPPHY_IQ_I_PWR_ACC_LO_ADDR B43_PHY_OFDM(0x86) /* IQ I PWR Acc Lo Address */
221#define B43_LPPHY_IQ_Q_PWR_ACC_HI_ADDR B43_PHY_OFDM(0x87) /* IQ Q PWR Acc Hi Address */
222#define B43_LPPHY_IQ_Q_PWR_ACC_LO_ADDR B43_PHY_OFDM(0x88) /* IQ Q PWR Acc Lo Address */
223#define B43_LPPHY_MAXNUMSTEPS B43_PHY_OFDM(0x89) /* MaxNumsteps */
224#define B43_LPPHY_ROTORPHASE_ADDR B43_PHY_OFDM(0x8A) /* RotorPhase Address */
225#define B43_LPPHY_ADVANCEDRETARDROTOR_ADDR B43_PHY_OFDM(0x8B) /* AdvancedRetardRotor Address */
226#define B43_LPPHY_RSSIADCDELAY_CTL_ADDR B43_PHY_OFDM(0x8D) /* rssiAdcdelay Control Address */
227#define B43_LPPHY_TSSISTAT_ADDR B43_PHY_OFDM(0x8E) /* tssiStatus Address */
228#define B43_LPPHY_TEMPSENSESTAT_ADDR B43_PHY_OFDM(0x8F) /* tempsenseStatus Address */
229#define B43_LPPHY_TEMPSENSE_CTL_ADDR B43_PHY_OFDM(0x90) /* tempsense Control Address */
230#define B43_LPPHY_WRSSISTAT_ADDR B43_PHY_OFDM(0x91) /* wrssistatus Address */
231#define B43_LPPHY_MUFACTORADDR B43_PHY_OFDM(0x92) /* mufactoraddr */
232#define B43_LPPHY_SCRAMSTATE_ADDR B43_PHY_OFDM(0x93) /* scramstate Address */
233#define B43_LPPHY_TXHOLDOFFADDR B43_PHY_OFDM(0x94) /* txholdoffaddr */
234#define B43_LPPHY_PKTGAINVAL_ADDR B43_PHY_OFDM(0x95) /* pktgainval Address */
235#define B43_LPPHY_COARSEESTIM_ADDR B43_PHY_OFDM(0x96) /* Coarseestim Address */
236#define B43_LPPHY_STATE_TRANSITION_ADDR B43_PHY_OFDM(0x97) /* state Transition Address */
237#define B43_LPPHY_TRN_OFFSET_ADDR B43_PHY_OFDM(0x98) /* TRN offset Address */
238#define B43_LPPHY_NUM_ROTOR_ADDR B43_PHY_OFDM(0x99) /* Num Rotor Address */
239#define B43_LPPHY_VITERBI_OFFSET_ADDR B43_PHY_OFDM(0x9A) /* Viterbi Offset Address */
240#define B43_LPPHY_SMPL_COLLECT_WAIT_ADDR B43_PHY_OFDM(0x9B) /* Sample collect wait Address */
241#define B43_LPPHY_A_PHY_CTL_ADDR B43_PHY_OFDM(0x9C) /* A PHY Control Address */
242#define B43_LPPHY_NUM_PASS_THROUGH_ADDR B43_PHY_OFDM(0x9D) /* Num Pass Through Address */
243#define B43_LPPHY_RX_COMP_COEFF_S B43_PHY_OFDM(0x9E) /* RX Comp coefficient(s) */
244#define B43_LPPHY_CPAROTATEVAL B43_PHY_OFDM(0x9F) /* cpaRotateValue */
245#define B43_LPPHY_SMPL_PLAY_COUNT B43_PHY_OFDM(0xA0) /* Sample play count */
246#define B43_LPPHY_SMPL_PLAY_BUFFER_CTL B43_PHY_OFDM(0xA1) /* Sample play Buffer Control */
247#define B43_LPPHY_FOURWIRE_CTL B43_PHY_OFDM(0xA2) /* fourwire Control */
248#define B43_LPPHY_CPA_TAILCOUNT_VAL B43_PHY_OFDM(0xA3) /* CPA TailCount Value */
249#define B43_LPPHY_TX_PWR_CTL_CMD B43_PHY_OFDM(0xA4) /* TX Power Control Cmd */
250#define B43_LPPHY_TX_PWR_CTL_CMD_MODE 0xE000 /* TX power control mode mask */
251#define B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF 0x0000 /* TX power control is OFF */
252#define B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW 0x8000 /* TX power control is SOFTWARE */
253#define B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW 0xE000 /* TX power control is HARDWARE */
254#define B43_LPPHY_TX_PWR_CTL_NNUM B43_PHY_OFDM(0xA5) /* TX Power Control Nnum */
255#define B43_LPPHY_TX_PWR_CTL_IDLETSSI B43_PHY_OFDM(0xA6) /* TX Power Control IdleTssi */
256#define B43_LPPHY_TX_PWR_CTL_TARGETPWR B43_PHY_OFDM(0xA7) /* TX Power Control TargetPower */
257#define B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT B43_PHY_OFDM(0xA8) /* TX Power Control DeltaPower Limit */
258#define B43_LPPHY_TX_PWR_CTL_BASEINDEX B43_PHY_OFDM(0xA9) /* TX Power Control BaseIndex */
259#define B43_LPPHY_TX_PWR_CTL_PWR_INDEX B43_PHY_OFDM(0xAA) /* TX Power Control Power Index */
260#define B43_LPPHY_TX_PWR_CTL_STAT B43_PHY_OFDM(0xAB) /* TX Power Control Status */
261#define B43_LPPHY_LP_RF_SIGNAL_LUT B43_PHY_OFDM(0xAC) /* LP RF signal LUT */
262#define B43_LPPHY_RX_RADIO_CTL_FILTER_STATE B43_PHY_OFDM(0xAD) /* RX Radio Control Filter State */
263#define B43_LPPHY_RX_RADIO_CTL B43_PHY_OFDM(0xAE) /* RX Radio Control */
264#define B43_LPPHY_NRSSI_STAT_ADDR B43_PHY_OFDM(0xAF) /* NRSSI status Address */
265#define B43_LPPHY_RF_OVERRIDE_2 B43_PHY_OFDM(0xB0) /* RF override 2 */
266#define B43_LPPHY_RF_OVERRIDE_2_VAL B43_PHY_OFDM(0xB1) /* RF override 2 val */
267#define B43_LPPHY_PS_CTL_OVERRIDE_VAL0 B43_PHY_OFDM(0xB2) /* PS Control override val0 */
268#define B43_LPPHY_PS_CTL_OVERRIDE_VAL1 B43_PHY_OFDM(0xB3) /* PS Control override val1 */
269#define B43_LPPHY_PS_CTL_OVERRIDE_VAL2 B43_PHY_OFDM(0xB4) /* PS Control override val2 */
270#define B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL B43_PHY_OFDM(0xB5) /* TX gain Control override val */
271#define B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL B43_PHY_OFDM(0xB6) /* RX gain Control override val */
272#define B43_LPPHY_AFE_DDFS B43_PHY_OFDM(0xB7) /* AFE DDFS */
273#define B43_LPPHY_AFE_DDFS_POINTER_INIT B43_PHY_OFDM(0xB8) /* AFE DDFS pointer init */
274#define B43_LPPHY_AFE_DDFS_INCR_INIT B43_PHY_OFDM(0xB9) /* AFE DDFS incr init */
275#define B43_LPPHY_MRCNOISEREDUCTION B43_PHY_OFDM(0xBA) /* mrcNoiseReduction */
276#define B43_LPPHY_TRLOOKUP3 B43_PHY_OFDM(0xBB) /* TRLookup3 */
277#define B43_LPPHY_TRLOOKUP4 B43_PHY_OFDM(0xBC) /* TRLookup4 */
278#define B43_LPPHY_RADAR_FIFO_STAT B43_PHY_OFDM(0xBD) /* Radar FIFO Status */
279#define B43_LPPHY_GPIO_OUTEN B43_PHY_OFDM(0xBE) /* GPIO Out enable */
280#define B43_LPPHY_GPIO_SELECT B43_PHY_OFDM(0xBF) /* GPIO Select */
281#define B43_LPPHY_GPIO_OUT B43_PHY_OFDM(0xC0) /* GPIO Out */
8 282
283
284
285/* Radio register access decorators. */
9#define B43_LP_RADIO(radio_reg) (radio_reg) 286#define B43_LP_RADIO(radio_reg) (radio_reg)
10#define B43_LP_NORTH(radio_reg) B43_LP_RADIO(radio_reg) 287#define B43_LP_NORTH(radio_reg) B43_LP_RADIO(radio_reg)
11#define B43_LP_SOUTH(radio_reg) B43_LP_RADIO((radio_reg) | 0x4000) 288#define B43_LP_SOUTH(radio_reg) B43_LP_RADIO((radio_reg) | 0x4000)
@@ -529,8 +806,58 @@
529 806
530 807
531 808
809enum b43_lpphy_txpctl_mode {
810 B43_LPPHY_TXPCTL_UNKNOWN = 0,
811 B43_LPPHY_TXPCTL_OFF, /* TX power control is OFF */
812 B43_LPPHY_TXPCTL_SW, /* TX power control is set to Software */
813 B43_LPPHY_TXPCTL_HW, /* TX power control is set to Hardware */
814};
815
532struct b43_phy_lp { 816struct b43_phy_lp {
533 //TODO
817 /* Current TX power control mode. */
818 enum b43_lpphy_txpctl_mode txpctl_mode;
819
820 /* Transmit isolation medium band */
821 u8 tx_isolation_med_band; /* FIXME initial value? */
822 /* Transmit isolation low band */
823 u8 tx_isolation_low_band; /* FIXME initial value? */
824 /* Transmit isolation high band */
825 u8 tx_isolation_hi_band; /* FIXME initial value? */
826
827 /* Receive power offset */
828 u8 rx_pwr_offset; /* FIXME initial value? */
829
830 /* TSSI transmit count */
831 u16 tssi_tx_count;
832 /* TSSI index */
833 u16 tssi_idx; /* FIXME initial value? */
834 /* TSSI npt */
835 u16 tssi_npt; /* FIXME initial value? */
836
837 /* Target TX frequency */
838 u16 tgt_tx_freq; /* FIXME initial value? */
839
840 /* Transmit power index override */
841 s8 tx_pwr_idx_over; /* FIXME initial value? */
842
843 /* RSSI vf */
844 u8 rssi_vf; /* FIXME initial value? */
845 /* RSSI vc */
846 u8 rssi_vc; /* FIXME initial value? */
847 /* RSSI gs */
848 u8 rssi_gs; /* FIXME initial value? */
849
850 /* RC cap */
851 u8 rc_cap; /* FIXME initial value? */
852 /* BX arch */
853 u8 bx_arch; /* FIXME initial value? */
854
855 /* Full calibration channel */
856 u8 full_calib_chan; /* FIXME initial value? */
857
858 /* Transmit iqlocal best coeffs */
859 bool tx_iqloc_best_coeffs_valid;
860 u8 tx_iqloc_best_coeffs[11];
534}; 861};
535 862
536 863
diff --git a/drivers/net/wireless/b43/tables_lpphy.c b/drivers/net/wireless/b43/tables_lpphy.c
new file mode 100644
index 00000000000..4ea734dce21
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_lpphy.c
@@ -0,0 +1,394 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11g LP-PHY and radio device data tables
5
6 Copyright (c) 2009 Michael Buesch <mb@bu3sch.de>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA.
22
23*/
24
25#include "b43.h"
26#include "tables_lpphy.h"
27#include "phy_common.h"
28#include "phy_lp.h"
29
30
31/* Entry of the 2062 radio init table */
32struct b2062_init_tab_entry {
33 u16 offset;
34 u16 value_a;
35 u16 value_g;
36 u8 flags;
37};
38#define B2062_FLAG_A 0x01 /* Flag: Init in A mode */
39#define B2062_FLAG_G 0x02 /* Flag: Init in G mode */
40
41static const struct b2062_init_tab_entry b2062_init_tab[] = {
42 /* { .offset = B2062_N_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
43 /* { .offset = 0x0001, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
44 /* { .offset = B2062_N_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
45 /* { .offset = B2062_N_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
46 { .offset = B2062_N_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B2062_FLAG_A | B2062_FLAG_G, },
47 /* { .offset = B2062_N_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
48 /* { .offset = B2062_N_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
49 /* { .offset = B2062_N_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
50 /* { .offset = B2062_N_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
51 /* { .offset = B2062_N_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
52 /* { .offset = B2062_N_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
53 /* { .offset = B2062_N_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
54 /* { .offset = B2062_N_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
55 /* { .offset = B2062_N_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
56 /* { .offset = B2062_N_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
57 /* { .offset = B2062_N_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
58 /* { .offset = B2062_N_PDN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
59 { .offset = B2062_N_PDN_CTL1, .value_a = 0x0000, .value_g = 0x00CA, .flags = B2062_FLAG_G, },
60 /* { .offset = B2062_N_PDN_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */
61 { .offset = B2062_N_PDN_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = B2062_FLAG_A | B2062_FLAG_G, },
62 { .offset = B2062_N_PDN_CTL4, .value_a = 0x0015, .value_g = 0x002A, .flags = B2062_FLAG_A | B2062_FLAG_G, },
63 /* { .offset = B2062_N_GEN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
64 /* { .offset = B2062_N_IQ_CALIB, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
65 { .offset = B2062_N_LGENC, .value_a = 0x00DB, .value_g = 0x00FF, .flags = B2062_FLAG_A, },
66 /* { .offset = B2062_N_LGENA_LPF, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
67 /* { .offset = B2062_N_LGENA_BIAS0, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */
68 /* { .offset = B2062_N_LGNEA_BIAS1, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
69 /* { .offset = B2062_N_LGENA_CTL0, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
70 /* { .offset = B2062_N_LGENA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
71 /* { .offset = B2062_N_LGENA_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
72 { .offset = B2062_N_LGENA_TUNE0, .value_a = 0x00DD, .value_g = 0x0000, .flags = B2062_FLAG_A | B2062_FLAG_G, },
73 /* { .offset = B2062_N_LGENA_TUNE1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
74 { .offset = B2062_N_LGENA_TUNE2, .value_a = 0x00DD, .value_g = 0x0000, .flags = B2062_FLAG_A | B2062_FLAG_G, },
75 { .offset = B2062_N_LGENA_TUNE3, .value_a = 0x0077, .value_g = 0x00B5, .flags = B2062_FLAG_A | B2062_FLAG_G, },
76 { .offset = B2062_N_LGENA_CTL3, .value_a = 0x0000, .value_g = 0x00FF, .flags = B2062_FLAG_A | B2062_FLAG_G, },
77 /* { .offset = B2062_N_LGENA_CTL4, .value_a = 0x001F, .value_g = 0x001F, .flags = 0, }, */
78 /* { .offset = B2062_N_LGENA_CTL5, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
79 /* { .offset = B2062_N_LGENA_CTL6, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
80 { .offset = B2062_N_LGENA_CTL7, .value_a = 0x0033, .value_g = 0x0033, .flags = B2062_FLAG_A | B2062_FLAG_G, },
81 /* { .offset = B2062_N_RXA_CTL0, .value_a = 0x0009, .value_g = 0x0009, .flags = 0, }, */
82 { .offset = B2062_N_RXA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = B2062_FLAG_G, },
83 /* { .offset = B2062_N_RXA_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */
84 /* { .offset = B2062_N_RXA_CTL3, .value_a = 0x0027, .value_g = 0x0027, .flags = 0, }, */
85 /* { .offset = B2062_N_RXA_CTL4, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */
86 /* { .offset = B2062_N_RXA_CTL5, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */
87 /* { .offset = B2062_N_RXA_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
88 /* { .offset = B2062_N_RXA_CTL7, .value_a = 0x0008, .value_g = 0x0008, .flags = 0, }, */
89 { .offset = B2062_N_RXBB_CTL0, .value_a = 0x0082, .value_g = 0x0080, .flags = B2062_FLAG_A | B2062_FLAG_G, },
90 /* { .offset = B2062_N_RXBB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
91 /* { .offset = B2062_N_RXBB_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
92 /* { .offset = B2062_N_RXBB_GAIN0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
93 { .offset = B2062_N_RXBB_GAIN1, .value_a = 0x0004, .value_g = 0x0004, .flags = B2062_FLAG_A | B2062_FLAG_G, },
94 { .offset = B2062_N_RXBB_GAIN2, .value_a = 0x0000, .value_g = 0x0000, .flags = B2062_FLAG_A | B2062_FLAG_G, },
95 /* { .offset = B2062_N_RXBB_GAIN3, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */
96 /* { .offset = B2062_N_RXBB_RSSI0, .value_a = 0x0043, .value_g = 0x0043, .flags = 0, }, */
97 /* { .offset = B2062_N_RXBB_RSSI1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
98 /* { .offset = B2062_N_RXBB_CALIB0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */
99 /* { .offset = B2062_N_RXBB_CALIB1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
100 /* { .offset = B2062_N_RXBB_CALIB2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
101 /* { .offset = B2062_N_RXBB_BIAS0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */
102 /* { .offset = B2062_N_RXBB_BIAS1, .value_a = 0x002A, .value_g = 0x002A, .flags = 0, }, */
103 /* { .offset = B2062_N_RXBB_BIAS2, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */
104 /* { .offset = B2062_N_RXBB_BIAS3, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */
105 /* { .offset = B2062_N_RXBB_BIAS4, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */
106 /* { .offset = B2062_N_RXBB_BIAS5, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */
107 /* { .offset = B2062_N_RXBB_RSSI2, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
108 /* { .offset = B2062_N_RXBB_RSSI3, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
109 /* { .offset = B2062_N_RXBB_RSSI4, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
110 /* { .offset = B2062_N_RXBB_RSSI5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
111 /* { .offset = B2062_N_TX_CTL0, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
112 /* { .offset = B2062_N_TX_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
113 /* { .offset = B2062_N_TX_CTL2, .value_a = 0x0084, .value_g = 0x0084, .flags = 0, }, */
114 /* { .offset = B2062_N_TX_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
115 { .offset = B2062_N_TX_CTL4, .value_a = 0x0003, .value_g = 0x0003, .flags = B2062_FLAG_A | B2062_FLAG_G, },
116 { .offset = B2062_N_TX_CTL5, .value_a = 0x0002, .value_g = 0x0002, .flags = B2062_FLAG_A | B2062_FLAG_G, },
117 /* { .offset = B2062_N_TX_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
118 /* { .offset = B2062_N_TX_CTL7, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */
119 /* { .offset = B2062_N_TX_CTL8, .value_a = 0x0082, .value_g = 0x0082, .flags = 0, }, */
120 /* { .offset = B2062_N_TX_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
121 /* { .offset = B2062_N_TX_CTL_A, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
122 /* { .offset = B2062_N_TX_GC2G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */
123 /* { .offset = B2062_N_TX_GC5G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */
124 { .offset = B2062_N_TX_TUNE, .value_a = 0x0088, .value_g = 0x001B, .flags = B2062_FLAG_A | B2062_FLAG_G, },
125 /* { .offset = B2062_N_TX_PAD, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
126 /* { .offset = B2062_N_TX_PGA, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
127 /* { .offset = B2062_N_TX_PADAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
128 /* { .offset = B2062_N_TX_PGAAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
129 /* { .offset = B2062_N_TSSI_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
130 /* { .offset = B2062_N_TSSI_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
131 /* { .offset = B2062_N_TSSI_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
132 /* { .offset = B2062_N_IQ_CALIB_CTL0, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
133 /* { .offset = B2062_N_IQ_CALIB_CTL1, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
134 /* { .offset = B2062_N_IQ_CALIB_CTL2, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
135 /* { .offset = B2062_N_CALIB_TS, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
136 /* { .offset = B2062_N_CALIB_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
137 /* { .offset = B2062_N_CALIB_CTL1, .value_a = 0x0015, .value_g = 0x0015, .flags = 0, }, */
138 /* { .offset = B2062_N_CALIB_CTL2, .value_a = 0x000F, .value_g = 0x000F, .flags = 0, }, */
139 /* { .offset = B2062_N_CALIB_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
140 /* { .offset = B2062_N_CALIB_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
141 /* { .offset = B2062_N_CALIB_DBG0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
142 /* { .offset = B2062_N_CALIB_DBG1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
143 /* { .offset = B2062_N_CALIB_DBG2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
144 /* { .offset = B2062_N_CALIB_DBG3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
145 /* { .offset = B2062_N_PSENSE_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
146 /* { .offset = B2062_N_PSENSE_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
147 /* { .offset = B2062_N_PSENSE_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
148 /* { .offset = B2062_N_TEST_BUF0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
149 /* { .offset = B2062_S_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
150 /* { .offset = B2062_S_RADIO_ID_CODE, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
151 /* { .offset = B2062_S_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
152 /* { .offset = B2062_S_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
153 { .offset = B2062_S_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B2062_FLAG_A | B2062_FLAG_G, },
154 /* { .offset = B2062_S_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
155 /* { .offset = B2062_S_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
156 /* { .offset = B2062_S_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
157 /* { .offset = B2062_S_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
158 /* { .offset = B2062_S_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
159 /* { .offset = B2062_S_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
160 /* { .offset = B2062_S_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
161 /* { .offset = B2062_S_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
162 /* { .offset = B2062_S_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
163 /* { .offset = B2062_S_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
164 /* { .offset = B2062_S_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
165 { .offset = B2062_S_PDS_CTL0, .value_a = 0x00FF, .value_g = 0x00FF, .flags = B2062_FLAG_A | B2062_FLAG_G, },
166 /* { .offset = B2062_S_PDS_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
167 /* { .offset = B2062_S_PDS_CTL2, .value_a = 0x008E, .value_g = 0x008E, .flags = 0, }, */
168 /* { .offset = B2062_S_PDS_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
169 /* { .offset = B2062_S_BG_CTL0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */
170 /* { .offset = B2062_S_BG_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
171 /* { .offset = B2062_S_BG_CTL2, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */
172 { .offset = B2062_S_LGENG_CTL0, .value_a = 0x00F8, .value_g = 0x00D8, .flags = B2062_FLAG_A | B2062_FLAG_G, },
173 { .offset = B2062_S_LGENG_CTL1, .value_a = 0x003C, .value_g = 0x0024, .flags = B2062_FLAG_A | B2062_FLAG_G, },
174 /* { .offset = B2062_S_LGENG_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
175 /* { .offset = B2062_S_LGENG_CTL3, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */
176 /* { .offset = B2062_S_LGENG_CTL4, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
177 /* { .offset = B2062_S_LGENG_CTL5, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
178 /* { .offset = B2062_S_LGENG_CTL6, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */
179 /* { .offset = B2062_S_LGENG_CTL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
180 { .offset = B2062_S_LGENG_CTL8, .value_a = 0x0088, .value_g = 0x0080, .flags = B2062_FLAG_A | B2062_FLAG_G, },
181 /* { .offset = B2062_S_LGENG_CTL9, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
182 { .offset = B2062_S_LGENG_CTL10, .value_a = 0x0088, .value_g = 0x0080, .flags = B2062_FLAG_A | B2062_FLAG_G, },
183 /* { .offset = B2062_S_LGENG_CTL11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
184 /* { .offset = B2062_S_REFPLL_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
185 /* { .offset = B2062_S_REFPLL_CTL1, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */
186 /* { .offset = B2062_S_REFPLL_CTL2, .value_a = 0x00AF, .value_g = 0x00AF, .flags = 0, }, */
187 /* { .offset = B2062_S_REFPLL_CTL3, .value_a = 0x0012, .value_g = 0x0012, .flags = 0, }, */
188 /* { .offset = B2062_S_REFPLL_CTL4, .value_a = 0x000B, .value_g = 0x000B, .flags = 0, }, */
189 /* { .offset = B2062_S_REFPLL_CTL5, .value_a = 0x005F, .value_g = 0x005F, .flags = 0, }, */
190 /* { .offset = B2062_S_REFPLL_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
191 /* { .offset = B2062_S_REFPLL_CTL7, .value_a = 0x0040, .value_g = 0x0040, .flags = 0, }, */
192 /* { .offset = B2062_S_REFPLL_CTL8, .value_a = 0x0052, .value_g = 0x0052, .flags = 0, }, */
193 /* { .offset = B2062_S_REFPLL_CTL9, .value_a = 0x0026, .value_g = 0x0026, .flags = 0, }, */
194 /* { .offset = B2062_S_REFPLL_CTL10, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */
195 /* { .offset = B2062_S_REFPLL_CTL11, .value_a = 0x0036, .value_g = 0x0036, .flags = 0, }, */
196 /* { .offset = B2062_S_REFPLL_CTL12, .value_a = 0x0057, .value_g = 0x0057, .flags = 0, }, */
197 /* { .offset = B2062_S_REFPLL_CTL13, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */
198 /* { .offset = B2062_S_REFPLL_CTL14, .value_a = 0x0075, .value_g = 0x0075, .flags = 0, }, */
199 /* { .offset = B2062_S_REFPLL_CTL15, .value_a = 0x00B4, .value_g = 0x00B4, .flags = 0, }, */
200 /* { .offset = B2062_S_REFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
201 { .offset = B2062_S_RFPLL_CTL0, .value_a = 0x0098, .value_g = 0x0098, .flags = B2062_FLAG_A | B2062_FLAG_G, },
202 { .offset = B2062_S_RFPLL_CTL1, .value_a = 0x0010, .value_g = 0x0010, .flags = B2062_FLAG_A | B2062_FLAG_G, },
203 /* { .offset = B2062_S_RFPLL_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
204 /* { .offset = B2062_S_RFPLL_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
205 /* { .offset = B2062_S_RFPLL_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
206 { .offset = B2062_S_RFPLL_CTL5, .value_a = 0x0043, .value_g = 0x0043, .flags = B2062_FLAG_A | B2062_FLAG_G, },
207 { .offset = B2062_S_RFPLL_CTL6, .value_a = 0x0047, .value_g = 0x0047, .flags = B2062_FLAG_A | B2062_FLAG_G, },
208 { .offset = B2062_S_RFPLL_CTL7, .value_a = 0x000C, .value_g = 0x000C, .flags = B2062_FLAG_A | B2062_FLAG_G, },
209 { .offset = B2062_S_RFPLL_CTL8, .value_a = 0x0011, .value_g = 0x0011, .flags = B2062_FLAG_A | B2062_FLAG_G, },
210 { .offset = B2062_S_RFPLL_CTL9, .value_a = 0x0011, .value_g = 0x0011, .flags = B2062_FLAG_A | B2062_FLAG_G, },
211 { .offset = B2062_S_RFPLL_CTL10, .value_a = 0x000E, .value_g = 0x000E, .flags = B2062_FLAG_A | B2062_FLAG_G, },
212 { .offset = B2062_S_RFPLL_CTL11, .value_a = 0x0008, .value_g = 0x0008, .flags = B2062_FLAG_A | B2062_FLAG_G, },
213 { .offset = B2062_S_RFPLL_CTL12, .value_a = 0x0033, .value_g = 0x0033, .flags = B2062_FLAG_A | B2062_FLAG_G, },
214 { .offset = B2062_S_RFPLL_CTL13, .value_a = 0x000A, .value_g = 0x000A, .flags = B2062_FLAG_A | B2062_FLAG_G, },
215 { .offset = B2062_S_RFPLL_CTL14, .value_a = 0x0006, .value_g = 0x0006, .flags = B2062_FLAG_A | B2062_FLAG_G, },
216 /* { .offset = B2062_S_RFPLL_CTL15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
217 /* { .offset = B2062_S_RFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
218 /* { .offset = B2062_S_RFPLL_CTL17, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
219 { .offset = B2062_S_RFPLL_CTL18, .value_a = 0x003E, .value_g = 0x003E, .flags = B2062_FLAG_A | B2062_FLAG_G, },
220 { .offset = B2062_S_RFPLL_CTL19, .value_a = 0x0013, .value_g = 0x0013, .flags = B2062_FLAG_A | B2062_FLAG_G, },
221 /* { .offset = B2062_S_RFPLL_CTL20, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
222 { .offset = B2062_S_RFPLL_CTL21, .value_a = 0x0062, .value_g = 0x0062, .flags = B2062_FLAG_A | B2062_FLAG_G, },
223 { .offset = B2062_S_RFPLL_CTL22, .value_a = 0x0007, .value_g = 0x0007, .flags = B2062_FLAG_A | B2062_FLAG_G, },
224 { .offset = B2062_S_RFPLL_CTL23, .value_a = 0x0016, .value_g = 0x0016, .flags = B2062_FLAG_A | B2062_FLAG_G, },
225 { .offset = B2062_S_RFPLL_CTL24, .value_a = 0x005C, .value_g = 0x005C, .flags = B2062_FLAG_A | B2062_FLAG_G, },
226 { .offset = B2062_S_RFPLL_CTL25, .value_a = 0x0095, .value_g = 0x0095, .flags = B2062_FLAG_A | B2062_FLAG_G, },
227 /* { .offset = B2062_S_RFPLL_CTL26, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
228 /* { .offset = B2062_S_RFPLL_CTL27, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
229 /* { .offset = B2062_S_RFPLL_CTL28, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
230 /* { .offset = B2062_S_RFPLL_CTL29, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
231 { .offset = B2062_S_RFPLL_CTL30, .value_a = 0x00A0, .value_g = 0x00A0, .flags = B2062_FLAG_A | B2062_FLAG_G, },
232 { .offset = B2062_S_RFPLL_CTL31, .value_a = 0x0004, .value_g = 0x0004, .flags = B2062_FLAG_A | B2062_FLAG_G, },
233 /* { .offset = B2062_S_RFPLL_CTL32, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
234 { .offset = B2062_S_RFPLL_CTL33, .value_a = 0x00CC, .value_g = 0x00CC, .flags = B2062_FLAG_A | B2062_FLAG_G, },
235 { .offset = B2062_S_RFPLL_CTL34, .value_a = 0x0007, .value_g = 0x0007, .flags = B2062_FLAG_A | B2062_FLAG_G, },
236 /* { .offset = B2062_S_RXG_CNT0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */
237 /* { .offset = B2062_S_RXG_CNT1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
238 /* { .offset = B2062_S_RXG_CNT2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
239 /* { .offset = B2062_S_RXG_CNT3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
240 /* { .offset = B2062_S_RXG_CNT4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
241 /* { .offset = B2062_S_RXG_CNT5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
242 /* { .offset = B2062_S_RXG_CNT6, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
243 /* { .offset = B2062_S_RXG_CNT7, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
244 { .offset = B2062_S_RXG_CNT8, .value_a = 0x000F, .value_g = 0x000F, .flags = B2062_FLAG_A, },
245 /* { .offset = B2062_S_RXG_CNT9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
246 /* { .offset = B2062_S_RXG_CNT10, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
247 /* { .offset = B2062_S_RXG_CNT11, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
248 /* { .offset = B2062_S_RXG_CNT12, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
249 /* { .offset = B2062_S_RXG_CNT13, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */
250 /* { .offset = B2062_S_RXG_CNT14, .value_a = 0x00A0, .value_g = 0x00A0, .flags = 0, }, */
251 /* { .offset = B2062_S_RXG_CNT15, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */
252 /* { .offset = B2062_S_RXG_CNT16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
253 /* { .offset = B2062_S_RXG_CNT17, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
254};
255
256void b2062_upload_init_table(struct b43_wldev *dev)
257{
258 const struct b2062_init_tab_entry *e;
259 unsigned int i;
260
261 for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
262 e = &b2062_init_tab[i];
263 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
264 if (!(e->flags & B2062_FLAG_G))
265 continue;
266 b43_radio_write(dev, e->offset, e->value_g);
267 } else {
268 if (!(e->flags & B2062_FLAG_A))
269 continue;
270 b43_radio_write(dev, e->offset, e->value_a);
271 }
272 }
273}
274
275u32 b43_lptab_read(struct b43_wldev *dev, u32 offset)
276{
277 u32 type, value;
278
279 type = offset & B43_LPTAB_TYPEMASK;
280 offset &= ~B43_LPTAB_TYPEMASK;
281 B43_WARN_ON(offset > 0xFFFF);
282
283 switch (type) {
284 case B43_LPTAB_8BIT:
285 b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
286 value = b43_phy_read(dev, B43_LPPHY_TABLEDATALO) & 0xFF;
287 break;
288 case B43_LPTAB_16BIT:
289 b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
290 value = b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
291 break;
292 case B43_LPTAB_32BIT:
293 b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
294 value = b43_phy_read(dev, B43_LPPHY_TABLEDATAHI);
295 value <<= 16;
296 value |= b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
297 break;
298 default:
299 B43_WARN_ON(1);
300 value = 0;
301 }
302
303 return value;
304}
305
306void b43_lptab_read_bulk(struct b43_wldev *dev, u32 offset,
307 unsigned int nr_elements, void *_data)
308{
309 u32 type, value;
310 u8 *data = _data;
311 unsigned int i;
312
313 type = offset & B43_LPTAB_TYPEMASK;
314 for (i = 0; i < nr_elements; i++) {
315 value = b43_lptab_read(dev, offset);
316 switch (type) {
317 case B43_LPTAB_8BIT:
318 *data = value;
319 data++;
320 break;
321 case B43_LPTAB_16BIT:
322 *((u16 *)data) = value;
323 data += 2;
324 break;
325 case B43_LPTAB_32BIT:
326 *((u32 *)data) = value;
327 data += 4;
328 break;
329 default:
330 B43_WARN_ON(1);
331 }
332 offset++;
333 }
334}
335
336void b43_lptab_write(struct b43_wldev *dev, u32 offset, u32 value)
337{
338 u32 type;
339
340 type = offset & B43_LPTAB_TYPEMASK;
341 offset &= ~B43_LPTAB_TYPEMASK;
342 B43_WARN_ON(offset > 0xFFFF);
343
344 switch (type) {
345 case B43_LPTAB_8BIT:
346 B43_WARN_ON(value & ~0xFF);
347 b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
348 b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
349 break;
350 case B43_LPTAB_16BIT:
351 B43_WARN_ON(value & ~0xFFFF);
352 b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
353 b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
354 break;
355 case B43_LPTAB_32BIT:
356 b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
357 b43_phy_write(dev, B43_LPPHY_TABLEDATAHI, value >> 16);
358 b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
359 break;
360 default:
361 B43_WARN_ON(1);
362 }
363}
364
365void b43_lptab_write_bulk(struct b43_wldev *dev, u32 offset,
366 unsigned int nr_elements, const void *_data)
367{
368 u32 type, value;
369 const u8 *data = _data;
370 unsigned int i;
371
372 type = offset & B43_LPTAB_TYPEMASK;
373 for (i = 0; i < nr_elements; i++) {
374 switch (type) {
375 case B43_LPTAB_8BIT:
376 value = *data;
377 data++;
378 break;
379 case B43_LPTAB_16BIT:
380 value = *((u16 *)data);
381 data += 2;
382 break;
383 case B43_LPTAB_32BIT:
384 value = *((u32 *)data);
385 data += 4;
386 break;
387 default:
388 B43_WARN_ON(1);
389 value = 0;
390 }
391 b43_lptab_write(dev, offset, value);
392 offset++;
393 }
394}
diff --git a/drivers/net/wireless/b43/tables_lpphy.h b/drivers/net/wireless/b43/tables_lpphy.h
new file mode 100644
index 00000000000..0b8d02895a5
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_lpphy.h
@@ -0,0 +1,31 @@
1#ifndef B43_TABLES_LPPHY_H_
2#define B43_TABLES_LPPHY_H_
3
4
5#define B43_LPTAB_TYPEMASK 0xF0000000
6#define B43_LPTAB_8BIT 0x10000000
7#define B43_LPTAB_16BIT 0x20000000
8#define B43_LPTAB_32BIT 0x30000000
9#define B43_LPTAB8(table, offset) (((table) << 10) | (offset) | B43_LPTAB_8BIT)
10#define B43_LPTAB16(table, offset) (((table) << 10) | (offset) | B43_LPTAB_16BIT)
11#define B43_LPTAB32(table, offset) (((table) << 10) | (offset) | B43_LPTAB_32BIT)
12
13/* Table definitions */
14#define B43_LPTAB_TXPWR_R2PLUS B43_LPTAB32(0x07, 0) /* TX power lookup table (rev >= 2) */
15#define B43_LPTAB_TXPWR_R0_1 B43_LPTAB32(0xA0, 0) /* TX power lookup table (rev < 2) */
16
17u32 b43_lptab_read(struct b43_wldev *dev, u32 offset);
18void b43_lptab_write(struct b43_wldev *dev, u32 offset, u32 value);
19
20/* Bulk table access. Note that these functions return the bulk data in
21 * host endianness! The returned data is _not_ a bytearray, but an array
22 * consisting of nr_elements of the data type. */
23void b43_lptab_read_bulk(struct b43_wldev *dev, u32 offset,
24 unsigned int nr_elements, void *data);
25void b43_lptab_write_bulk(struct b43_wldev *dev, u32 offset,
26 unsigned int nr_elements, const void *data);
27
28void b2062_upload_init_table(struct b43_wldev *dev);
29
30
31#endif /* B43_TABLES_LPPHY_H_ */
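
The table offsets declared above fold three things into one u32: the element-width tag in the top nibble (B43_LPTAB_TYPEMASK), the table number shifted left by 10, and the in-table offset in the low bits; b43_lptab_read()/b43_lptab_write() strip the width tag before programming B43_LPPHY_TABLE_ADDR. The bulk helpers advance the table offset by one entry per element while stepping the caller's buffer by the element width, so the buffer must hold nr_elements values of the tagged type, in host endianness as the comment above notes. A minimal usage sketch (editor's illustration, not part of this patch; the wrapper name is hypothetical):

/* Illustrative only -- not part of the patch. Reads the first eight
 * 32-bit entries of the rev >= 2 TX power table into a host-endian
 * buffer via the bulk helper declared above. */
static void lpphy_peek_txpwr_table(struct b43_wldev *dev)
{
	u32 buf[8];

	/* B43_LPTAB_TXPWR_R2PLUS == B43_LPTAB32(0x07, 0), so the width
	 * tag travels with the offset and the helper knows to use the
	 * HI/LO data registers and a 4-byte stride in 'buf'. */
	b43_lptab_read_bulk(dev, B43_LPTAB_TXPWR_R2PLUS,
			    ARRAY_SIZE(buf), buf);

	/* buf[0..7] now holds CPU-endian table words. */
}
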
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index cacb786d971..3ea55b18c70 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -146,12 +146,12 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
146 case B43legacy_LED_TRANSFER: 146 case B43legacy_LED_TRANSFER:
147 case B43legacy_LED_APTRANSFER: 147 case B43legacy_LED_APTRANSFER:
148 snprintf(name, sizeof(name), 148 snprintf(name, sizeof(name),
149 "b43legacy-%s:tx", wiphy_name(hw->wiphy)); 149 "b43legacy-%s::tx", wiphy_name(hw->wiphy));
150 b43legacy_register_led(dev, &dev->led_tx, name, 150 b43legacy_register_led(dev, &dev->led_tx, name,
151 ieee80211_get_tx_led_name(hw), 151 ieee80211_get_tx_led_name(hw),
152 led_index, activelow); 152 led_index, activelow);
153 snprintf(name, sizeof(name), 153 snprintf(name, sizeof(name),
154 "b43legacy-%s:rx", wiphy_name(hw->wiphy)); 154 "b43legacy-%s::rx", wiphy_name(hw->wiphy));
155 b43legacy_register_led(dev, &dev->led_rx, name, 155 b43legacy_register_led(dev, &dev->led_rx, name,
156 ieee80211_get_rx_led_name(hw), 156 ieee80211_get_rx_led_name(hw),
157 led_index, activelow); 157 led_index, activelow);
@@ -161,7 +161,7 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
161 case B43legacy_LED_RADIO_B: 161 case B43legacy_LED_RADIO_B:
162 case B43legacy_LED_MODE_BG: 162 case B43legacy_LED_MODE_BG:
163 snprintf(name, sizeof(name), 163 snprintf(name, sizeof(name),
164 "b43legacy-%s:radio", wiphy_name(hw->wiphy)); 164 "b43legacy-%s::radio", wiphy_name(hw->wiphy));
165 b43legacy_register_led(dev, &dev->led_radio, name, 165 b43legacy_register_led(dev, &dev->led_radio, name,
166 b43legacy_rfkill_led_name(dev), 166 b43legacy_rfkill_led_name(dev),
167 led_index, activelow); 167 led_index, activelow);
@@ -172,7 +172,7 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
172 case B43legacy_LED_WEIRD: 172 case B43legacy_LED_WEIRD:
173 case B43legacy_LED_ASSOC: 173 case B43legacy_LED_ASSOC:
174 snprintf(name, sizeof(name), 174 snprintf(name, sizeof(name),
175 "b43legacy-%s:assoc", wiphy_name(hw->wiphy)); 175 "b43legacy-%s::assoc", wiphy_name(hw->wiphy));
176 b43legacy_register_led(dev, &dev->led_assoc, name, 176 b43legacy_register_led(dev, &dev->led_assoc, name,
177 ieee80211_get_assoc_led_name(hw), 177 ieee80211_get_assoc_led_name(hw),
178 led_index, activelow); 178 led_index, activelow);
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index fb996c27a19..879edc78671 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2650,7 +2650,7 @@ out_unlock_mutex:
2650 return err; 2650 return err;
2651} 2651}
2652 2652
2653static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u64 brates) 2653static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates)
2654{ 2654{
2655 struct ieee80211_supported_band *sband = 2655 struct ieee80211_supported_band *sband =
2656 dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ]; 2656 dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index 19b1bf0478b..241756318da 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -193,7 +193,7 @@ hdr->f.status = s; hdr->f.len = l; hdr->f.data = d
193 if (prism_header) 193 if (prism_header)
194 skb_pull(skb, phdrlen); 194 skb_pull(skb, phdrlen);
195 skb->pkt_type = PACKET_OTHERHOST; 195 skb->pkt_type = PACKET_OTHERHOST;
196 skb->protocol = __constant_htons(ETH_P_802_2); 196 skb->protocol = cpu_to_be16(ETH_P_802_2);
197 memset(skb->cb, 0, sizeof(skb->cb)); 197 memset(skb->cb, 0, sizeof(skb->cb));
198 netif_rx(skb); 198 netif_rx(skb);
199 199
@@ -1094,7 +1094,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
1094 if (skb2 != NULL) { 1094 if (skb2 != NULL) {
1095 /* send to wireless media */ 1095 /* send to wireless media */
1096 skb2->dev = dev; 1096 skb2->dev = dev;
1097 skb2->protocol = __constant_htons(ETH_P_802_3); 1097 skb2->protocol = cpu_to_be16(ETH_P_802_3);
1098 skb_reset_mac_header(skb2); 1098 skb_reset_mac_header(skb2);
1099 skb_reset_network_header(skb2); 1099 skb_reset_network_header(skb2);
1100 /* skb2->network_header += ETH_HLEN; */ 1100 /* skb2->network_header += ETH_HLEN; */
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 0903db786d5..0a4bf94dddf 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -609,7 +609,7 @@ static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
609 skb->dev = ap->local->apdev; 609 skb->dev = ap->local->apdev;
610 skb_pull(skb, hostap_80211_get_hdrlen(fc)); 610 skb_pull(skb, hostap_80211_get_hdrlen(fc));
611 skb->pkt_type = PACKET_OTHERHOST; 611 skb->pkt_type = PACKET_OTHERHOST;
612 skb->protocol = __constant_htons(ETH_P_802_2); 612 skb->protocol = cpu_to_be16(ETH_P_802_2);
613 memset(skb->cb, 0, sizeof(skb->cb)); 613 memset(skb->cb, 0, sizeof(skb->cb));
614 netif_rx(skb); 614 netif_rx(skb);
615} 615}
@@ -2281,7 +2281,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
2281 WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_BEACON) 2281 WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_BEACON)
2282 goto drop; 2282 goto drop;
2283 2283
2284 skb->protocol = __constant_htons(ETH_P_HOSTAP); 2284 skb->protocol = cpu_to_be16(ETH_P_HOSTAP);
2285 handle_ap_item(local, skb, rx_stats); 2285 handle_ap_item(local, skb, rx_stats);
2286 return; 2286 return;
2287 2287
@@ -2310,7 +2310,7 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
2310 hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, 16); 2310 hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, 16);
2311 2311
2312 /* Generate a fake pspoll frame to start packet delivery */ 2312 /* Generate a fake pspoll frame to start packet delivery */
2313 hdr->frame_ctl = __constant_cpu_to_le16( 2313 hdr->frame_ctl = cpu_to_le16(
2314 IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 2314 IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
2315 memcpy(hdr->addr1, local->dev->dev_addr, ETH_ALEN); 2315 memcpy(hdr->addr1, local->dev->dev_addr, ETH_ALEN);
2316 memcpy(hdr->addr2, sta->addr, ETH_ALEN); 2316 memcpy(hdr->addr2, sta->addr, ETH_ALEN);
@@ -2754,7 +2754,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
2754 if (meta->flags & HOSTAP_TX_FLAGS_ADD_MOREDATA) { 2754 if (meta->flags & HOSTAP_TX_FLAGS_ADD_MOREDATA) {
2755 /* indicate to STA that more frames follow */ 2755 /* indicate to STA that more frames follow */
2756 hdr->frame_ctl |= 2756 hdr->frame_ctl |=
2757 __constant_cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2757 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2758 } 2758 }
2759 2759
2760 if (meta->flags & HOSTAP_TX_FLAGS_BUFFERED_FRAME) { 2760 if (meta->flags & HOSTAP_TX_FLAGS_BUFFERED_FRAME) {
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index c40fdf4c79d..8618b3355eb 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1638,7 +1638,7 @@ static int prism2_request_hostscan(struct net_device *dev,
1638 memset(&scan_req, 0, sizeof(scan_req)); 1638 memset(&scan_req, 0, sizeof(scan_req));
1639 scan_req.channel_list = cpu_to_le16(local->channel_mask & 1639 scan_req.channel_list = cpu_to_le16(local->channel_mask &
1640 local->scan_channel_mask); 1640 local->scan_channel_mask);
1641 scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS); 1641 scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
1642 if (ssid) { 1642 if (ssid) {
1643 if (ssid_len > 32) 1643 if (ssid_len > 32)
1644 return -EINVAL; 1644 return -EINVAL;
@@ -1668,7 +1668,7 @@ static int prism2_request_scan(struct net_device *dev)
1668 memset(&scan_req, 0, sizeof(scan_req)); 1668 memset(&scan_req, 0, sizeof(scan_req));
1669 scan_req.channel_list = cpu_to_le16(local->channel_mask & 1669 scan_req.channel_list = cpu_to_le16(local->channel_mask &
1670 local->scan_channel_mask); 1670 local->scan_channel_mask);
1671 scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS); 1671 scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
1672 1672
1673 /* FIX: 1673 /* FIX:
1674 * It seems to be enough to set roaming mode for a short moment to 1674 * It seems to be enough to set roaming mode for a short moment to
@@ -2514,7 +2514,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
2514 u16 rate; 2514 u16 rate;
2515 2515
2516 memset(&scan_req, 0, sizeof(scan_req)); 2516 memset(&scan_req, 0, sizeof(scan_req));
2517 scan_req.channel_list = __constant_cpu_to_le16(0x3fff); 2517 scan_req.channel_list = cpu_to_le16(0x3fff);
2518 switch (value) { 2518 switch (value) {
2519 case 1: rate = HFA384X_RATES_1MBPS; break; 2519 case 1: rate = HFA384X_RATES_1MBPS; break;
2520 case 2: rate = HFA384X_RATES_2MBPS; break; 2520 case 2: rate = HFA384X_RATES_2MBPS; break;
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 3d5cc4463d4..1d5dc3e9c5f 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -150,6 +150,7 @@ config IPW2200_DEBUG
150 150
151config LIBIPW 151config LIBIPW
152 tristate 152 tristate
153 depends on PCI && WLAN_80211
153 select WIRELESS_EXT 154 select WIRELESS_EXT
154 select CRYPTO 155 select CRYPTO
155 select CRYPTO_ARC4 156 select CRYPTO_ARC4
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 625f2cf99fa..0420d3d35dd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -8272,7 +8272,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8272 skb_reset_mac_header(skb); 8272 skb_reset_mac_header(skb);
8273 8273
8274 skb->pkt_type = PACKET_OTHERHOST; 8274 skb->pkt_type = PACKET_OTHERHOST;
8275 skb->protocol = __constant_htons(ETH_P_80211_STATS); 8275 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8276 memset(skb->cb, 0, sizeof(rxb->skb->cb)); 8276 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8277 netif_rx(skb); 8277 netif_rx(skb);
8278 rxb->skb = NULL; 8278 rxb->skb = NULL;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 47bee0ee0a7..7b3bad1796c 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,25 +1,26 @@
1config IWLWIFI 1config IWLWIFI
2 tristate 2 bool "Intel Wireless Wifi"
3 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
4 default y
3 5
4config IWLCORE 6config IWLCORE
5 tristate "Intel Wireless Wifi Core" 7 tristate "Intel Wireless Wifi Core"
6 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 8 depends on IWLWIFI
7 select LIB80211 9 select LIB80211
8 select IWLWIFI
9 select MAC80211_LEDS if IWLWIFI_LEDS 10 select MAC80211_LEDS if IWLWIFI_LEDS
10 select LEDS_CLASS if IWLWIFI_LEDS 11 select LEDS_CLASS if IWLWIFI_LEDS
11 select RFKILL if IWLWIFI_RFKILL 12 select RFKILL if IWLWIFI_RFKILL
12 13
13config IWLWIFI_LEDS 14config IWLWIFI_LEDS
14 bool 15 bool "Enable LED support in iwlagn driver"
15 default n 16 depends on IWLCORE
16 17
17config IWLWIFI_RFKILL 18config IWLWIFI_RFKILL
18 boolean "Iwlwifi RF kill support" 19 bool "Enable RF kill support in iwlagn driver"
19 depends on IWLCORE 20 depends on IWLCORE
20 21
21config IWLWIFI_DEBUG 22config IWLWIFI_DEBUG
22 bool "Enable full debugging output in iwlagn driver" 23 bool "Enable full debugging output in iwlagn and iwl3945 drivers"
23 depends on IWLCORE 24 depends on IWLCORE
24 ---help--- 25 ---help---
25 This option will enable debug tracing output for the iwlwifi drivers 26 This option will enable debug tracing output for the iwlwifi drivers
@@ -51,7 +52,7 @@ config IWLWIFI_DEBUGFS
51 52
52config IWLAGN 53config IWLAGN
53 tristate "Intel Wireless WiFi Next Gen AGN" 54 tristate "Intel Wireless WiFi Next Gen AGN"
54 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 55 depends on IWLWIFI
55 select FW_LOADER 56 select FW_LOADER
56 select IWLCORE 57 select IWLCORE
57 ---help--- 58 ---help---
@@ -104,13 +105,12 @@ config IWL5000
104 105
105config IWL3945 106config IWL3945
106 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection" 107 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
107 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 108 depends on IWLWIFI
108 select FW_LOADER 109 select FW_LOADER
109 select LIB80211 110 select LIB80211
110 select IWLWIFI
111 select MAC80211_LEDS if IWL3945_LEDS 111 select MAC80211_LEDS if IWL3945_LEDS
112 select LEDS_CLASS if IWL3945_LEDS 112 select LEDS_CLASS if IWL3945_LEDS
113 select RFKILL if IWL3945_RFKILL 113 select RFKILL if IWLWIFI_RFKILL
114 ---help--- 114 ---help---
115 Select to build the driver supporting the: 115 Select to build the driver supporting the:
116 116
@@ -133,10 +133,6 @@ config IWL3945
133 say M here and read <file:Documentation/kbuild/modules.txt>. The 133 say M here and read <file:Documentation/kbuild/modules.txt>. The
134 module will be called iwl3945.ko. 134 module will be called iwl3945.ko.
135 135
136config IWL3945_RFKILL
137 bool "Enable RF kill support in iwl3945 drivers"
138 depends on IWL3945
139
140config IWL3945_SPECTRUM_MEASUREMENT 136config IWL3945_SPECTRUM_MEASUREMENT
141 bool "Enable Spectrum Measurement in iwl3945 drivers" 137 bool "Enable Spectrum Measurement in iwl3945 drivers"
142 depends on IWL3945 138 depends on IWL3945
@@ -148,30 +144,3 @@ config IWL3945_LEDS
148 depends on IWL3945 144 depends on IWL3945
149 ---help--- 145 ---help---
150 This option enables LEDS for the iwl3945 driver. 146 This option enables LEDS for the iwl3945 driver.
151
152config IWL3945_DEBUG
153 bool "Enable full debugging output in iwl3945 driver"
154 depends on IWL3945
155 ---help---
156 This option will enable debug tracing output for the iwl3945
157 driver.
158
159 This will result in the kernel module being ~100k larger. You can
160 control which debug output is sent to the kernel log by setting the
161 value in
162
163 /sys/bus/pci/drivers/${DRIVER}/debug_level
164
165 This entry will only exist if this option is enabled.
166
167 To set a value, simply echo an 8-byte hex value to the same file:
168
169 % echo 0x43fff > /sys/bus/pci/drivers/${DRIVER}/debug_level
170
171 You can find the list of debug mask values in:
172 drivers/net/wireless/iwlwifi/iwl-3945-debug.h
173
174 If this is your first time using this driver, you should say Y here
175 as the debug information can assist others in helping you resolve
176 any problems you may encounter.
177
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 0be9e6b66aa..ddc8b31b260 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -8,10 +8,12 @@ iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
8iwlcore-$(CONFIG_IWLAGN_SPECTRUM_MEASUREMENT) += iwl-spectrum.o 8iwlcore-$(CONFIG_IWLAGN_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
9 9
10obj-$(CONFIG_IWLAGN) += iwlagn.o 10obj-$(CONFIG_IWLAGN) += iwlagn.o
11iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-hcmd-check.o 11iwlagn-objs := iwl-agn.o iwl-agn-rs.o
12 12
13iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 13iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
14iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 14iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
15iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
16iwlagn-$(CONFIG_IWL5000) += iwl-100.o
15 17
16obj-$(CONFIG_IWL3945) += iwl3945.o 18obj-$(CONFIG_IWL3945) += iwl3945.o
17iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o 19iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-100.c b/drivers/net/wireless/iwlwifi/iwl-100.c
new file mode 100644
index 00000000000..11d206abb71
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-100.c
@@ -0,0 +1,73 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-sta.h"
45#include "iwl-helpers.h"
46#include "iwl-5000-hw.h"
47
48/* Highest firmware API version supported */
49#define IWL100_UCODE_API_MAX 2
50
51/* Lowest firmware API version supported */
52#define IWL100_UCODE_API_MIN 1
53
54#define IWL100_FW_PRE "iwlwifi-100-"
55#define _IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode"
56#define IWL100_MODULE_FIRMWARE(api) _IWL100_MODULE_FIRMWARE(api)
57
58struct iwl_cfg iwl100_bgn_cfg = {
59 .name = "100 Series BGN",
60 .fw_name_pre = IWL100_FW_PRE,
61 .ucode_api_max = IWL100_UCODE_API_MAX,
62 .ucode_api_min = IWL100_UCODE_API_MIN,
63 .sku = IWL_SKU_G|IWL_SKU_N,
64 .ops = &iwl5000_ops,
65 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
66 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
67 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
68 .mod_params = &iwl50_mod_params,
69 .valid_tx_ant = ANT_A,
70 .valid_rx_ant = ANT_AB,
71 .need_pll_cfg = true,
72};
73
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-commands.h b/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
deleted file mode 100644
index c6f4eb54a2b..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
+++ /dev/null
@@ -1,1702 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-3945-commands.h) only for uCode API definitions.
65 * Please use iwl-3945-hw.h for hardware-related definitions.
66 * Please use iwl-3945.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_3945_commands_h__
70#define __iwl_3945_commands_h__
71
72/* uCode version contains 4 values: Major/Minor/API/Serial */
73#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
74#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
75#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
76#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
77
78enum {
79 REPLY_ALIVE = 0x1,
80 REPLY_ERROR = 0x2,
81
82 /* RXON and QOS commands */
83 REPLY_RXON = 0x10,
84 REPLY_RXON_ASSOC = 0x11,
85 REPLY_QOS_PARAM = 0x13,
86 REPLY_RXON_TIMING = 0x14,
87
88 /* Multi-Station support */
89 REPLY_ADD_STA = 0x18,
90 REPLY_REMOVE_STA = 0x19, /* not used */
91 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */
92
93 /* RX, TX, LEDs */
94 REPLY_3945_RX = 0x1b, /* 3945 only */
95 REPLY_TX = 0x1c,
96 REPLY_RATE_SCALE = 0x47, /* 3945 only */
97 REPLY_LEDS_CMD = 0x48,
98 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
99
100 /* 802.11h related */
101 RADAR_NOTIFICATION = 0x70, /* not used */
102 REPLY_QUIET_CMD = 0x71, /* not used */
103 REPLY_CHANNEL_SWITCH = 0x72,
104 CHANNEL_SWITCH_NOTIFICATION = 0x73,
105 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
106 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
107
108 /* Power Management */
109 POWER_TABLE_CMD = 0x77,
110 PM_SLEEP_NOTIFICATION = 0x7A,
111 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
112
113 /* Scan commands and notifications */
114 REPLY_SCAN_CMD = 0x80,
115 REPLY_SCAN_ABORT_CMD = 0x81,
116 SCAN_START_NOTIFICATION = 0x82,
117 SCAN_RESULTS_NOTIFICATION = 0x83,
118 SCAN_COMPLETE_NOTIFICATION = 0x84,
119
120 /* IBSS/AP commands */
121 BEACON_NOTIFICATION = 0x90,
122 REPLY_TX_BEACON = 0x91,
123 WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */
124
125 /* Miscellaneous commands */
126 QUIET_NOTIFICATION = 0x96, /* not used */
127 REPLY_TX_PWR_TABLE_CMD = 0x97,
128 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
129
130 /* Bluetooth device coexistence config command */
131 REPLY_BT_CONFIG = 0x9b,
132
133 /* Statistics */
134 REPLY_STATISTICS_CMD = 0x9c,
135 STATISTICS_NOTIFICATION = 0x9d,
136
137 /* RF-KILL commands and notifications */
138 REPLY_CARD_STATE_CMD = 0xa0,
139 CARD_STATE_NOTIFICATION = 0xa1,
140
141 /* Missed beacons notification */
142 MISSED_BEACONS_NOTIFICATION = 0xa2,
143
144 REPLY_MAX = 0xff
145};
146
147/******************************************************************************
148 * (0)
149 * Commonly used structures and definitions:
150 * Command header, txpower
151 *
152 *****************************************************************************/
153
154/* iwl3945_cmd_header flags value */
155#define IWL_CMD_FAILED_MSK 0x40
156
157/**
158 * struct iwl3945_cmd_header
159 *
160 * This header format appears in the beginning of each command sent from the
161 * driver, and each response/notification received from uCode.
162 */
163struct iwl3945_cmd_header {
164 u8 cmd; /* Command ID: REPLY_RXON, etc. */
165 u8 flags; /* IWL_CMD_* */
166 /*
167 * The driver sets up the sequence number to values of its choosing.
168 * uCode does not use this value, but passes it back to the driver
169 * when sending the response to each driver-originated command, so
170 * the driver can match the response to the command. Since the values
171 * don't get used by uCode, the driver may set up an arbitrary format.
172 *
173 * There is one exception: uCode sets bit 15 when it originates
174 * the response/notification, i.e. when the response/notification
175 * is not a direct response to a command sent by the driver. For
176 * example, uCode issues REPLY_3945_RX when it sends a received frame
177 * to the driver; it is not a direct response to any driver command.
178 *
179 * The Linux driver uses the following format:
180 *
181 * 0:7 index/position within Tx queue
182 * 8:13 Tx queue selection
183 * 14:14 driver sets this to indicate command is in the 'huge'
184 * storage at the end of the command buffers, i.e. scan cmd
185 * 15:15 uCode sets this in uCode-originated response/notification
186 */
187 __le16 sequence;
188
189 /* command or response/notification data follows immediately */
190 u8 data[0];
191} __attribute__ ((packed));
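
Since the sequence layout described above is driver-defined, a small packing helper makes the bit positions concrete. This is an editor's sketch, not code from the driver; the function name is hypothetical and only the bit layout is taken from the comment:

/* Editor's sketch -- not part of the original header. Packs the
 * driver-chosen sequence format documented above. */
static inline __le16 iwl3945_example_sequence(u8 txq_id, u16 idx, bool huge)
{
	u16 seq;

	seq = idx & 0xFF;		/* 0:7   index within Tx queue */
	seq |= (txq_id & 0x3F) << 8;	/* 8:13  Tx queue selection */
	if (huge)
		seq |= 1 << 14;		/* 14    command is in 'huge' storage */
					/* 15    set only by uCode on notifications */
	return cpu_to_le16(seq);
}
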
192
193/**
194 * struct iwl3945_tx_power
195 *
196 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
197 *
198 * Each entry contains two values:
199 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
200 * linear value that multiplies the output of the digital signal processor,
201 * before being sent to the analog radio.
202 * 2) Radio gain. This sets the analog gain of the radio Tx path.
203 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
204 *
205 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
206 */
207struct iwl3945_tx_power {
208 u8 tx_gain; /* gain for analog radio */
209 u8 dsp_atten; /* gain for DSP */
210} __attribute__ ((packed));
211
212/**
213 * struct iwl3945_power_per_rate
214 *
215 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
216 */
217struct iwl3945_power_per_rate {
218 u8 rate; /* plcp */
219 struct iwl3945_tx_power tpc;
220 u8 reserved;
221} __attribute__ ((packed));
222
223/******************************************************************************
224 * (0a)
225 * Alive and Error Commands & Responses:
226 *
227 *****************************************************************************/
228
229#define UCODE_VALID_OK cpu_to_le32(0x1)
230#define INITIALIZE_SUBTYPE (9)
231
232/*
233 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
234 *
235 * uCode issues this "initialize alive" notification once the initialization
236 * uCode image has completed its work, and is ready to load the runtime image.
237 * This is the *first* "alive" notification that the driver will receive after
238 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
239 *
240 * See comments documenting "BSM" (bootstrap state machine).
241 */
242struct iwl3945_init_alive_resp {
243 u8 ucode_minor;
244 u8 ucode_major;
245 __le16 reserved1;
246 u8 sw_rev[8];
247 u8 ver_type;
248 u8 ver_subtype; /* "9" for initialize alive */
249 __le16 reserved2;
250 __le32 log_event_table_ptr;
251 __le32 error_event_table_ptr;
252 __le32 timestamp;
253 __le32 is_valid;
254} __attribute__ ((packed));
255
256
257/**
258 * REPLY_ALIVE = 0x1 (response only, not a command)
259 *
260 * uCode issues this "alive" notification once the runtime image is ready
261 * to receive commands from the driver. This is the *second* "alive"
262 * notification that the driver will receive after rebooting uCode;
263 * this "alive" is indicated by subtype field != 9.
264 *
265 * See comments documenting "BSM" (bootstrap state machine).
266 *
267 * This response includes two pointers to structures within the device's
268 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
269 *
270 * 1) log_event_table_ptr indicates base of the event log. This traces
271 * a 256-entry history of uCode execution within a circular buffer.
272 *
273 * 2) error_event_table_ptr indicates base of the error log. This contains
274 * information about any uCode error that occurs.
275 *
276 * The Linux driver can print both logs to the system log when a uCode error
277 * occurs.
278 */
279struct iwl3945_alive_resp {
280 u8 ucode_minor;
281 u8 ucode_major;
282 __le16 reserved1;
283 u8 sw_rev[8];
284 u8 ver_type;
285 u8 ver_subtype; /* not "9" for runtime alive */
286 __le16 reserved2;
287 __le32 log_event_table_ptr; /* SRAM address for event log */
288 __le32 error_event_table_ptr; /* SRAM address for error log */
289 __le32 timestamp;
290 __le32 is_valid;
291} __attribute__ ((packed));
292
293union tsf {
294 u8 byte[8];
295 __le16 word[4];
296 __le32 dw[2];
297};
298
299/*
300 * REPLY_ERROR = 0x2 (response only, not a command)
301 */
302struct iwl3945_error_resp {
303 __le32 error_type;
304 u8 cmd_id;
305 u8 reserved1;
306 __le16 bad_cmd_seq_num;
307 __le16 reserved2;
308 __le32 error_info;
309 union tsf timestamp;
310} __attribute__ ((packed));
311
312/******************************************************************************
313 * (1)
314 * RXON Commands & Responses:
315 *
316 *****************************************************************************/
317
318/*
319 * Rx config defines & structure
320 */
321/* rx_config device types */
322enum {
323 RXON_DEV_TYPE_AP = 1,
324 RXON_DEV_TYPE_ESS = 3,
325 RXON_DEV_TYPE_IBSS = 4,
326 RXON_DEV_TYPE_SNIFFER = 6,
327};
328
329/* rx_config flags */
330/* band & modulation selection */
331#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
332#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
333/* auto detection enable */
334#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
335/* TGg protection when tx */
336#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
337/* cck short slot & preamble */
338#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
339#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
340/* antenna selection */
341#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
342#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
343#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
344#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
345/* radar detection enable */
346#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
347#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
348/* rx response to host with 8-byte TSF
349* (according to ON_AIR deassertion) */
350#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
351
352/* rx_config filter flags */
353/* accept all data frames */
354#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
355/* pass control & management to host */
356#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
357/* accept multi-cast */
358#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
359/* don't decrypt uni-cast frames */
360#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
361/* don't decrypt multi-cast frames */
362#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
363/* STA is associated */
364#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
365/* transfer to host non bssid beacons in associated state */
366#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
367
368/**
369 * REPLY_RXON = 0x10 (command, has simple generic response)
370 *
371 * RXON tunes the radio tuner to a service channel, and sets up a number
372 * of parameters that are used primarily for Rx, but also for Tx operations.
373 *
374 * NOTE: When tuning to a new channel, driver must set the
375 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
376 * info within the device, including the station tables, tx retry
377 * rate tables, and txpower tables. Driver must build a new station
378 * table and txpower table before transmitting anything on the RXON
379 * channel.
380 *
381 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
382 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
383 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
384 */
385struct iwl3945_rxon_cmd {
386 u8 node_addr[6];
387 __le16 reserved1;
388 u8 bssid_addr[6];
389 __le16 reserved2;
390 u8 wlap_bssid_addr[6];
391 __le16 reserved3;
392 u8 dev_type;
393 u8 air_propagation;
394 __le16 reserved4;
395 u8 ofdm_basic_rates;
396 u8 cck_basic_rates;
397 __le16 assoc_id;
398 __le32 flags;
399 __le32 filter_flags;
400 __le16 channel;
401 __le16 reserved5;
402} __attribute__ ((packed));
403
404/*
405 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
406 */
407struct iwl3945_rxon_assoc_cmd {
408 __le32 flags;
409 __le32 filter_flags;
410 u8 ofdm_basic_rates;
411 u8 cck_basic_rates;
412 __le16 reserved;
413} __attribute__ ((packed));
414
415/*
416 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
417 */
418struct iwl3945_rxon_time_cmd {
419 union tsf timestamp;
420 __le16 beacon_interval;
421 __le16 atim_window;
422 __le32 beacon_init_val;
423 __le16 listen_interval;
424 __le16 reserved;
425} __attribute__ ((packed));
426
427/*
428 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
429 */
430struct iwl3945_channel_switch_cmd {
431 u8 band;
432 u8 expect_beacon;
433 __le16 channel;
434 __le32 rxon_flags;
435 __le32 rxon_filter_flags;
436 __le32 switch_time;
437 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
438} __attribute__ ((packed));
439
440/*
441 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
442 */
443struct iwl3945_csa_notification {
444 __le16 band;
445 __le16 channel;
446 __le32 status; /* 0 - OK, 1 - fail */
447} __attribute__ ((packed));
448
449/******************************************************************************
450 * (2)
451 * Quality-of-Service (QOS) Commands & Responses:
452 *
453 *****************************************************************************/
454
455/**
456 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
457 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
458 *
459 * @cw_min: Contention window, start value in numbers of slots.
460 * Should be a power-of-2, minus 1. Device's default is 0x0f.
461 * @cw_max: Contention window, max value in numbers of slots.
462 * Should be a power-of-2, minus 1. Device's default is 0x3f.
463 * @aifsn: Number of slots in Arbitration Interframe Space (before
464 * performing random backoff timing prior to Tx). Device default 1.
465 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
466 *
467 * Device will automatically increase contention window by (2*CW) + 1 for each
468 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
469 * value, to cap the CW value.
470 */
471struct iwl3945_ac_qos {
472 __le16 cw_min;
473 __le16 cw_max;
474 u8 aifsn;
475 u8 reserved1;
476 __le16 edca_txop;
477} __attribute__ ((packed));
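
To make the contention-window growth rule above concrete: with the documented defaults cw_min = 0x0f and cw_max = 0x3f, the window grows 15 -> 31 -> 63 and then stays at 63, because cw_max is applied as an AND mask. An editor's sketch of that rule, assuming "increase by (2*CW) + 1" means new_cw = (2 * cw) + 1, the usual 802.11 EDCA behaviour:

/* Editor's sketch -- not part of the original header. Models the
 * per-retry contention window growth described above; cw_max acts
 * as a bit mask that caps the result. */
static u16 iwl3945_example_next_cw(u16 cw, u16 cw_max)
{
	return ((2 * cw) + 1) & cw_max;
}
/* With cw_min = 0x0f, cw_max = 0x3f: 15 -> 31 -> 63 -> 63 (capped). */
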
478
479/* QoS flags defines */
480#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
481#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
482#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
483
484/* Number of Access Categories (AC) (EDCA), queues 0..3 */
485#define AC_NUM 4
486
487/*
488 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
489 *
490 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
491 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
492 */
493struct iwl3945_qosparam_cmd {
494 __le32 qos_flags;
495 struct iwl3945_ac_qos ac[AC_NUM];
496} __attribute__ ((packed));
497
498/******************************************************************************
499 * (3)
500 * Add/Modify Stations Commands & Responses:
501 *
502 *****************************************************************************/
503/*
504 * Multi station support
505 */
506
507/* Special, dedicated locations within device's station table */
508#define IWL_AP_ID 0
509#define IWL_MULTICAST_ID 1
510#define IWL_STA_ID 2
511#define IWL3945_BROADCAST_ID 24
512#define IWL3945_STATION_COUNT 25
513
514#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
515#define IWL_INVALID_STATION 255
516
 517#define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
 518#define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
519
520/* Use in mode field. 1: modify existing entry, 0: add new station entry */
521#define STA_CONTROL_MODIFY_MSK 0x01
522
523/* key flags __le16*/
524#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
525#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
526#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
527#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
528#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
529
530#define STA_KEY_FLG_KEYID_POS 8
531#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
532/* wep key is either from global key (0) or from station info array (1) */
533#define STA_KEY_FLG_WEP_KEY_MAP_MSK cpu_to_le16(0x0008)
534
535/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
536#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
537#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
538
539/* Flags indicate whether to modify vs. don't change various station params */
540#define STA_MODIFY_KEY_MASK 0x01
541#define STA_MODIFY_TID_DISABLE_TX 0x02
542#define STA_MODIFY_TX_RATE_MSK 0x04
543
544/*
545 * Antenna masks:
546 * bit14:15 01 B inactive, A active
547 * 10 B active, A inactive
548 * 11 Both active
549 */
550#define RATE_MCS_ANT_A_POS 14
551#define RATE_MCS_ANT_B_POS 15
552#define RATE_MCS_ANT_A_MSK 0x4000
553#define RATE_MCS_ANT_B_MSK 0x8000
554#define RATE_MCS_ANT_AB_MSK 0xc000
555
556struct iwl3945_keyinfo {
557 __le16 key_flags;
558 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
559 u8 reserved1;
560 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
561 u8 key_offset;
562 u8 reserved2;
563 u8 key[16]; /* 16-byte unicast decryption key */
564} __attribute__ ((packed));
565
566/**
567 * struct sta_id_modify
568 * @addr[ETH_ALEN]: station's MAC address
569 * @sta_id: index of station in uCode's station table
570 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
571 *
572 * Driver selects unused table index when adding new station,
573 * or the index to a pre-existing station entry when modifying that station.
574 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
575 *
576 * modify_mask flags select which parameters to modify vs. leave alone.
577 */
578struct sta_id_modify {
579 u8 addr[ETH_ALEN];
580 __le16 reserved1;
581 u8 sta_id;
582 u8 modify_mask;
583 __le16 reserved2;
584} __attribute__ ((packed));
585
586/*
587 * REPLY_ADD_STA = 0x18 (command)
588 *
589 * The device contains an internal table of per-station information,
590 * with info on security keys, aggregation parameters, and Tx rates for
591 * initial Tx attempt and any retries (4965 uses REPLY_TX_LINK_QUALITY_CMD,
592 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
593 *
594 * REPLY_ADD_STA sets up the table entry for one station, either creating
595 * a new entry, or modifying a pre-existing one.
596 *
597 * NOTE: RXON command (without "associated" bit set) wipes the station table
598 * clean. Moving into RF_KILL state does this also. Driver must set up
599 * new station table before transmitting anything on the RXON channel
600 * (except active scans or active measurements; those commands carry
601 * their own txpower/rate setup data).
602 *
603 * When getting started on a new channel, driver must set up the
604 * IWL3945_BROADCAST_ID entry (last entry in the table). For a client
605 * station in a BSS, once an AP is selected, driver sets up the AP STA
606 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
607 * are all that are needed for a BSS client station. If the device is
608 * used as AP, or in an IBSS network, driver must set up station table
609 * entries for all STAs in network, starting with index IWL_STA_ID.
610 */
611struct iwl3945_addsta_cmd {
612 u8 mode; /* 1: modify existing, 0: add new station */
613 u8 reserved[3];
614 struct sta_id_modify sta;
615 struct iwl3945_keyinfo key;
616 __le32 station_flags; /* STA_FLG_* */
617 __le32 station_flags_msk; /* STA_FLG_* */
618
619 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
620 * corresponding to bit (e.g. bit 5 controls TID 5).
621 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
622 __le16 tid_disable_tx;
623
624 __le16 rate_n_flags;
625
626 /* TID for which to add block-ack support.
627 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
628 u8 add_immediate_ba_tid;
629
630 /* TID for which to remove block-ack support.
631 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
632 u8 remove_immediate_ba_tid;
633
634 /* Starting Sequence Number for added block-ack support.
635 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
636 __le16 add_immediate_ba_ssn;
637} __attribute__ ((packed));
638
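/*
 * Editor's sketch (not part of the original header): a minimal, hypothetical
 * example of filling REPLY_ADD_STA for the 3945 broadcast entry after an
 * RXON has cleared the station table.  The helper name and the all-ones
 * address constant are illustrative assumptions only.
 */
static inline void iwl3945_fill_bcast_sta(struct iwl3945_addsta_cmd *cmd)
{
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	memset(cmd, 0, sizeof(*cmd));
	cmd->mode = 0;				/* 0: add a new entry */
	memcpy(cmd->sta.addr, bcast_addr, ETH_ALEN);
	cmd->sta.sta_id = IWL3945_BROADCAST_ID;	/* last entry in table */
	/* sta.modify_mask and station_flags stay 0 for a plain add */
}
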
639#define ADD_STA_SUCCESS_MSK 0x1
640#define ADD_STA_NO_ROOM_IN_TABLE 0x2
641#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
642/*
643 * REPLY_ADD_STA = 0x18 (response)
644 */
645struct iwl3945_add_sta_resp {
646 u8 status; /* ADD_STA_* */
647} __attribute__ ((packed));
648
649
650/******************************************************************************
651 * (4)
652 * Rx Responses:
653 *
654 *****************************************************************************/
655
656struct iwl3945_rx_frame_stats {
657 u8 phy_count;
658 u8 id;
659 u8 rssi;
660 u8 agc;
661 __le16 sig_avg;
662 __le16 noise_diff;
663 u8 payload[0];
664} __attribute__ ((packed));
665
666struct iwl3945_rx_frame_hdr {
667 __le16 channel;
668 __le16 phy_flags;
669 u8 reserved1;
670 u8 rate;
671 __le16 len;
672 u8 payload[0];
673} __attribute__ ((packed));
674
675#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
676#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
677
678#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
679#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
680#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
681#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
682#define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0)
683
684#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
685#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
686#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
687#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
688#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
689
690#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
691#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
692#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
693#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
694#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
695
696struct iwl3945_rx_frame_end {
697 __le32 status;
698 __le64 timestamp;
699 __le32 beacon_timestamp;
700} __attribute__ ((packed));
701
702/*
703 * REPLY_3945_RX = 0x1b (response only, not a command)
704 *
705 * NOTE: DO NOT dereference from casts to this structure
706 * It is provided only for calculating minimum data set size.
707 * The actual offsets of the hdr and end are dynamic based on
708 * stats.phy_count
709 */
710struct iwl3945_rx_frame {
711 struct iwl3945_rx_frame_stats stats;
712 struct iwl3945_rx_frame_hdr hdr;
713 struct iwl3945_rx_frame_end end;
714} __attribute__ ((packed));
715
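/*
 * Editor's sketch (not part of the original header): because stats.phy_count
 * bytes of PHY data sit between the stats block and the frame header, hdr
 * and end above cannot be dereferenced directly.  Hypothetical accessors
 * (names are illustrative only) might locate them like this:
 */
static inline struct iwl3945_rx_frame_hdr *
iwl3945_rx_hdr(struct iwl3945_rx_frame *rxf)
{
	/* header starts right after the variable-length PHY payload */
	return (struct iwl3945_rx_frame_hdr *)(rxf->stats.payload +
					       rxf->stats.phy_count);
}

static inline struct iwl3945_rx_frame_end *
iwl3945_rx_end(struct iwl3945_rx_frame *rxf)
{
	struct iwl3945_rx_frame_hdr *hdr = iwl3945_rx_hdr(rxf);

	/* end-of-frame status follows the received MAC frame itself */
	return (struct iwl3945_rx_frame_end *)(hdr->payload +
					       le16_to_cpu(hdr->len));
}
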
716/******************************************************************************
717 * (5)
718 * Tx Commands & Responses:
719 *
720 * Driver must place each REPLY_TX command into one of the prioritized Tx
721 * queues in host DRAM, shared between driver and device. When the device's
722 * Tx scheduler and uCode are preparing to transmit, the device pulls the
723 * Tx command over the PCI bus via one of the device's Tx DMA channels,
724 * to fill an internal FIFO from which data will be transmitted.
725 *
726 * uCode handles all timing and protocol related to control frames
727 * (RTS/CTS/ACK), based on flags in the Tx command.
728 *
729 * uCode handles retrying Tx when an ACK is expected but not received.
730 * This includes trying lower data rates than the one requested in the Tx
731 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
732 * REPLY_TX_LINK_QUALITY_CMD (4965).
733 *
734 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
735 * This command must be executed after every RXON command, before Tx can occur.
736 *****************************************************************************/
737
738/* REPLY_TX Tx flags field */
739
740/* 1: Use Request-To-Send protocol before this frame.
741 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */
742#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
743
744/* 1: Transmit Clear-To-Send to self before this frame.
745 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
746 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */
747#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
748
749/* 1: Expect ACK from receiving station
750 * 0: Don't expect ACK (MAC header's duration field should be 0)
751 * Set this for unicast frames, but not broadcast/multicast. */
752#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
753
754/* 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
755 * Tx command's initial_rate_index indicates first rate to try;
756 * uCode walks through table for additional Tx attempts.
757 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
758 * This rate will be used for all Tx attempts; it will not be scaled. */
759#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
760
761/* 1: Expect immediate block-ack.
762 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
763#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
764
765/* 1: Frame requires full Tx-Op protection.
766 * Set this if either RTS or CTS Tx Flag gets set. */
767#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
768
769/* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
770 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
771#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
772#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
773#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
774
775/* 1: Ignore Bluetooth priority for this frame.
776 * 0: Delay Tx until Bluetooth device is done (normal usage). */
777#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12)
778
779/* 1: uCode overrides sequence control field in MAC header.
780 * 0: Driver provides sequence control field in MAC header.
781 * Set this for management frames, non-QOS data frames, non-unicast frames,
782 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
783#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
784
785/* 1: This frame is non-last MPDU; more fragments are coming.
786 * 0: Last fragment, or not using fragmentation. */
787#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
788
789/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
790 * 0: No TSF required in outgoing frame.
791 * Set this for transmitting beacons and probe responses. */
792#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
793
794/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
795 * alignment of frame's payload data field.
796 * 0: No pad
797 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
798 * field (but not both). Driver must align frame data (i.e. data following
799 * MAC header) to DWORD boundary. */
800#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
801
802/* HCCA-AP - disable duration overwriting. */
803#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
804
805/*
806 * TX command security control
807 */
808#define TX_CMD_SEC_WEP 0x01
809#define TX_CMD_SEC_CCM 0x02
810#define TX_CMD_SEC_TKIP 0x03
811#define TX_CMD_SEC_MSK 0x03
812#define TX_CMD_SEC_SHIFT 6
813#define TX_CMD_SEC_KEY128 0x08
814
815/*
816 * REPLY_TX = 0x1c (command)
817 */
818struct iwl3945_tx_cmd {
819 /*
820 * MPDU byte count:
821 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
822 * + 8 byte IV for CCM or TKIP (not used for WEP)
823 * + Data payload
824 * + 8-byte MIC (not used for CCM/WEP)
825 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
826 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
827 * Range: 14-2342 bytes.
828 */
829 __le16 len;
830
831 /*
832 * MPDU or MSDU byte count for next frame.
833 * Used for fragmentation and bursting, but not 11n aggregation.
834 * Same as "len", but for next frame. Set to 0 if not applicable.
835 */
836 __le16 next_frame_len;
837
838 __le32 tx_flags; /* TX_CMD_FLG_* */
839
840 u8 rate;
841
842 /* Index of recipient station in uCode's station table */
843 u8 sta_id;
844 u8 tid_tspec;
845 u8 sec_ctl;
846 u8 key[16];
847 union {
848 u8 byte[8];
849 __le16 word[4];
850 __le32 dw[2];
851 } tkip_mic;
852 __le32 next_frame_info;
853 union {
854 __le32 life_time;
855 __le32 attempt;
856 } stop_time;
857 u8 supp_rates[2];
858 u8 rts_retry_limit; /*byte 50 */
859 u8 data_retry_limit; /*byte 51 */
860 union {
861 __le16 pm_frame_timeout;
862 __le16 attempt_duration;
863 } timeout;
864
865 /*
866 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
867 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
868 */
869 __le16 driver_txop;
870
871 /*
872 * MAC header goes here, followed by 2 bytes padding if MAC header
873 * length is 26 or 30 bytes, followed by payload data
874 */
875 u8 payload[0];
876 struct ieee80211_hdr hdr[0];
877} __attribute__ ((packed));
878
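/*
 * Editor's sketch (not part of the original header): a hedged example of the
 * minimum a driver might set in iwl3945_tx_cmd for a plain (non-QoS) unicast
 * data frame sent at a fixed rate.  The helper name, retry limits, and rate
 * value are illustrative assumptions; real drivers also fill security and
 * timing fields.
 */
static inline void iwl3945_tx_cmd_example(struct iwl3945_tx_cmd *tx,
					  u16 frame_len, u8 plcp_rate,
					  u8 sta_id)
{
	memset(tx, 0, sizeof(*tx));
	tx->len = cpu_to_le16(frame_len);	/* MAC header + payload */
	tx->sta_id = sta_id;			/* recipient's table index */
	tx->rate = plcp_rate;			/* fixed rate, no scaling */
	tx->tx_flags = TX_CMD_FLG_ACK_MSK |	/* unicast: expect an ACK */
		       TX_CMD_FLG_SEQ_CTL_MSK;	/* non-QoS data: uCode seq */
	tx->rts_retry_limit = 7;		/* illustrative limits */
	tx->data_retry_limit = 15;
}
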
879/* TX command response is sent after *all* transmission attempts.
880 *
881 * NOTES:
882 *
883 * TX_STATUS_FAIL_NEXT_FRAG
884 *
885 * If the fragment flag in the MAC header for the frame being transmitted
886 * is set and there is insufficient time to transmit the next frame, the
887 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
888 *
889 * TX_STATUS_FIFO_UNDERRUN
890 *
891 * Indicates the host did not provide bytes to the FIFO fast enough while
892 * a TX was in progress.
893 *
894 * TX_STATUS_FAIL_MGMNT_ABORT
895 *
896 * This status is only possible if the ABORT ON MGMT RX parameter was
897 * set to true with the TX command.
898 *
899 * If the MSB of the status parameter is set then an abort sequence is
900 * required. This sequence consists of the host activating the TX Abort
901 * control line, and then waiting for the TX Abort command response. This
902 * indicates that the device is no longer in a transmit state, and that the
903 * command FIFO has been cleared. The host must then deactivate the TX Abort
904 * control line. Receiving is still allowed in this case.
905 */
906enum {
907 TX_STATUS_SUCCESS = 0x01,
908 TX_STATUS_DIRECT_DONE = 0x02,
909 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
910 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
911 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
912 TX_STATUS_FAIL_MGMNT_ABORT = 0x85,
913 TX_STATUS_FAIL_NEXT_FRAG = 0x86,
914 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
915 TX_STATUS_FAIL_DEST_PS = 0x88,
916 TX_STATUS_FAIL_ABORTED = 0x89,
917 TX_STATUS_FAIL_BT_RETRY = 0x8a,
918 TX_STATUS_FAIL_STA_INVALID = 0x8b,
919 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
920 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
921 TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
922 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
923 TX_STATUS_FAIL_TX_LOCKED = 0x90,
924 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
925};
926
927#define TX_PACKET_MODE_REGULAR 0x0000
928#define TX_PACKET_MODE_BURST_SEQ 0x0100
929#define TX_PACKET_MODE_BURST_FIRST 0x0200
930
931enum {
932 TX_POWER_PA_NOT_ACTIVE = 0x0,
933};
934
935enum {
936 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
937 TX_STATUS_DELAY_MSK = 0x00000040,
938 TX_STATUS_ABORT_MSK = 0x00000080,
939 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
940 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
941 TX_RESERVED = 0x00780000, /* bits 19:22 */
942 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
943 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
944};
945
946/*
947 * REPLY_TX = 0x1c (response)
948 */
949struct iwl3945_tx_resp {
950 u8 failure_rts;
951 u8 failure_frame;
952 u8 bt_kill_count;
953 u8 rate;
954 __le32 wireless_media_time;
955 __le32 status; /* TX status */
956} __attribute__ ((packed));
957
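/*
 * Editor's sketch (not part of the original header): decoding the status
 * word in iwl3945_tx_resp using the masks above.  The helper name is an
 * illustrative assumption.
 */
static inline bool iwl3945_tx_resp_success(struct iwl3945_tx_resp *resp)
{
	u32 status = le32_to_cpu(resp->status);

	/* bits 0:7 carry the TX_STATUS_* code; the MSB flags an abort */
	return (status & TX_STATUS_MSK) == TX_STATUS_SUCCESS ||
	       (status & TX_STATUS_MSK) == TX_STATUS_DIRECT_DONE;
}
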
958/*
959 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
960 */
961struct iwl3945_txpowertable_cmd {
962 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
963 u8 reserved;
964 __le16 channel;
965 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
966} __attribute__ ((packed));
967
968struct iwl3945_rate_scaling_info {
969 __le16 rate_n_flags;
970 u8 try_cnt;
971 u8 next_rate_index;
972} __attribute__ ((packed));
973
974/**
975 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
976 *
977 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
978 *
979 * NOTE: The table of rates passed to the uCode via the
980 * RATE_SCALE command sets up the corresponding order of
981 * rates used for all related commands, including rate
982 * masks, etc.
983 *
984 * For example, if you set 9MB (PLCP 0x0f) as the first
985 * rate in the rate table, the bit mask for that rate
986 * when passed through ofdm_basic_rates on the REPLY_RXON
987 * command would be bit 0 (1 << 0)
988 */
989struct iwl3945_rate_scaling_cmd {
990 u8 table_id;
991 u8 reserved[3];
992 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
993} __attribute__ ((packed));
994
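/*
 * Editor's sketch (not part of the original header): one hypothetical way to
 * fill the REPLY_RATE_SCALE table so each entry retries a few times and then
 * falls back to the next lower table index.  plcp_rates[], the try count,
 * and the helper name are illustrative assumptions; the real driver derives
 * these from its rate tables.
 */
static inline void
iwl3945_fill_rate_scale(struct iwl3945_rate_scaling_cmd *cmd,
			const u8 *plcp_rates)
{
	int i;

	memset(cmd, 0, sizeof(*cmd));
	for (i = 0; i < IWL_MAX_RATES; i++) {
		cmd->table[i].rate_n_flags = cpu_to_le16(plcp_rates[i]);
		cmd->table[i].try_cnt = 3;	/* attempts at this rate */
		/* fall back toward index 0; lowest rate retries itself */
		cmd->table[i].next_rate_index = i ? i - 1 : 0;
	}
}
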
995/*
996 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
997 *
998 * 3945 and 4965 support hardware handshake with Bluetooth device on
999 * same platform. Bluetooth device alerts wireless device when it will Tx;
1000 * wireless device can delay or kill its own Tx to accommodate.
1001 */
1002struct iwl3945_bt_cmd {
1003 u8 flags;
1004 u8 lead_time;
1005 u8 max_kill;
1006 u8 reserved;
1007 __le32 kill_ack_mask;
1008 __le32 kill_cts_mask;
1009} __attribute__ ((packed));
1010
1011/******************************************************************************
1012 * (6)
1013 * Spectrum Management (802.11h) Commands, Responses, Notifications:
1014 *
1015 *****************************************************************************/
1016
1017/*
1018 * Spectrum Management
1019 */
1020#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
1021 RXON_FILTER_CTL2HOST_MSK | \
1022 RXON_FILTER_ACCEPT_GRP_MSK | \
1023 RXON_FILTER_DIS_DECRYPT_MSK | \
1024 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
1025 RXON_FILTER_ASSOC_MSK | \
1026 RXON_FILTER_BCON_AWARE_MSK)
1027
1028struct iwl3945_measure_channel {
1029 __le32 duration; /* measurement duration in extended beacon
1030 * format */
1031 u8 channel; /* channel to measure */
1032 u8 type; /* see enum iwl3945_measure_type */
1033 __le16 reserved;
1034} __attribute__ ((packed));
1035
1036/*
1037 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
1038 */
1039struct iwl3945_spectrum_cmd {
1040 __le16 len; /* number of bytes starting from token */
1041 u8 token; /* token id */
1042 u8 id; /* measurement id -- 0 or 1 */
1043 u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
1044 u8 periodic; /* 1 = periodic */
1045 __le16 path_loss_timeout;
1046 __le32 start_time; /* start time in extended beacon format */
1047 __le32 reserved2;
1048 __le32 flags; /* rxon flags */
1049 __le32 filter_flags; /* rxon filter flags */
1050 __le16 channel_count; /* minimum 1, maximum 10 */
1051 __le16 reserved3;
1052 struct iwl3945_measure_channel channels[10];
1053} __attribute__ ((packed));
1054
1055/*
1056 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
1057 */
1058struct iwl3945_spectrum_resp {
1059 u8 token;
1060 u8 id; /* id of the prior command replaced, or 0xff */
1061 __le16 status; /* 0 - command will be handled
1062 * 1 - cannot handle (conflicts with another
1063 * measurement) */
1064} __attribute__ ((packed));
1065
1066enum iwl3945_measurement_state {
1067 IWL_MEASUREMENT_START = 0,
1068 IWL_MEASUREMENT_STOP = 1,
1069};
1070
1071enum iwl3945_measurement_status {
1072 IWL_MEASUREMENT_OK = 0,
1073 IWL_MEASUREMENT_CONCURRENT = 1,
1074 IWL_MEASUREMENT_CSA_CONFLICT = 2,
1075 IWL_MEASUREMENT_TGH_CONFLICT = 3,
1076 /* 4-5 reserved */
1077 IWL_MEASUREMENT_STOPPED = 6,
1078 IWL_MEASUREMENT_TIMEOUT = 7,
1079 IWL_MEASUREMENT_PERIODIC_FAILED = 8,
1080};
1081
1082#define NUM_ELEMENTS_IN_HISTOGRAM 8
1083
1084struct iwl3945_measurement_histogram {
1085 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
1086 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
1087} __attribute__ ((packed));
1088
1089/* clear channel availability counters */
1090struct iwl3945_measurement_cca_counters {
1091 __le32 ofdm;
1092 __le32 cck;
1093} __attribute__ ((packed));
1094
1095enum iwl3945_measure_type {
1096 IWL_MEASURE_BASIC = (1 << 0),
1097 IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
1098 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
1099 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
1100 IWL_MEASURE_FRAME = (1 << 4),
1101 /* bits 5:6 are reserved */
1102 IWL_MEASURE_IDLE = (1 << 7),
1103};
1104
1105/*
1106 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
1107 */
1108struct iwl3945_spectrum_notification {
1109 u8 id; /* measurement id -- 0 or 1 */
1110 u8 token;
1111 u8 channel_index; /* index in measurement channel list */
1112 u8 state; /* 0 - start, 1 - stop */
1113 __le32 start_time; /* lower 32-bits of TSF */
1114 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
1115 u8 channel;
1116	u8 type;		/* see enum iwl3945_measure_type */
1117 u8 reserved1;
1118	/* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
1119 * valid if applicable for measurement type requested. */
1120 __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
1121 __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
1122 __le32 cca_time; /* channel load time in usecs */
1123 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
1124 * unidentified */
1125 u8 reserved2[3];
1126 struct iwl3945_measurement_histogram histogram;
1127 __le32 stop_time; /* lower 32-bits of TSF */
1128 __le32 status; /* see iwl3945_measurement_status */
1129} __attribute__ ((packed));
1130
1131/******************************************************************************
1132 * (7)
1133 * Power Management Commands, Responses, Notifications:
1134 *
1135 *****************************************************************************/
1136
1137/**
1138 * struct iwl3945_powertable_cmd - Power Table Command
1139 * @flags: See below:
1140 *
1141 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
1142 *
1143 * PM allow:
1144 *	bit 0 - '0' Driver does not allow power management
1145 *		'1' Driver allows PM (use rest of parameters)
1146 * uCode send sleep notifications:
1147 * bit 1 - '0' Don't send sleep notification
1148 * '1' send sleep notification (SEND_PM_NOTIFICATION)
1149 * Sleep over DTIM
1150 *	bit 2 - '0' PM must wake up at every DTIM
1151 *		'1' PM may sleep over DTIM until the listen interval.
1152 * PCI power managed
1153 * bit 3 - '0' (PCI_LINK_CTRL & 0x1)
1154 * '1' !(PCI_LINK_CTRL & 0x1)
1155 * Force sleep Modes
1156 * bit 31/30- '00' use both mac/xtal sleeps
1157 * '01' force Mac sleep
1158 * '10' force xtal sleep
1159 * '11' Illegal set
1160 *
1161 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period, then
1162 * uCode assumes sleep over DTIM is allowed and the device need not wake up
1163 * for every DTIM.
1164 */
1165#define IWL_POWER_VEC_SIZE 5
1166
1167#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le32(1 << 0)
1168#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le32(1 << 2)
1169#define IWL_POWER_PCI_PM_MSK cpu_to_le32(1 << 3)
1170struct iwl3945_powertable_cmd {
1171 __le32 flags;
1172 __le32 rx_data_timeout;
1173 __le32 tx_data_timeout;
1174 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
1175} __attribute__((packed));
1176
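/*
 * Editor's sketch (not part of the original header): a hedged example of a
 * POWER_TABLE_CMD that enables power management and sleep over DTIM.  The
 * timeout values and sleep-interval vector are purely illustrative; real
 * tables are tuned per power level.
 */
static inline void
iwl3945_power_cmd_example(struct iwl3945_powertable_cmd *cmd)
{
	static const u32 intervals[IWL_POWER_VEC_SIZE] = { 1, 2, 2, 4, 4 };
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
		     IWL_POWER_SLEEP_OVER_DTIM_MSK;
	cmd->rx_data_timeout = cpu_to_le32(25000);	/* usec, illustrative */
	cmd->tx_data_timeout = cpu_to_le32(25000);
	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] = cpu_to_le32(intervals[i]);
}
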
1177/*
1178 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
1179 * 3945 and 4965 identical.
1180 */
1181struct iwl3945_sleep_notification {
1182 u8 pm_sleep_mode;
1183 u8 pm_wakeup_src;
1184 __le16 reserved;
1185 __le32 sleep_time;
1186 __le32 tsf_low;
1187 __le32 bcon_timer;
1188} __attribute__ ((packed));
1189
1190/* Sleep states. 3945 and 4965 identical. */
1191enum {
1192 IWL_PM_NO_SLEEP = 0,
1193 IWL_PM_SLP_MAC = 1,
1194 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
1195 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
1196 IWL_PM_SLP_PHY = 4,
1197 IWL_PM_SLP_REPENT = 5,
1198 IWL_PM_WAKEUP_BY_TIMER = 6,
1199 IWL_PM_WAKEUP_BY_DRIVER = 7,
1200 IWL_PM_WAKEUP_BY_RFKILL = 8,
1201	/* 9-11 reserved */
1202 IWL_PM_NUM_OF_MODES = 12,
1203};
1204
1205/*
1206 * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response)
1207 */
1208#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */
1209#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */
1210#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
1211struct iwl3945_card_state_cmd {
1212 __le32 status; /* CARD_STATE_CMD_* request new power state */
1213} __attribute__ ((packed));
1214
1215/*
1216 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
1217 */
1218struct iwl3945_card_state_notif {
1219 __le32 flags;
1220} __attribute__ ((packed));
1221
1222#define HW_CARD_DISABLED 0x01
1223#define SW_CARD_DISABLED 0x02
1224#define RF_CARD_DISABLED 0x04
1225#define RXON_CARD_DISABLED 0x10
1226
1227struct iwl3945_ct_kill_config {
1228 __le32 reserved;
1229 __le32 critical_temperature_M;
1230 __le32 critical_temperature_R;
1231} __attribute__ ((packed));
1232
1233/******************************************************************************
1234 * (8)
1235 * Scan Commands, Responses, Notifications:
1236 *
1237 *****************************************************************************/
1238
1239/**
1240 * struct iwl3945_scan_channel - entry in REPLY_SCAN_CMD channel table
1241 *
1242 * One for each channel in the scan list.
1243 * Each channel can independently select:
1244 * 1) SSID for directed active scans
1245 * 2) Txpower setting (for rate specified within Tx command)
1246 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
1247 * quiet_plcp_th, good_CRC_th)
1248 *
1249 * To avoid uCode errors, make sure the following are true (see comments
1250 * under struct iwl3945_scan_cmd about max_out_time and quiet_time):
1251 * 1) If using passive_dwell (i.e. passive_dwell != 0):
1252 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
1253 * 2) quiet_time <= active_dwell
1254 * 3) If restricting off-channel time (i.e. max_out_time !=0):
1255 * passive_dwell < max_out_time
1256 * active_dwell < max_out_time
1257 */
1258struct iwl3945_scan_channel {
1259 /*
1260 * type is defined as:
1261 * 0:0 1 = active, 0 = passive
1262 * 1:4 SSID direct bit map; if a bit is set, then corresponding
1263 * SSID IE is transmitted in probe request.
1264 * 5:7 reserved
1265 */
1266 u8 type;
1267 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
1268 struct iwl3945_tx_power tpc;
1269 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
1270 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
1271} __attribute__ ((packed));
1272
1273/**
1274 * struct iwl3945_ssid_ie - directed scan network information element
1275 *
1276 * Up to 4 of these may appear in REPLY_SCAN_CMD, selected by "type" field
1277 * in struct iwl3945_scan_channel; each channel may select different ssids from
1278 * among the 4 entries. SSID IEs get transmitted in reverse order of entry.
1279 */
1280struct iwl3945_ssid_ie {
1281 u8 id;
1282 u8 len;
1283 u8 ssid[32];
1284} __attribute__ ((packed));
1285
1286#define PROBE_OPTION_MAX 0x4
1287#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
1288#define IWL_GOOD_CRC_TH cpu_to_le16(1)
1289#define IWL_MAX_SCAN_SIZE 1024
1290
1291/*
1292 * REPLY_SCAN_CMD = 0x80 (command)
1293 *
1294 * The hardware scan command is very powerful; the driver can set it up to
1295 * maintain (relatively) normal network traffic while doing a scan in the
1296 * background. The max_out_time and suspend_time control the ratio of how
1297 * long the device stays on an associated network channel ("service channel")
1298 * vs. how long it's away from the service channel, tuned to other channels
1299 * for scanning.
1300 *
1301 * max_out_time is the max time off-channel (in usec), and suspend_time
1302 * is how long (in "extended beacon" format) that the scan is "suspended"
1303 * after returning to the service channel. That is, suspend_time is the
1304 * time that we stay on the service channel, doing normal work, between
1305 * scan segments. The driver may set these parameters differently to support
1306 * scanning when associated vs. not associated, and light vs. heavy traffic
1307 * loads when associated.
1308 *
1309 * After receiving this command, the device's scan engine does the following:
1310 *
1311 * 1) Sends SCAN_START notification to driver
1312 * 2) Checks to see if it has time to do scan for one channel
1313 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
1314 * to tell AP that we're going off-channel
1315 * 4) Tunes to first channel in scan list, does active or passive scan
1316 * 5) Sends SCAN_RESULT notification to driver
1317 * 6) Checks to see if it has time to do scan on *next* channel in list
1318 * 7) Repeats 4-6 until it no longer has time to scan the next channel
1319 * before max_out_time expires
1320 * 8) Returns to service channel
1321 * 9) Sends NULL packet with PS=0 to tell AP that we're back
1322 * 10) Stays on service channel until suspend_time expires
1323 * 11) Repeats entire process 2-10 until list is complete
1324 * 12) Sends SCAN_COMPLETE notification
1325 *
1326 * For fast, efficient scans, the scan command also has support for staying on
1327 * a channel for just a short time, if doing active scanning and getting no
1328 * responses to the transmitted probe request. This time is controlled by
1329 * quiet_time, and the number of received packets below which a channel is
1330 * considered "quiet" is controlled by quiet_plcp_threshold.
1331 *
1332 * For active scanning on channels that have regulatory restrictions against
1333 * blindly transmitting, the scan can listen before transmitting, to make sure
1334 * that there is already legitimate activity on the channel. If enough
1335 * packets are cleanly received on the channel (controlled by good_CRC_th,
1336 * typical value 1), the scan engine starts transmitting probe requests.
1337 *
1338 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
1339 *
1340 * To avoid uCode errors, see timing restrictions described under
1341 * struct iwl3945_scan_channel.
1342 */
1343struct iwl3945_scan_cmd {
1344 __le16 len;
1345 u8 reserved0;
1346 u8 channel_count; /* # channels in channel list */
1347 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
1348 * (only for active scan) */
1349 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
1350 __le16 good_CRC_th; /* passive -> active promotion threshold */
1351 __le16 reserved1;
1352 __le32 max_out_time; /* max usec to be away from associated (service)
1353 * channel */
1354 __le32 suspend_time; /* pause scan this long (in "extended beacon
1355 * format") when returning to service channel:
1356 * 3945; 31:24 # beacons, 19:0 additional usec,
1357 * 4965; 31:22 # beacons, 21:0 additional usec.
1358 */
1359 __le32 flags; /* RXON_FLG_* */
1360 __le32 filter_flags; /* RXON_FILTER_* */
1361
1362 /* For active scans (set to all-0s for passive scans).
1363 * Does not include payload. Must specify Tx rate; no rate scaling. */
1364 struct iwl3945_tx_cmd tx_cmd;
1365
1366 /* For directed active scans (set to all-0s otherwise) */
1367 struct iwl3945_ssid_ie direct_scan[PROBE_OPTION_MAX];
1368
1369 /*
1370 * Probe request frame, followed by channel list.
1371 *
1372 * Size of probe request frame is specified by byte count in tx_cmd.
1373 * Channel list follows immediately after probe request frame.
1374 * Number of channels in list is specified by channel_count.
1375 * Each channel in list is of type:
1376 *
1377 * struct iwl3945_scan_channel channels[0];
1378 *
1379 * NOTE: Only one band of channels can be scanned per pass. You
1380 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
1381 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
1382 * before requesting another scan.
1383 */
1384 u8 data[0];
1385} __attribute__ ((packed));
1386
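/*
 * Editor's sketch (not part of the original header): how the variable-size
 * scan command is plausibly assembled -- fixed header, probe request frame,
 * then the channel list -- with len covering the whole command.  The helper
 * name is an illustrative assumption; the caller is assumed to have copied
 * the probe request into data[] and to fill the channel entries afterwards.
 */
static inline u16 iwl3945_scan_cmd_finish(struct iwl3945_scan_cmd *scan,
					  u16 probe_req_len, u8 n_channels)
{
	u16 total = sizeof(*scan) + probe_req_len +
		    n_channels * sizeof(struct iwl3945_scan_channel);

	/* probe request occupies data[0..probe_req_len-1];
	 * the channel list (n_channels entries) follows it directly */
	scan->tx_cmd.len = cpu_to_le16(probe_req_len);
	scan->channel_count = n_channels;
	scan->len = cpu_to_le16(total);	/* must stay <= IWL_MAX_SCAN_SIZE */

	return total;
}
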
1387/* A scan abort is reported via the complete notification, with abort status. */
1388#define CAN_ABORT_STATUS cpu_to_le32(0x1)
1389/* complete notification statuses */
1390#define ABORT_STATUS 0x2
1391
1392/*
1393 * REPLY_SCAN_CMD = 0x80 (response)
1394 */
1395struct iwl3945_scanreq_notification {
1396 __le32 status; /* 1: okay, 2: cannot fulfill request */
1397} __attribute__ ((packed));
1398
1399/*
1400 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
1401 */
1402struct iwl3945_scanstart_notification {
1403 __le32 tsf_low;
1404 __le32 tsf_high;
1405 __le32 beacon_timer;
1406 u8 channel;
1407 u8 band;
1408 u8 reserved[2];
1409 __le32 status;
1410} __attribute__ ((packed));
1411
1412#define SCAN_OWNER_STATUS 0x1
1413#define MEASURE_OWNER_STATUS 0x2
1414
1415#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
1416/*
1417 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
1418 */
1419struct iwl3945_scanresults_notification {
1420 u8 channel;
1421 u8 band;
1422 u8 reserved[2];
1423 __le32 tsf_low;
1424 __le32 tsf_high;
1425 __le32 statistics[NUMBER_OF_STATISTICS];
1426} __attribute__ ((packed));
1427
1428/*
1429 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
1430 */
1431struct iwl3945_scancomplete_notification {
1432 u8 scanned_channels;
1433 u8 status;
1434 u8 reserved;
1435 u8 last_channel;
1436 __le32 tsf_low;
1437 __le32 tsf_high;
1438} __attribute__ ((packed));
1439
1440
1441/******************************************************************************
1442 * (9)
1443 * IBSS/AP Commands and Notifications:
1444 *
1445 *****************************************************************************/
1446
1447/*
1448 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
1449 */
1450struct iwl3945_beacon_notif {
1451 struct iwl3945_tx_resp beacon_notify_hdr;
1452 __le32 low_tsf;
1453 __le32 high_tsf;
1454 __le32 ibss_mgr_status;
1455} __attribute__ ((packed));
1456
1457/*
1458 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
1459 */
1460struct iwl3945_tx_beacon_cmd {
1461 struct iwl3945_tx_cmd tx;
1462 __le16 tim_idx;
1463 u8 tim_size;
1464 u8 reserved1;
1465 struct ieee80211_hdr frame[0]; /* beacon frame */
1466} __attribute__ ((packed));
1467
1468/******************************************************************************
1469 * (10)
1470 * Statistics Commands and Notifications:
1471 *
1472 *****************************************************************************/
1473
1474#define IWL_TEMP_CONVERT 260
1475
1476#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
1477#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
1478#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
1479
1480/* Used for passing to driver number of successes and failures per rate */
1481struct rate_histogram {
1482 union {
1483 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
1484 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
1485 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
1486 } success;
1487 union {
1488 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
1489 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
1490 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
1491 } failed;
1492} __attribute__ ((packed));
1493
1494/* statistics command response */
1495
1496struct statistics_rx_phy {
1497 __le32 ina_cnt;
1498 __le32 fina_cnt;
1499 __le32 plcp_err;
1500 __le32 crc32_err;
1501 __le32 overrun_err;
1502 __le32 early_overrun_err;
1503 __le32 crc32_good;
1504 __le32 false_alarm_cnt;
1505 __le32 fina_sync_err_cnt;
1506 __le32 sfd_timeout;
1507 __le32 fina_timeout;
1508 __le32 unresponded_rts;
1509 __le32 rxe_frame_limit_overrun;
1510 __le32 sent_ack_cnt;
1511 __le32 sent_cts_cnt;
1512} __attribute__ ((packed));
1513
1514struct statistics_rx_non_phy {
1515 __le32 bogus_cts; /* CTS received when not expecting CTS */
1516 __le32 bogus_ack; /* ACK received when not expecting ACK */
1517 __le32 non_bssid_frames; /* number of frames with BSSID that
1518 * doesn't belong to the STA BSSID */
1519 __le32 filtered_frames; /* count frames that were dumped in the
1520 * filtering process */
1521 __le32 non_channel_beacons; /* beacons with our bss id but not on
1522 * our serving channel */
1523} __attribute__ ((packed));
1524
1525struct statistics_rx {
1526 struct statistics_rx_phy ofdm;
1527 struct statistics_rx_phy cck;
1528 struct statistics_rx_non_phy general;
1529} __attribute__ ((packed));
1530
1531struct statistics_tx {
1532 __le32 preamble_cnt;
1533 __le32 rx_detected_cnt;
1534 __le32 bt_prio_defer_cnt;
1535 __le32 bt_prio_kill_cnt;
1536 __le32 few_bytes_cnt;
1537 __le32 cts_timeout;
1538 __le32 ack_timeout;
1539 __le32 expected_ack_cnt;
1540 __le32 actual_ack_cnt;
1541} __attribute__ ((packed));
1542
1543struct statistics_dbg {
1544 __le32 burst_check;
1545 __le32 burst_count;
1546 __le32 reserved[4];
1547} __attribute__ ((packed));
1548
1549struct statistics_div {
1550 __le32 tx_on_a;
1551 __le32 tx_on_b;
1552 __le32 exec_time;
1553 __le32 probe_time;
1554} __attribute__ ((packed));
1555
1556struct statistics_general {
1557 __le32 temperature;
1558 struct statistics_dbg dbg;
1559 __le32 sleep_time;
1560 __le32 slots_out;
1561 __le32 slots_idle;
1562 __le32 ttl_timestamp;
1563 struct statistics_div div;
1564} __attribute__ ((packed));
1565
1566/*
1567 * REPLY_STATISTICS_CMD = 0x9c,
1568 * 3945 and 4965 identical.
1569 *
1570 * This command triggers an immediate response containing uCode statistics.
1571 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
1572 *
1573 * If the CLEAR_STATS configuration flag is set, uCode will clear its
1574 * internal copy of the statistics (counters) after issuing the response.
1575 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
1576 *
1577 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
1578 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
1579 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
1580 */
1581#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
1582#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
1583struct iwl3945_statistics_cmd {
1584 __le32 configuration_flags; /* IWL_STATS_CONF_* */
1585} __attribute__ ((packed));
1586
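/*
 * Editor's sketch (not part of the original header): requesting an immediate
 * statistics dump and clearing the uCode counters afterwards, per the flag
 * descriptions above.  How the command is queued to the device is
 * driver-specific and omitted here.
 */
static inline void
iwl3945_statistics_request_example(struct iwl3945_statistics_cmd *cmd)
{
	/* clear counters after the response; keep per-beacon notifications */
	cmd->configuration_flags = IWL_STATS_CONF_CLEAR_STATS;
}
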
1587/*
1588 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
1589 *
1590 * By default, uCode issues this notification after receiving a beacon
1591 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
1592 * REPLY_STATISTICS_CMD 0x9c, above.
1593 *
1594 * Statistics counters continue to increment beacon after beacon, but are
1595 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
1596 * 0x9c with CLEAR_STATS bit set (see above).
1597 *
1598 * uCode also issues this notification during scans. uCode clears statistics
1599 * appropriately so that each notification contains statistics for only the
1600 * one channel that has just been scanned.
1601 */
1602#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
1603#define STATISTICS_REPLY_FLG_FAT_MODE_MSK cpu_to_le32(0x8)
1604struct iwl3945_notif_statistics {
1605 __le32 flag;
1606 struct statistics_rx rx;
1607 struct statistics_tx tx;
1608 struct statistics_general general;
1609} __attribute__ ((packed));
1610
1611
1612/*
1613 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
1614 */
1615/* If uCode misses CONSECUTIVE_MISSED_BCONS_TH beacons in a row,
1616 * this notification is sent. */
1617#define CONSECUTIVE_MISSED_BCONS_TH 20
1618
1619struct iwl3945_missed_beacon_notif {
1620 __le32 consequtive_missed_beacons;
1621 __le32 total_missed_becons;
1622 __le32 num_expected_beacons;
1623 __le32 num_recvd_beacons;
1624} __attribute__ ((packed));
1625
1626/******************************************************************************
1627 * (11)
1628 * Rx Calibration Commands:
1629 *
1630 *****************************************************************************/
1631
1632#define PHY_CALIBRATE_DIFF_GAIN_CMD (7)
1633#define HD_TABLE_SIZE (11)
1634
1635struct iwl3945_sensitivity_cmd {
1636 __le16 control;
1637 __le16 table[HD_TABLE_SIZE];
1638} __attribute__ ((packed));
1639
1640struct iwl3945_calibration_cmd {
1641 u8 opCode;
1642 u8 flags;
1643 __le16 reserved;
1644 s8 diff_gain_a;
1645 s8 diff_gain_b;
1646 s8 diff_gain_c;
1647 u8 reserved1;
1648} __attribute__ ((packed));
1649
1650/******************************************************************************
1651 * (12)
1652 * Miscellaneous Commands:
1653 *
1654 *****************************************************************************/
1655
1656/*
1657 * LEDs Command & Response
1658 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
1659 *
1660 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
1661 * this command turns it on or off, or sets up a periodic blinking cycle.
1662 */
1663struct iwl3945_led_cmd {
1664 __le32 interval; /* "interval" in uSec */
1665 u8 id; /* 1: Activity, 2: Link, 3: Tech */
1666 u8 off; /* # intervals off while blinking;
1667 * "0", with >0 "on" value, turns LED on */
1668 u8 on; /* # intervals on while blinking;
1669 * "0", regardless of "off", turns LED off */
1670 u8 reserved;
1671} __attribute__ ((packed));
1672
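/*
 * Editor's sketch (not part of the original header): per the field comments
 * above, a steady-on LED uses on > 0 with off == 0, while a blink pattern
 * sets both.  The values below are illustrative only.
 */
static inline void iwl3945_led_blink_example(struct iwl3945_led_cmd *led)
{
	led->id = 2;				/* Link LED */
	led->interval = cpu_to_le32(1000);	/* uSec per interval */
	led->on = 10;				/* intervals lit ... */
	led->off = 10;				/* ... and intervals dark */
	led->reserved = 0;
}
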
1673/******************************************************************************
1674 * (13)
1675 * Union of all expected notifications/responses:
1676 *
1677 *****************************************************************************/
1678
1679struct iwl3945_rx_packet {
1680 __le32 len;
1681 struct iwl3945_cmd_header hdr;
1682 union {
1683 struct iwl3945_alive_resp alive_frame;
1684 struct iwl3945_rx_frame rx_frame;
1685 struct iwl3945_tx_resp tx_resp;
1686 struct iwl3945_spectrum_notification spectrum_notif;
1687 struct iwl3945_csa_notification csa_notif;
1688 struct iwl3945_error_resp err_resp;
1689 struct iwl3945_card_state_notif card_state_notif;
1690 struct iwl3945_beacon_notif beacon_status;
1691 struct iwl3945_add_sta_resp add_sta;
1692 struct iwl3945_sleep_notification sleep_notif;
1693 struct iwl3945_spectrum_resp spectrum;
1694 struct iwl3945_notif_statistics stats;
1695 __le32 status;
1696 u8 raw[0];
1697 } u;
1698} __attribute__ ((packed));
1699
1700#define IWL_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
1701
1702#endif /* __iwl3945_3945_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
deleted file mode 100644
index 85eb778f9df..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
+++ /dev/null
@@ -1,167 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl3945_debug_h__
30#define __iwl3945_debug_h__
31
32#ifdef CONFIG_IWL3945_DEBUG
33extern u32 iwl3945_debug_level;
34#define IWL_DEBUG(level, fmt, args...) \
35do { if (iwl3945_debug_level & (level)) \
36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
37 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
38
39#define IWL_DEBUG_LIMIT(level, fmt, args...) \
40do { if ((iwl3945_debug_level & (level)) && net_ratelimit()) \
41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
42 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
43
44static inline void iwl3945_print_hex_dump(int level, void *p, u32 len)
45{
46 if (!(iwl3945_debug_level & level))
47 return;
48
49 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
50 p, len, 1);
51}
52#else
53static inline void IWL_DEBUG(int level, const char *fmt, ...)
54{
55}
56static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
57{
58}
59static inline void iwl3945_print_hex_dump(int level, void *p, u32 len)
60{
61}
62#endif /* CONFIG_IWL3945_DEBUG */
63
64
65
66/*
67 * To use the debug system;
68 *
69 * If you are defining a new debug classification, simply add it to the #define
70 * list here in the form of:
71 *
72 * #define IWL_DL_xxxx VALUE
73 *
74 * shifting value to the left one bit from the previous entry. xxxx should be
75 * the name of the classification (for example, WEP)
76 *
77 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
78 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
79 * to send output to that classification.
80 *
81 * To add your debug level to the list of levels seen when you perform
82 *
83 * % cat /proc/net/iwl/debug_level
84 *
85 * you simply need to add your entry to the iwl3945_debug_levels array.
86 *
87 * If you do not see debug_level in /proc/net/iwl then you do not have
88 * CONFIG_IWL3945_DEBUG defined in your kernel configuration
89 *
90 */
91
92#define IWL_DL_INFO (1 << 0)
93#define IWL_DL_MAC80211 (1 << 1)
94#define IWL_DL_HOST_COMMAND (1 << 2)
95#define IWL_DL_STATE (1 << 3)
96
97#define IWL_DL_RADIO (1 << 7)
98#define IWL_DL_POWER (1 << 8)
99#define IWL_DL_TEMP (1 << 9)
100
101#define IWL_DL_NOTIF (1 << 10)
102#define IWL_DL_SCAN (1 << 11)
103#define IWL_DL_ASSOC (1 << 12)
104#define IWL_DL_DROP (1 << 13)
105
106#define IWL_DL_TXPOWER (1 << 14)
107
108#define IWL_DL_AP (1 << 15)
109
110#define IWL_DL_FW (1 << 16)
111#define IWL_DL_RF_KILL (1 << 17)
112#define IWL_DL_FW_ERRORS (1 << 18)
113
114#define IWL_DL_LED (1 << 19)
115
116#define IWL_DL_RATE (1 << 20)
117
118#define IWL_DL_CALIB (1 << 21)
119#define IWL_DL_WEP (1 << 22)
120#define IWL_DL_TX (1 << 23)
121#define IWL_DL_RX (1 << 24)
122#define IWL_DL_ISR (1 << 25)
123#define IWL_DL_HT (1 << 26)
124#define IWL_DL_IO (1 << 27)
125#define IWL_DL_11H (1 << 28)
126
127#define IWL_DL_STATS (1 << 29)
128#define IWL_DL_TX_REPLY (1 << 30)
129#define IWL_DL_QOS (1 << 31)
130
131#define IWL_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
132#define IWL_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
133#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a)
134
135#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a)
136#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a)
137#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a)
138#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a)
139#define IWL_DEBUG_TX(f, a...) IWL_DEBUG(IWL_DL_TX, f, ## a)
140#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a)
141#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a)
142#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a)
143#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HOST_COMMAND, f, ## a)
144#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a)
145#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a)
146#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a)
147#define IWL_DEBUG_DROP(f, a...) IWL_DEBUG(IWL_DL_DROP, f, ## a)
148#define IWL_DEBUG_DROP_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_DROP, f, ## a)
149#define IWL_DEBUG_AP(f, a...) IWL_DEBUG(IWL_DL_AP, f, ## a)
150#define IWL_DEBUG_TXPOWER(f, a...) IWL_DEBUG(IWL_DL_TXPOWER, f, ## a)
151#define IWL_DEBUG_IO(f, a...) IWL_DEBUG(IWL_DL_IO, f, ## a)
152#define IWL_DEBUG_RATE(f, a...) IWL_DEBUG(IWL_DL_RATE, f, ## a)
153#define IWL_DEBUG_RATE_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_RATE, f, ## a)
154#define IWL_DEBUG_NOTIF(f, a...) IWL_DEBUG(IWL_DL_NOTIF, f, ## a)
155#define IWL_DEBUG_ASSOC(f, a...) IWL_DEBUG(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
156#define IWL_DEBUG_ASSOC_LIMIT(f, a...) \
157 IWL_DEBUG_LIMIT(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
158#define IWL_DEBUG_HT(f, a...) IWL_DEBUG(IWL_DL_HT, f, ## a)
159#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a)
160#define IWL_DEBUG_STATS_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_STATS, f, ## a)
161#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a)
162#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a)
163#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a)
164#define IWL_DEBUG_POWER(f, a...) IWL_DEBUG(IWL_DL_POWER, f, ## a)
165#define IWL_DEBUG_11H(f, a...) IWL_DEBUG(IWL_DL_11H, f, ## a)
166
167#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
new file mode 100644
index 00000000000..08ce259a0e6
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -0,0 +1,188 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_3945_fh_h__
64#define __iwl_3945_fh_h__
65
66/************************************/
67/* iwl3945 Flow Handler Definitions */
68/************************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH39_MEM_LOWER_BOUND (0x0800)
75#define FH39_MEM_UPPER_BOUND (0x1000)
76
77#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
78#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
79#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
80#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
81#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
82#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
83
84/* TFDB (Transmit Frame Buffer Descriptor) */
85#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
86 ((_ch) * 2 + (buf)) * 0x28)
87#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
88
89/* CBCC channel is [0,2] */
90#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
91#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
92#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
93
94/* RCSR channel is [0,2] */
95#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
96#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
97#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
98#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
99#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
100
101#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
102
103/* RSSR */
104#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
105#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
106
107/* TCSR */
108#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
109#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
110#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
111#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
112
113/* TSSR */
114#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
115#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
116#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
117
118
119/* DBM */
120
121#define FH39_SRVC_CHNL (6)
122
123#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
124#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
125
126#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
127
128#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
129
130#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
131
132#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
133
134#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
135
136#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
137
138#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
139#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
140
141#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
142#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
143
144#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
145
146#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
147
148#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
149#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
150
151#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
152
153#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
154
155#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
156#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
157
158#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
159
160#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
161#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
162
163#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
164#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
165
166#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
167#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
168
169#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
170 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
171 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
172
173#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
174
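/*
 * Editor's sketch (not part of this header): a hedged example of waiting for
 * a Tx DMA channel to drain using the status bits above.  The MMIO base
 * pointer, poll budget, and helper name are illustrative assumptions only.
 */
static inline int fh39_wait_tx_idle(void __iomem *base, int ch)
{
	int t;

	for (t = 0; t < 1000; t++) {
		u32 status = readl(base + FH39_TSSR_TX_STATUS);

		if ((status & FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch)) ==
		    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch))
			return 0;	/* buffers empty, no pending requests */
		udelay(10);
	}
	return -ETIMEDOUT;
}
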
175struct iwl3945_tfd_tb {
176 __le32 addr;
177 __le32 len;
178} __attribute__ ((packed));
179
180struct iwl3945_tfd {
181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28];
184} __attribute__ ((packed));
185
186
187#endif /* __iwl_3945_fh_h__ */
188
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 94ea0e60c41..205603d082a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -69,77 +69,26 @@
69#ifndef __iwl_3945_hw__ 69#ifndef __iwl_3945_hw__
70#define __iwl_3945_hw__ 70#define __iwl_3945_hw__
71 71
72#include "iwl-eeprom.h"
73
72/* 74/*
73 * uCode queue management definitions ... 75 * uCode queue management definitions ...
74 * Queue #4 is the command queue for 3945 and 4965. 76 * Queue #4 is the command queue for 3945 and 4965.
75 */ 77 */
76#define IWL_CMD_QUEUE_NUM 4 78#define IWL_CMD_QUEUE_NUM 4
77
78/* Tx rates */
79#define IWL_CCK_RATES 4
80#define IWL_OFDM_RATES 8
81#define IWL_HT_RATES 0
82#define IWL_MAX_RATES (IWL_CCK_RATES+IWL_OFDM_RATES+IWL_HT_RATES)
83 79
84/* Time constants */ 80/* Time constants */
85#define SHORT_SLOT_TIME 9 81#define SHORT_SLOT_TIME 9
86#define LONG_SLOT_TIME 20 82#define LONG_SLOT_TIME 20
87 83
88/* RSSI to dBm */ 84/* RSSI to dBm */
89#define IWL_RSSI_OFFSET 95 85#define IWL39_RSSI_OFFSET 95
90 86
91/* 87/*
92 * EEPROM related constants, enums, and structures. 88 * EEPROM related constants, enums, and structures.
93 */ 89 */
94
95/*
96 * EEPROM access time values:
97 *
98 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG,
99 * then clearing (with subsequent read/modify/write) CSR_EEPROM_REG bit
100 * CSR_EEPROM_REG_BIT_CMD (0x2).
101 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
102 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
103 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
104 */
105#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
106
107/*
108 * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
109 *
110 * IBSS and/or AP operation is allowed *only* on those channels with
111 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
112 * RADAR detection is not supported by the 3945 driver, but is a
113 * requirement for establishing a new network for legal operation on channels
114 * requiring RADAR detection or restricting ACTIVE scanning.
115 *
116 * NOTE: "WIDE" flag indicates that 20 MHz channel is supported;
117 * 3945 does not support FAT 40 MHz-wide channels.
118 *
119 * NOTE: Using a channel inappropriately will result in a uCode error!
120 */
121enum {
122 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
123 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
124 /* Bit 2 Reserved */
125 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
126 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
127 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
128 /* Bit 6 Reserved (was Narrow Channel) */
129 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
130};
131
132/* SKU Capabilities */
133#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
134#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
135#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) 90#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
136 91
137/* *regulatory* channel data from eeprom, one for each channel */
138struct iwl3945_eeprom_channel {
139 u8 flags; /* flags copied from EEPROM */
140 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
141} __attribute__ ((packed));
142
143/* 92/*
144 * Mapping of a Tx power level, at factory calibration temperature, 93 * Mapping of a Tx power level, at factory calibration temperature,
145 * to a radio/DSP gain table index. 94 * to a radio/DSP gain table index.
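The regulatory-flags comment removed in the hunk above states the usability rule in prose: IBSS/AP operation is allowed only on channels flagged VALID, IBSS and ACTIVE but not RADAR. A minimal sketch of that test (the flag values mirror the removed enum; the sample flags bytes are invented):

    #include <stdio.h>

    #define EEPROM_CHANNEL_VALID    (1 << 0)
    #define EEPROM_CHANNEL_IBSS     (1 << 1)
    #define EEPROM_CHANNEL_ACTIVE   (1 << 3)
    #define EEPROM_CHANNEL_RADAR    (1 << 4)

    /* VALID && IBSS && ACTIVE && !RADAR, as described in the removed comment */
    static int channel_ok_for_ibss(unsigned char flags)
    {
            return (flags & EEPROM_CHANNEL_VALID) &&
                   (flags & EEPROM_CHANNEL_IBSS) &&
                   (flags & EEPROM_CHANNEL_ACTIVE) &&
                   !(flags & EEPROM_CHANNEL_RADAR);
    }

    int main(void)
    {
            printf("0x0b -> %d, 0x1b -> %d\n",
                   channel_ok_for_ibss(0x0b),   /* VALID|IBSS|ACTIVE         -> 1 */
                   channel_ok_for_ibss(0x1b));  /* same, plus RADAR required -> 0 */
            return 0;
    }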
@@ -233,7 +182,7 @@ struct iwl3945_eeprom {
233 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 182 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
234 */ 183 */
235 u16 band_1_count; /* abs.ofs: 196 */ 184 u16 band_1_count; /* abs.ofs: 196 */
236 struct iwl3945_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */ 185 struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
237 186
238/* 187/*
239 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, 188 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
@@ -241,28 +190,28 @@ struct iwl3945_eeprom {
241 * (4915-5080MHz) (none of these is ever supported) 190 * (4915-5080MHz) (none of these is ever supported)
242 */ 191 */
243 u16 band_2_count; /* abs.ofs: 226 */ 192 u16 band_2_count; /* abs.ofs: 226 */
244 struct iwl3945_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ 193 struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
245 194
246/* 195/*
247 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 196 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
248 * (5170-5320MHz) 197 * (5170-5320MHz)
249 */ 198 */
250 u16 band_3_count; /* abs.ofs: 254 */ 199 u16 band_3_count; /* abs.ofs: 254 */
251 struct iwl3945_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ 200 struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
252 201
253/* 202/*
254 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 203 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
255 * (5500-5700MHz) 204 * (5500-5700MHz)
256 */ 205 */
257 u16 band_4_count; /* abs.ofs: 280 */ 206 u16 band_4_count; /* abs.ofs: 280 */
258 struct iwl3945_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ 207 struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
259 208
260/* 209/*
261 * 5.7 GHz channels 145, 149, 153, 157, 161, 165 210 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
262 * (5725-5825MHz) 211 * (5725-5825MHz)
263 */ 212 */
264 u16 band_5_count; /* abs.ofs: 304 */ 213 u16 band_5_count; /* abs.ofs: 304 */
265 struct iwl3945_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ 214 struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
266 215
267 u8 reserved9[194]; 216 u8 reserved9[194];
268 217
@@ -276,125 +225,15 @@ struct iwl3945_eeprom {
276 u8 reserved16[172]; /* fill out to full 1024 byte block */ 225 u8 reserved16[172]; /* fill out to full 1024 byte block */
277} __attribute__ ((packed)); 226} __attribute__ ((packed));
278 227
279#define IWL_EEPROM_IMAGE_SIZE 1024 228#define IWL3945_EEPROM_IMG_SIZE 1024
280 229
281/* End of EEPROM */ 230/* End of EEPROM */
282 231
283
284#include "iwl-3945-commands.h"
285
286#define PCI_LINK_CTRL 0x0F0
287#define PCI_POWER_SOURCE 0x0C8
288#define PCI_REG_WUM8 0x0E8
289#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
290
291/*=== FH (data Flow Handler) ===*/
292#define FH_BASE (0x800)
293
294#define FH_CBCC_TABLE (FH_BASE+0x140)
295#define FH_TFDB_TABLE (FH_BASE+0x180)
296#define FH_RCSR_TABLE (FH_BASE+0x400)
297#define FH_RSSR_TABLE (FH_BASE+0x4c0)
298#define FH_TCSR_TABLE (FH_BASE+0x500)
299#define FH_TSSR_TABLE (FH_BASE+0x680)
300
301/* TFDB (Transmit Frame Buffer Descriptor) */
302#define FH_TFDB(_channel, buf) \
303 (FH_TFDB_TABLE+((_channel)*2+(buf))*0x28)
304#define ALM_FH_TFDB_CHNL_BUF_CTRL_REG(_channel) \
305 (FH_TFDB_TABLE + 0x50 * _channel)
306/* CBCC _channel is [0,2] */
307#define FH_CBCC(_channel) (FH_CBCC_TABLE+(_channel)*0x8)
308#define FH_CBCC_CTRL(_channel) (FH_CBCC(_channel)+0x00)
309#define FH_CBCC_BASE(_channel) (FH_CBCC(_channel)+0x04)
310
311/* RCSR _channel is [0,2] */
312#define FH_RCSR(_channel) (FH_RCSR_TABLE+(_channel)*0x40)
313#define FH_RCSR_CONFIG(_channel) (FH_RCSR(_channel)+0x00)
314#define FH_RCSR_RBD_BASE(_channel) (FH_RCSR(_channel)+0x04)
315#define FH_RCSR_WPTR(_channel) (FH_RCSR(_channel)+0x20)
316#define FH_RCSR_RPTR_ADDR(_channel) (FH_RCSR(_channel)+0x24)
317
318#define FH_RSCSR_CHNL0_WPTR (FH_RCSR_WPTR(0))
319
320/* RSSR */
321#define FH_RSSR_CTRL (FH_RSSR_TABLE+0x000)
322#define FH_RSSR_STATUS (FH_RSSR_TABLE+0x004)
323#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
324/* TCSR */
325#define FH_TCSR(_channel) (FH_TCSR_TABLE+(_channel)*0x20)
326#define FH_TCSR_CONFIG(_channel) (FH_TCSR(_channel)+0x00)
327#define FH_TCSR_CREDIT(_channel) (FH_TCSR(_channel)+0x04)
328#define FH_TCSR_BUFF_STTS(_channel) (FH_TCSR(_channel)+0x08)
329/* TSSR */
330#define FH_TSSR_CBB_BASE (FH_TSSR_TABLE+0x000)
331#define FH_TSSR_MSG_CONFIG (FH_TSSR_TABLE+0x008)
332#define FH_TSSR_TX_STATUS (FH_TSSR_TABLE+0x010)
333
334
335/* DBM */
336
337#define ALM_FH_SRVC_CHNL (6)
338
339#define ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
340#define ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
341
342#define ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
343
344#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
345
346#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
347
348#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
349
350#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
351
352#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
353
354#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
355#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
356
357#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
358#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
359
360#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
361
362#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
363
364#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
365#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
366
367#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
368
369#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
370
371#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
372#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
373
374#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
375
376#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
377#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
378
379#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
380#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
381
382#define ALM_TB_MAX_BYTES_COUNT (0xFFF0)
383
384#define ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) \
385 ((1LU << _channel) << 24)
386#define ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel) \
387 ((1LU << _channel) << 16)
388
389#define ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_channel) \
390 (ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) | \
391 ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel))
392#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ 232#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
393#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ 233#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
394 234
395#define TFD_QUEUE_MIN 0 235#define TFD_QUEUE_MIN 0
396#define TFD_QUEUE_MAX 6 236#define TFD_QUEUE_MAX 6
397#define TFD_QUEUE_SIZE_MAX (256)
398 237
399#define IWL_NUM_SCAN_RATES (2) 238#define IWL_NUM_SCAN_RATES (2)
400 239
@@ -416,12 +255,6 @@ struct iwl3945_eeprom {
416#define TFD_CTL_PAD_SET(n) (n << 28) 255#define TFD_CTL_PAD_SET(n) (n << 28)
417#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) 256#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
418 257
419#define TFD_TX_CMD_SLOTS 256
420#define TFD_CMD_SLOTS 32
421
422#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl3945_cmd) - \
423 sizeof(struct iwl3945_cmd_meta))
424
425/* 258/*
426 * RX related structures and functions 259 * RX related structures and functions
427 */ 260 */
@@ -430,45 +263,35 @@ struct iwl3945_eeprom {
430 263
431/* Sizes and addresses for instruction and data memory (SRAM) in 264/* Sizes and addresses for instruction and data memory (SRAM) in
432 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 265 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
433#define RTC_INST_LOWER_BOUND (0x000000) 266#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
434#define ALM_RTC_INST_UPPER_BOUND (0x014000) 267#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
435 268
436#define RTC_DATA_LOWER_BOUND (0x800000) 269#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
437#define ALM_RTC_DATA_UPPER_BOUND (0x808000) 270#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
438 271
439#define ALM_RTC_INST_SIZE (ALM_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 272#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
440#define ALM_RTC_DATA_SIZE (ALM_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) 273 IWL39_RTC_INST_LOWER_BOUND)
274#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
275 IWL39_RTC_DATA_LOWER_BOUND)
441 276
442#define IWL_MAX_INST_SIZE ALM_RTC_INST_SIZE 277#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
443#define IWL_MAX_DATA_SIZE ALM_RTC_DATA_SIZE 278#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
444 279
445/* Size of uCode instruction memory in bootstrap state machine */ 280/* Size of uCode instruction memory in bootstrap state machine */
446#define IWL_MAX_BSM_SIZE ALM_RTC_INST_SIZE 281#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
447 282
448#define IWL39_MAX_NUM_QUEUES 8 283#define IWL39_MAX_NUM_QUEUES 8
449 284
450static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) 285static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
451{ 286{
452 return (addr >= RTC_DATA_LOWER_BOUND) && 287 return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
453 (addr < ALM_RTC_DATA_UPPER_BOUND); 288 (addr < IWL39_RTC_DATA_UPPER_BOUND);
454} 289}
455 290
456/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE 291/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
457 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ 292 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
458struct iwl3945_shared { 293struct iwl3945_shared {
459 __le32 tx_base_ptr[8]; 294 __le32 tx_base_ptr[8];
460 __le32 rx_read_ptr[3];
461} __attribute__ ((packed));
462
463struct iwl3945_tfd_frame_data {
464 __le32 addr;
465 __le32 len;
466} __attribute__ ((packed));
467
468struct iwl3945_tfd_frame {
469 __le32 control_flags;
470 struct iwl3945_tfd_frame_data pa[4];
471 u8 reserved[28];
472} __attribute__ ((packed)); 295} __attribute__ ((packed));
473 296
474static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) 297static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
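The EEPROM access comment removed from this header spells out the read sequence: write the byte address shifted left by one to CSR_EEPROM_REG, poll for the READ_VALID bit at 10 usec intervals up to 5000 usec, then take the 16-bit word from bits 31:16. A user-space sketch of that sequence with the register replaced by a stub so it can run; the stub behaviour and helper names are invented, and the CMD-bit clearing step is omitted for brevity:

    #include <stdint.h>
    #include <stdio.h>

    #define EEPROM_READ_VALID_MSK   0x00000001
    #define ACCESS_TIMEOUT_USEC     5000

    static uint32_t csr_eeprom_reg;         /* stand-in for CSR_EEPROM_REG */

    static void reg_write(uint32_t val)
    {
            /* fake hardware: echo the requested byte address back in the
             * data field and report the read as immediately valid */
            csr_eeprom_reg = ((val >> 1) << 16) | EEPROM_READ_VALID_MSK;
    }

    static int eeprom_read_word(uint32_t byte_addr, uint16_t *word)
    {
            int waited;

            reg_write(byte_addr << 1);      /* byte address << 1 starts the read */
            for (waited = 0; waited <= ACCESS_TIMEOUT_USEC; waited += 10) {
                    if (csr_eeprom_reg & EEPROM_READ_VALID_MSK) {
                            *word = csr_eeprom_reg >> 16;  /* data in bits 31:16 */
                            return 0;
                    }
                    /* a real driver would udelay(10) here before re-reading */
            }
            return -1;                      /* timed out */
    }

    int main(void)
    {
            uint16_t w = 0;

            if (!eeprom_read_word(0x10, &w))
                    printf("word at byte offset 0x10 = 0x%04x\n", w);
            return 0;
    }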
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-io.h b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
deleted file mode 100644
index 2440fd664dd..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-3945-io.h
+++ /dev/null
@@ -1,404 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl3945_io_h__
30#define __iwl3945_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-3945-debug.h"
35
36/*
37 * IO, register, and NIC memory access functions
38 *
39 * NOTE on naming convention and macro usage for these
40 *
 41 * A single _ prefix before an access function means that no state
42 * check or debug information is printed when that function is called.
43 *
44 * A double __ prefix before an access function means that state is checked
45 * and the current line number is printed in addition to any other debug output.
46 *
47 * The non-prefixed name is the #define that maps the caller into a
48 * #define that provides the caller's __LINE__ to the double prefix version.
49 *
50 * If you wish to call the function without any debug or state checking,
51 * you should use the single _ prefix version (as is used by dependent IO
52 * routines, for example _iwl3945_read_direct32 calls the non-check version of
53 * _iwl3945_read32.)
54 *
55 * These declarations are *extremely* useful in quickly isolating code deltas
56 * which result in misconfiguration of the hardware I/O. In combination with
57 * git-bisect and the IO debug level you can quickly determine the specific
58 * commit which breaks the IO sequence to the hardware.
59 *
60 */
61
62#define _iwl3945_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
63#ifdef CONFIG_IWL3945_DEBUG
64static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *priv,
65 u32 ofs, u32 val)
66{
67 IWL_DEBUG_IO("write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
68 _iwl3945_write32(priv, ofs, val);
69}
70#define iwl3945_write32(priv, ofs, val) \
71 __iwl3945_write32(__FILE__, __LINE__, priv, ofs, val)
72#else
73#define iwl3945_write32(priv, ofs, val) _iwl3945_write32(priv, ofs, val)
74#endif
75
76#define _iwl3945_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
77#ifdef CONFIG_IWL3945_DEBUG
78static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *priv, u32 ofs)
79{
80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
81 return _iwl3945_read32(priv, ofs);
82}
83#define iwl3945_read32(priv, ofs)__iwl3945_read32(__FILE__, __LINE__, priv, ofs)
84#else
85#define iwl3945_read32(p, o) _iwl3945_read32(p, o)
86#endif
87
88static inline int _iwl3945_poll_bit(struct iwl3945_priv *priv, u32 addr,
89 u32 bits, u32 mask, int timeout)
90{
91 int i = 0;
92
93 do {
94 if ((_iwl3945_read32(priv, addr) & mask) == (bits & mask))
95 return i;
96 udelay(10);
97 i += 10;
98 } while (i < timeout);
99
100 return -ETIMEDOUT;
101}
102#ifdef CONFIG_IWL3945_DEBUG
103static inline int __iwl3945_poll_bit(const char *f, u32 l,
104 struct iwl3945_priv *priv, u32 addr,
105 u32 bits, u32 mask, int timeout)
106{
107 int ret = _iwl3945_poll_bit(priv, addr, bits, mask, timeout);
108 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
109 addr, bits, mask,
110 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
111 return ret;
112}
113#define iwl3945_poll_bit(priv, addr, bits, mask, timeout) \
114 __iwl3945_poll_bit(__FILE__, __LINE__, priv, addr, bits, mask, timeout)
115#else
116#define iwl3945_poll_bit(p, a, b, m, t) _iwl3945_poll_bit(p, a, b, m, t)
117#endif
118
119static inline void _iwl3945_set_bit(struct iwl3945_priv *priv, u32 reg, u32 mask)
120{
121 _iwl3945_write32(priv, reg, _iwl3945_read32(priv, reg) | mask);
122}
123#ifdef CONFIG_IWL3945_DEBUG
124static inline void __iwl3945_set_bit(const char *f, u32 l,
125 struct iwl3945_priv *priv, u32 reg, u32 mask)
126{
127 u32 val = _iwl3945_read32(priv, reg) | mask;
128 IWL_DEBUG_IO("set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
129 _iwl3945_write32(priv, reg, val);
130}
131#define iwl3945_set_bit(p, r, m) __iwl3945_set_bit(__FILE__, __LINE__, p, r, m)
132#else
133#define iwl3945_set_bit(p, r, m) _iwl3945_set_bit(p, r, m)
134#endif
135
136static inline void _iwl3945_clear_bit(struct iwl3945_priv *priv, u32 reg, u32 mask)
137{
138 _iwl3945_write32(priv, reg, _iwl3945_read32(priv, reg) & ~mask);
139}
140#ifdef CONFIG_IWL3945_DEBUG
141static inline void __iwl3945_clear_bit(const char *f, u32 l,
142 struct iwl3945_priv *priv, u32 reg, u32 mask)
143{
144 u32 val = _iwl3945_read32(priv, reg) & ~mask;
145 IWL_DEBUG_IO("clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
146 _iwl3945_write32(priv, reg, val);
147}
148#define iwl3945_clear_bit(p, r, m) __iwl3945_clear_bit(__FILE__, __LINE__, p, r, m)
149#else
150#define iwl3945_clear_bit(p, r, m) _iwl3945_clear_bit(p, r, m)
151#endif
152
153static inline int _iwl3945_grab_nic_access(struct iwl3945_priv *priv)
154{
155 int ret;
156#ifdef CONFIG_IWL3945_DEBUG
157 if (atomic_read(&priv->restrict_refcnt))
158 return 0;
159#endif
160 /* this bit wakes up the NIC */
161 _iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
162 ret = _iwl3945_poll_bit(priv, CSR_GP_CNTRL,
163 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
164 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
165 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 50);
166 if (ret < 0) {
167 IWL_ERROR("MAC is in deep sleep!\n");
168 return -EIO;
169 }
170
171#ifdef CONFIG_IWL3945_DEBUG
172 atomic_inc(&priv->restrict_refcnt);
173#endif
174 return 0;
175}
176
177#ifdef CONFIG_IWL3945_DEBUG
178static inline int __iwl3945_grab_nic_access(const char *f, u32 l,
179 struct iwl3945_priv *priv)
180{
181 if (atomic_read(&priv->restrict_refcnt))
182 IWL_DEBUG_INFO("Grabbing access while already held at "
183 "line %d.\n", l);
184
185 IWL_DEBUG_IO("grabbing nic access - %s %d\n", f, l);
186 return _iwl3945_grab_nic_access(priv);
187}
188#define iwl3945_grab_nic_access(priv) \
189 __iwl3945_grab_nic_access(__FILE__, __LINE__, priv)
190#else
191#define iwl3945_grab_nic_access(priv) \
192 _iwl3945_grab_nic_access(priv)
193#endif
194
195static inline void _iwl3945_release_nic_access(struct iwl3945_priv *priv)
196{
197#ifdef CONFIG_IWL3945_DEBUG
198 if (atomic_dec_and_test(&priv->restrict_refcnt))
199#endif
200 _iwl3945_clear_bit(priv, CSR_GP_CNTRL,
201 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
202}
203#ifdef CONFIG_IWL3945_DEBUG
204static inline void __iwl3945_release_nic_access(const char *f, u32 l,
205 struct iwl3945_priv *priv)
206{
207 if (atomic_read(&priv->restrict_refcnt) <= 0)
208 IWL_ERROR("Release unheld nic access at line %d.\n", l);
209
210 IWL_DEBUG_IO("releasing nic access - %s %d\n", f, l);
211 _iwl3945_release_nic_access(priv);
212}
213#define iwl3945_release_nic_access(priv) \
214 __iwl3945_release_nic_access(__FILE__, __LINE__, priv)
215#else
216#define iwl3945_release_nic_access(priv) \
217 _iwl3945_release_nic_access(priv)
218#endif
219
220static inline u32 _iwl3945_read_direct32(struct iwl3945_priv *priv, u32 reg)
221{
222 return _iwl3945_read32(priv, reg);
223}
224#ifdef CONFIG_IWL3945_DEBUG
225static inline u32 __iwl3945_read_direct32(const char *f, u32 l,
226 struct iwl3945_priv *priv, u32 reg)
227{
228 u32 value = _iwl3945_read_direct32(priv, reg);
229 if (!atomic_read(&priv->restrict_refcnt))
230 IWL_ERROR("Nic access not held from %s %d\n", f, l);
231 IWL_DEBUG_IO("read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
232 f, l);
233 return value;
234}
235#define iwl3945_read_direct32(priv, reg) \
236 __iwl3945_read_direct32(__FILE__, __LINE__, priv, reg)
237#else
238#define iwl3945_read_direct32 _iwl3945_read_direct32
239#endif
240
241static inline void _iwl3945_write_direct32(struct iwl3945_priv *priv,
242 u32 reg, u32 value)
243{
244 _iwl3945_write32(priv, reg, value);
245}
246#ifdef CONFIG_IWL3945_DEBUG
247static void __iwl3945_write_direct32(u32 line,
248 struct iwl3945_priv *priv, u32 reg, u32 value)
249{
250 if (!atomic_read(&priv->restrict_refcnt))
251 IWL_ERROR("Nic access not held from line %d\n", line);
252 _iwl3945_write_direct32(priv, reg, value);
253}
254#define iwl3945_write_direct32(priv, reg, value) \
255 __iwl3945_write_direct32(__LINE__, priv, reg, value)
256#else
257#define iwl3945_write_direct32 _iwl3945_write_direct32
258#endif
259
260static inline void iwl3945_write_reg_buf(struct iwl3945_priv *priv,
261 u32 reg, u32 len, u32 *values)
262{
263 u32 count = sizeof(u32);
264
265 if ((priv != NULL) && (values != NULL)) {
266 for (; 0 < len; len -= count, reg += count, values++)
267 _iwl3945_write_direct32(priv, reg, *values);
268 }
269}
270
271static inline int _iwl3945_poll_direct_bit(struct iwl3945_priv *priv,
272 u32 addr, u32 mask, int timeout)
273{
274 return _iwl3945_poll_bit(priv, addr, mask, mask, timeout);
275}
276
277#ifdef CONFIG_IWL3945_DEBUG
278static inline int __iwl3945_poll_direct_bit(const char *f, u32 l,
279 struct iwl3945_priv *priv,
280 u32 addr, u32 mask, int timeout)
281{
282 int ret = _iwl3945_poll_direct_bit(priv, addr, mask, timeout);
283
284 if (unlikely(ret == -ETIMEDOUT))
285 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) - "
286 "timedout - %s %d\n", addr, mask, f, l);
287 else
288 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
289 "- %s %d\n", addr, mask, ret, f, l);
290 return ret;
291}
292#define iwl3945_poll_direct_bit(priv, addr, mask, timeout) \
293 __iwl3945_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
294#else
295#define iwl3945_poll_direct_bit _iwl3945_poll_direct_bit
296#endif
297
298static inline u32 _iwl3945_read_prph(struct iwl3945_priv *priv, u32 reg)
299{
300 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
301 rmb();
302 return _iwl3945_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
303}
304#ifdef CONFIG_IWL3945_DEBUG
305static inline u32 __iwl3945_read_prph(u32 line, struct iwl3945_priv *priv, u32 reg)
306{
307 if (!atomic_read(&priv->restrict_refcnt))
308 IWL_ERROR("Nic access not held from line %d\n", line);
309 return _iwl3945_read_prph(priv, reg);
310}
311
312#define iwl3945_read_prph(priv, reg) \
313 __iwl3945_read_prph(__LINE__, priv, reg)
314#else
315#define iwl3945_read_prph _iwl3945_read_prph
316#endif
317
318static inline void _iwl3945_write_prph(struct iwl3945_priv *priv,
319 u32 addr, u32 val)
320{
321 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
322 ((addr & 0x0000FFFF) | (3 << 24)));
323 wmb();
324 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
325}
326#ifdef CONFIG_IWL3945_DEBUG
327static inline void __iwl3945_write_prph(u32 line, struct iwl3945_priv *priv,
328 u32 addr, u32 val)
329{
330 if (!atomic_read(&priv->restrict_refcnt))
331 IWL_ERROR("Nic access from line %d\n", line);
332 _iwl3945_write_prph(priv, addr, val);
333}
334
335#define iwl3945_write_prph(priv, addr, val) \
336 __iwl3945_write_prph(__LINE__, priv, addr, val);
337#else
338#define iwl3945_write_prph _iwl3945_write_prph
339#endif
340
341#define _iwl3945_set_bits_prph(priv, reg, mask) \
342 _iwl3945_write_prph(priv, reg, (_iwl3945_read_prph(priv, reg) | mask))
343#ifdef CONFIG_IWL3945_DEBUG
344static inline void __iwl3945_set_bits_prph(u32 line, struct iwl3945_priv *priv,
345 u32 reg, u32 mask)
346{
347 if (!atomic_read(&priv->restrict_refcnt))
348 IWL_ERROR("Nic access not held from line %d\n", line);
349
350 _iwl3945_set_bits_prph(priv, reg, mask);
351}
352#define iwl3945_set_bits_prph(priv, reg, mask) \
353 __iwl3945_set_bits_prph(__LINE__, priv, reg, mask)
354#else
355#define iwl3945_set_bits_prph _iwl3945_set_bits_prph
356#endif
357
358#define _iwl3945_set_bits_mask_prph(priv, reg, bits, mask) \
359 _iwl3945_write_prph(priv, reg, ((_iwl3945_read_prph(priv, reg) & mask) | bits))
360
361#ifdef CONFIG_IWL3945_DEBUG
362static inline void __iwl3945_set_bits_mask_prph(u32 line,
363 struct iwl3945_priv *priv, u32 reg, u32 bits, u32 mask)
364{
365 if (!atomic_read(&priv->restrict_refcnt))
366 IWL_ERROR("Nic access not held from line %d\n", line);
367 _iwl3945_set_bits_mask_prph(priv, reg, bits, mask);
368}
369#define iwl3945_set_bits_mask_prph(priv, reg, bits, mask) \
370 __iwl3945_set_bits_mask_prph(__LINE__, priv, reg, bits, mask)
371#else
372#define iwl3945_set_bits_mask_prph _iwl3945_set_bits_mask_prph
373#endif
374
375static inline void iwl3945_clear_bits_prph(struct iwl3945_priv
376 *priv, u32 reg, u32 mask)
377{
378 u32 val = _iwl3945_read_prph(priv, reg);
379 _iwl3945_write_prph(priv, reg, (val & ~mask));
380}
381
382static inline u32 iwl3945_read_targ_mem(struct iwl3945_priv *priv, u32 addr)
383{
384 iwl3945_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
385 rmb();
386 return iwl3945_read_direct32(priv, HBUS_TARG_MEM_RDAT);
387}
388
389static inline void iwl3945_write_targ_mem(struct iwl3945_priv *priv, u32 addr, u32 val)
390{
391 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
392 wmb();
393 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
394}
395
396static inline void iwl3945_write_targ_mem_buf(struct iwl3945_priv *priv, u32 addr,
397 u32 len, u32 *values)
398{
399 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
400 wmb();
401 for (; 0 < len; len -= sizeof(u32), values++)
402 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
403}
404#endif
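The header deleted above wraps every register access in the single/double underscore convention described in its opening comment, and its timing-sensitive helpers all reduce to one primitive: _iwl3945_poll_bit(), which re-reads a register every 10 usec until the masked bits match or the timeout expires. A stand-alone sketch of that loop, with ioread32() replaced by a stub (the stub and the sample timeout are invented):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* stand-in for ioread32(): the bit comes up on the third poll,
     * mimicking a NIC that takes a moment to wake */
    static uint32_t read_reg(void)
    {
            static int calls;
            return ++calls >= 3 ? 0x1 : 0x0;
    }

    /* same shape as _iwl3945_poll_bit(): returns elapsed usec on success,
     * -ETIMEDOUT otherwise */
    static int poll_bit(uint32_t bits, uint32_t mask, int timeout_usec)
    {
            int elapsed = 0;

            do {
                    if ((read_reg() & mask) == (bits & mask))
                            return elapsed;
                    usleep(10);
                    elapsed += 10;
            } while (elapsed < timeout_usec);

            return -ETIMEDOUT;
    }

    int main(void)
    {
            printf("poll_bit returned %d\n", poll_bit(0x1, 0x1, 50));
            return 0;
    }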
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 4c638909a7d..a973ac13a1d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -38,8 +38,10 @@
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40 40
41#include "iwl-commands.h"
41#include "iwl-3945.h" 42#include "iwl-3945.h"
42#include "iwl-helpers.h" 43#include "iwl-core.h"
44#include "iwl-dev.h"
43 45
44 46
45static const struct { 47static const struct {
@@ -67,8 +69,8 @@ static const struct {
67#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /*Exclude Solid on*/ 69#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /*Exclude Solid on*/
68#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1) 70#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
69 71
70static int iwl3945_led_cmd_callback(struct iwl3945_priv *priv, 72static int iwl3945_led_cmd_callback(struct iwl_priv *priv,
71 struct iwl3945_cmd *cmd, 73 struct iwl_cmd *cmd,
72 struct sk_buff *skb) 74 struct sk_buff *skb)
73{ 75{
74 return 1; 76 return 1;
@@ -80,27 +82,27 @@ static inline int iwl3945_brightness_to_idx(enum led_brightness brightness)
80} 82}
81 83
82/* Send led command */ 84/* Send led command */
83static int iwl_send_led_cmd(struct iwl3945_priv *priv, 85static int iwl_send_led_cmd(struct iwl_priv *priv,
84 struct iwl3945_led_cmd *led_cmd) 86 struct iwl_led_cmd *led_cmd)
85{ 87{
86 struct iwl3945_host_cmd cmd = { 88 struct iwl_host_cmd cmd = {
87 .id = REPLY_LEDS_CMD, 89 .id = REPLY_LEDS_CMD,
88 .len = sizeof(struct iwl3945_led_cmd), 90 .len = sizeof(struct iwl_led_cmd),
89 .data = led_cmd, 91 .data = led_cmd,
90 .meta.flags = CMD_ASYNC, 92 .meta.flags = CMD_ASYNC,
91 .meta.u.callback = iwl3945_led_cmd_callback, 93 .meta.u.callback = iwl3945_led_cmd_callback,
92 }; 94 };
93 95
94 return iwl3945_send_cmd(priv, &cmd); 96 return iwl_send_cmd(priv, &cmd);
95} 97}
96 98
97 99
98 100
99/* Set led on command */ 101/* Set led on command */
100static int iwl3945_led_pattern(struct iwl3945_priv *priv, int led_id, 102static int iwl3945_led_pattern(struct iwl_priv *priv, int led_id,
101 unsigned int idx) 103 unsigned int idx)
102{ 104{
103 struct iwl3945_led_cmd led_cmd = { 105 struct iwl_led_cmd led_cmd = {
104 .id = led_id, 106 .id = led_id,
105 .interval = IWL_DEF_LED_INTRVL 107 .interval = IWL_DEF_LED_INTRVL
106 }; 108 };
@@ -114,11 +116,10 @@ static int iwl3945_led_pattern(struct iwl3945_priv *priv, int led_id,
114} 116}
115 117
116 118
117#if 1
118/* Set led on command */ 119/* Set led on command */
119static int iwl3945_led_on(struct iwl3945_priv *priv, int led_id) 120static int iwl3945_led_on(struct iwl_priv *priv, int led_id)
120{ 121{
121 struct iwl3945_led_cmd led_cmd = { 122 struct iwl_led_cmd led_cmd = {
122 .id = led_id, 123 .id = led_id,
123 .on = IWL_LED_SOLID, 124 .on = IWL_LED_SOLID,
124 .off = 0, 125 .off = 0,
@@ -128,24 +129,22 @@ static int iwl3945_led_on(struct iwl3945_priv *priv, int led_id)
128} 129}
129 130
130/* Set led off command */ 131/* Set led off command */
131static int iwl3945_led_off(struct iwl3945_priv *priv, int led_id) 132static int iwl3945_led_off(struct iwl_priv *priv, int led_id)
132{ 133{
133 struct iwl3945_led_cmd led_cmd = { 134 struct iwl_led_cmd led_cmd = {
134 .id = led_id, 135 .id = led_id,
135 .on = 0, 136 .on = 0,
136 .off = 0, 137 .off = 0,
137 .interval = IWL_DEF_LED_INTRVL 138 .interval = IWL_DEF_LED_INTRVL
138 }; 139 };
139 IWL_DEBUG_LED("led off %d\n", led_id); 140 IWL_DEBUG_LED(priv, "led off %d\n", led_id);
140 return iwl_send_led_cmd(priv, &led_cmd); 141 return iwl_send_led_cmd(priv, &led_cmd);
141} 142}
142#endif
143
144 143
145/* 144/*
146 * brightness call back function for Tx/Rx LED 145 * brightness call back function for Tx/Rx LED
147 */ 146 */
148static int iwl3945_led_associated(struct iwl3945_priv *priv, int led_id) 147static int iwl3945_led_associated(struct iwl_priv *priv, int led_id)
149{ 148{
150 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 149 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
151 !test_bit(STATUS_READY, &priv->status)) 150 !test_bit(STATUS_READY, &priv->status))
@@ -166,7 +165,7 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
166{ 165{
167 struct iwl3945_led *led = container_of(led_cdev, 166 struct iwl3945_led *led = container_of(led_cdev,
168 struct iwl3945_led, led_dev); 167 struct iwl3945_led, led_dev);
169 struct iwl3945_priv *priv = led->priv; 168 struct iwl_priv *priv = led->priv;
170 169
171 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 170 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
172 return; 171 return;
@@ -175,7 +174,7 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
175 case LED_FULL: 174 case LED_FULL:
176 if (led->type == IWL_LED_TRG_ASSOC) { 175 if (led->type == IWL_LED_TRG_ASSOC) {
177 priv->allow_blinking = 1; 176 priv->allow_blinking = 1;
178 IWL_DEBUG_LED("MAC is associated\n"); 177 IWL_DEBUG_LED(priv, "MAC is associated\n");
179 } 178 }
180 if (led->led_on) 179 if (led->led_on)
181 led->led_on(priv, IWL_LED_LINK); 180 led->led_on(priv, IWL_LED_LINK);
@@ -183,7 +182,7 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
183 case LED_OFF: 182 case LED_OFF:
184 if (led->type == IWL_LED_TRG_ASSOC) { 183 if (led->type == IWL_LED_TRG_ASSOC) {
185 priv->allow_blinking = 0; 184 priv->allow_blinking = 0;
186 IWL_DEBUG_LED("MAC is disassociated\n"); 185 IWL_DEBUG_LED(priv, "MAC is disassociated\n");
187 } 186 }
188 if (led->led_off) 187 if (led->led_off)
189 led->led_off(priv, IWL_LED_LINK); 188 led->led_off(priv, IWL_LED_LINK);
@@ -202,7 +201,7 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
202/* 201/*
203 * Register led class with the system 202 * Register led class with the system
204 */ 203 */
205static int iwl3945_led_register_led(struct iwl3945_priv *priv, 204static int iwl3945_led_register_led(struct iwl_priv *priv,
206 struct iwl3945_led *led, 205 struct iwl3945_led *led,
207 enum led_type type, u8 set_led, 206 enum led_type type, u8 set_led,
208 char *trigger) 207 char *trigger)
@@ -219,7 +218,7 @@ static int iwl3945_led_register_led(struct iwl3945_priv *priv,
219 218
220 ret = led_classdev_register(device, &led->led_dev); 219 ret = led_classdev_register(device, &led->led_dev);
221 if (ret) { 220 if (ret) {
222 IWL_ERROR("Error: failed to register led handler.\n"); 221 IWL_ERR(priv, "Error: failed to register led handler.\n");
223 return ret; 222 return ret;
224 } 223 }
225 224
@@ -234,7 +233,7 @@ static int iwl3945_led_register_led(struct iwl3945_priv *priv,
234/* 233/*
235 * calculate blink rate according to last 2 sec Tx/Rx activities 234 * calculate blink rate according to last 2 sec Tx/Rx activities
236 */ 235 */
237static inline u8 get_blink_rate(struct iwl3945_priv *priv) 236static inline u8 get_blink_rate(struct iwl_priv *priv)
238{ 237{
239 int index; 238 int index;
240 u64 current_tpt = priv->rxtxpackets; 239 u64 current_tpt = priv->rxtxpackets;
@@ -253,7 +252,7 @@ static inline u8 get_blink_rate(struct iwl3945_priv *priv)
253 return index; 252 return index;
254} 253}
255 254
256static inline int is_rf_kill(struct iwl3945_priv *priv) 255static inline int is_rf_kill(struct iwl_priv *priv)
257{ 256{
258 return test_bit(STATUS_RF_KILL_HW, &priv->status) || 257 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
259 test_bit(STATUS_RF_KILL_SW, &priv->status); 258 test_bit(STATUS_RF_KILL_SW, &priv->status);
@@ -264,7 +263,7 @@ static inline int is_rf_kill(struct iwl3945_priv *priv)
 264 * happen very frequently we postpone the led command to be called from 263 * happen very frequently we postpone the led command to be called from
265 * REPLY handler so we know ucode is up 264 * REPLY handler so we know ucode is up
266 */ 265 */
267void iwl3945_led_background(struct iwl3945_priv *priv) 266void iwl3945_led_background(struct iwl_priv *priv)
268{ 267{
269 u8 blink_idx; 268 u8 blink_idx;
270 269
@@ -304,7 +303,7 @@ void iwl3945_led_background(struct iwl3945_priv *priv)
304 303
305 304
306/* Register all led handler */ 305/* Register all led handler */
307int iwl3945_led_register(struct iwl3945_priv *priv) 306int iwl3945_led_register(struct iwl_priv *priv)
308{ 307{
309 char *trigger; 308 char *trigger;
310 int ret; 309 int ret;
@@ -316,66 +315,66 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
316 priv->allow_blinking = 0; 315 priv->allow_blinking = 0;
317 316
318 trigger = ieee80211_get_radio_led_name(priv->hw); 317 trigger = ieee80211_get_radio_led_name(priv->hw);
319 snprintf(priv->led[IWL_LED_TRG_RADIO].name, 318 snprintf(priv->led39[IWL_LED_TRG_RADIO].name,
320 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio", 319 sizeof(priv->led39[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
321 wiphy_name(priv->hw->wiphy)); 320 wiphy_name(priv->hw->wiphy));
322 321
323 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on; 322 priv->led39[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
324 priv->led[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off; 323 priv->led39[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off;
325 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL; 324 priv->led39[IWL_LED_TRG_RADIO].led_pattern = NULL;
326 325
327 ret = iwl3945_led_register_led(priv, 326 ret = iwl3945_led_register_led(priv,
328 &priv->led[IWL_LED_TRG_RADIO], 327 &priv->led39[IWL_LED_TRG_RADIO],
329 IWL_LED_TRG_RADIO, 1, trigger); 328 IWL_LED_TRG_RADIO, 1, trigger);
330 329
331 if (ret) 330 if (ret)
332 goto exit_fail; 331 goto exit_fail;
333 332
334 trigger = ieee80211_get_assoc_led_name(priv->hw); 333 trigger = ieee80211_get_assoc_led_name(priv->hw);
335 snprintf(priv->led[IWL_LED_TRG_ASSOC].name, 334 snprintf(priv->led39[IWL_LED_TRG_ASSOC].name,
336 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc", 335 sizeof(priv->led39[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
337 wiphy_name(priv->hw->wiphy)); 336 wiphy_name(priv->hw->wiphy));
338 337
339 ret = iwl3945_led_register_led(priv, 338 ret = iwl3945_led_register_led(priv,
340 &priv->led[IWL_LED_TRG_ASSOC], 339 &priv->led39[IWL_LED_TRG_ASSOC],
341 IWL_LED_TRG_ASSOC, 0, trigger); 340 IWL_LED_TRG_ASSOC, 0, trigger);
342 341
343 /* for assoc always turn led on */ 342 /* for assoc always turn led on */
344 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on; 343 priv->led39[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on;
345 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on; 344 priv->led39[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on;
346 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL; 345 priv->led39[IWL_LED_TRG_ASSOC].led_pattern = NULL;
347 346
348 if (ret) 347 if (ret)
349 goto exit_fail; 348 goto exit_fail;
350 349
351 trigger = ieee80211_get_rx_led_name(priv->hw); 350 trigger = ieee80211_get_rx_led_name(priv->hw);
352 snprintf(priv->led[IWL_LED_TRG_RX].name, 351 snprintf(priv->led39[IWL_LED_TRG_RX].name,
353 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX", 352 sizeof(priv->led39[IWL_LED_TRG_RX].name), "iwl-%s::RX",
354 wiphy_name(priv->hw->wiphy)); 353 wiphy_name(priv->hw->wiphy));
355 354
356 ret = iwl3945_led_register_led(priv, 355 ret = iwl3945_led_register_led(priv,
357 &priv->led[IWL_LED_TRG_RX], 356 &priv->led39[IWL_LED_TRG_RX],
358 IWL_LED_TRG_RX, 0, trigger); 357 IWL_LED_TRG_RX, 0, trigger);
359 358
360 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated; 359 priv->led39[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
361 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated; 360 priv->led39[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
362 priv->led[IWL_LED_TRG_RX].led_pattern = iwl3945_led_pattern; 361 priv->led39[IWL_LED_TRG_RX].led_pattern = iwl3945_led_pattern;
363 362
364 if (ret) 363 if (ret)
365 goto exit_fail; 364 goto exit_fail;
366 365
367 trigger = ieee80211_get_tx_led_name(priv->hw); 366 trigger = ieee80211_get_tx_led_name(priv->hw);
368 snprintf(priv->led[IWL_LED_TRG_TX].name, 367 snprintf(priv->led39[IWL_LED_TRG_TX].name,
369 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX", 368 sizeof(priv->led39[IWL_LED_TRG_TX].name), "iwl-%s::TX",
370 wiphy_name(priv->hw->wiphy)); 369 wiphy_name(priv->hw->wiphy));
371 370
372 ret = iwl3945_led_register_led(priv, 371 ret = iwl3945_led_register_led(priv,
373 &priv->led[IWL_LED_TRG_TX], 372 &priv->led39[IWL_LED_TRG_TX],
374 IWL_LED_TRG_TX, 0, trigger); 373 IWL_LED_TRG_TX, 0, trigger);
375 374
376 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated; 375 priv->led39[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
377 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated; 376 priv->led39[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
378 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern; 377 priv->led39[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
379 378
380 if (ret) 379 if (ret)
381 goto exit_fail; 380 goto exit_fail;
@@ -402,11 +401,11 @@ static void iwl3945_led_unregister_led(struct iwl3945_led *led, u8 set_led)
402} 401}
403 402
404/* Unregister all led handlers */ 403/* Unregister all led handlers */
405void iwl3945_led_unregister(struct iwl3945_priv *priv) 404void iwl3945_led_unregister(struct iwl_priv *priv)
406{ 405{
407 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0); 406 iwl3945_led_unregister_led(&priv->led39[IWL_LED_TRG_ASSOC], 0);
408 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RX], 0); 407 iwl3945_led_unregister_led(&priv->led39[IWL_LED_TRG_RX], 0);
409 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_TX], 0); 408 iwl3945_led_unregister_led(&priv->led39[IWL_LED_TRG_TX], 0);
410 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1); 409 iwl3945_led_unregister_led(&priv->led39[IWL_LED_TRG_RADIO], 1);
411} 410}
412 411
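The led.c changes above keep the driver's blink behaviour: get_blink_rate() picks an index from a threshold table based on priv->rxtxpackets over the last two seconds, and iwl3945_led_pattern() turns that index into a REPLY_LEDS_CMD. A rough sketch of such a table lookup; all threshold and on/off values below are invented, since the real blink_tbl contents are not shown in this hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical activity thresholds (packets in the last 2 seconds)
     * and LED on/off times; not the driver's real table */
    static const struct {
            uint64_t tpt;
            uint8_t on;
            uint8_t off;
    } blink_tbl[] = {
            { 300, 25, 25 },        /* busy link: fast blink */
            { 100, 55, 55 },        /* moderate traffic      */
            {   0, 75, 75 },        /* near idle: slow blink */
    };

    /* walk the table from the busiest entry down, stopping at the first
     * threshold the observed packet count exceeds */
    static unsigned int blink_idx(uint64_t packets)
    {
            unsigned int i = 0;

            while (i < 2 && packets <= blink_tbl[i].tpt)
                    i++;
            return i;
    }

    int main(void)
    {
            printf("500 pkts -> idx %u, 50 pkts -> idx %u\n",
                   blink_idx(500), blink_idx(50));
            return 0;
    }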
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 749ac035fd6..88185a6ccd6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -27,48 +27,34 @@
27#ifndef IWL3945_LEDS_H 27#ifndef IWL3945_LEDS_H
28#define IWL3945_LEDS_H 28#define IWL3945_LEDS_H
29 29
30struct iwl3945_priv; 30struct iwl_priv;
31 31
32#ifdef CONFIG_IWL3945_LEDS 32#ifdef CONFIG_IWL3945_LEDS
33#define IWL_LED_SOLID 11
34#define IWL_LED_NAME_LEN 31
35#define IWL_DEF_LED_INTRVL __constant_cpu_to_le32(1000)
36 33
37#define IWL_LED_ACTIVITY (0<<1) 34#include "iwl-led.h"
38#define IWL_LED_LINK (1<<1)
39
40enum led_type {
41 IWL_LED_TRG_TX,
42 IWL_LED_TRG_RX,
43 IWL_LED_TRG_ASSOC,
44 IWL_LED_TRG_RADIO,
45 IWL_LED_TRG_MAX,
46};
47
48#include <linux/leds.h>
49 35
50struct iwl3945_led { 36struct iwl3945_led {
51 struct iwl3945_priv *priv; 37 struct iwl_priv *priv;
52 struct led_classdev led_dev; 38 struct led_classdev led_dev;
53 char name[32]; 39 char name[32];
54 40
55 int (*led_on) (struct iwl3945_priv *priv, int led_id); 41 int (*led_on) (struct iwl_priv *priv, int led_id);
56 int (*led_off) (struct iwl3945_priv *priv, int led_id); 42 int (*led_off) (struct iwl_priv *priv, int led_id);
57 int (*led_pattern) (struct iwl3945_priv *priv, int led_id, 43 int (*led_pattern) (struct iwl_priv *priv, int led_id,
58 unsigned int idx); 44 unsigned int idx);
59 45
60 enum led_type type; 46 enum led_type type;
61 unsigned int registered; 47 unsigned int registered;
62}; 48};
63 49
64extern int iwl3945_led_register(struct iwl3945_priv *priv); 50extern int iwl3945_led_register(struct iwl_priv *priv);
65extern void iwl3945_led_unregister(struct iwl3945_priv *priv); 51extern void iwl3945_led_unregister(struct iwl_priv *priv);
66extern void iwl3945_led_background(struct iwl3945_priv *priv); 52extern void iwl3945_led_background(struct iwl_priv *priv);
67 53
68#else 54#else
69static inline int iwl3945_led_register(struct iwl3945_priv *priv) { return 0; } 55static inline int iwl3945_led_register(struct iwl_priv *priv) { return 0; }
70static inline void iwl3945_led_unregister(struct iwl3945_priv *priv) {} 56static inline void iwl3945_led_unregister(struct iwl_priv *priv) {}
71static inline void iwl3945_led_background(struct iwl3945_priv *priv) {} 57static inline void iwl3945_led_background(struct iwl_priv *priv) {}
72#endif /* CONFIG_IWL3945_LEDS */ 58#endif /* CONFIG_IWL3945_LEDS */
73 59
74#endif /* IWL3945_LEDS_H */ 60#endif /* IWL3945_LEDS_H */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 21c841847d8..7db8198c625 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -36,6 +36,7 @@
36 36
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38 38
39#include "iwl-commands.h"
39#include "iwl-3945.h" 40#include "iwl-3945.h"
40 41
41#define RS_NAME "iwl-3945-rs" 42#define RS_NAME "iwl-3945-rs"
@@ -51,6 +52,7 @@ struct iwl3945_rate_scale_data {
51 52
52struct iwl3945_rs_sta { 53struct iwl3945_rs_sta {
53 spinlock_t lock; 54 spinlock_t lock;
55 struct iwl_priv *priv;
54 s32 *expected_tpt; 56 s32 *expected_tpt;
55 unsigned long last_partial_flush; 57 unsigned long last_partial_flush;
56 unsigned long last_flush; 58 unsigned long last_flush;
@@ -62,7 +64,7 @@ struct iwl3945_rs_sta {
62 u8 start_rate; 64 u8 start_rate;
63 u8 ibss_sta_added; 65 u8 ibss_sta_added;
64 struct timer_list rate_scale_flush; 66 struct timer_list rate_scale_flush;
65 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT]; 67 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
66#ifdef CONFIG_MAC80211_DEBUGFS 68#ifdef CONFIG_MAC80211_DEBUGFS
67 struct dentry *rs_sta_dbgfs_stats_table_file; 69 struct dentry *rs_sta_dbgfs_stats_table_file;
68#endif 70#endif
@@ -71,19 +73,19 @@ struct iwl3945_rs_sta {
71 int last_txrate_idx; 73 int last_txrate_idx;
72}; 74};
73 75
74static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT] = { 76static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
75 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 77 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
76}; 78};
77 79
78static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT] = { 80static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
79 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 81 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
80}; 82};
81 83
82static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT] = { 84static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
83 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 85 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
84}; 86};
85 87
86static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT] = { 88static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
87 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 89 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
88}; 90};
89 91
@@ -119,7 +121,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
119#define IWL_RATE_MAX_WINDOW 62 121#define IWL_RATE_MAX_WINDOW 62
120#define IWL_RATE_FLUSH (3*HZ) 122#define IWL_RATE_FLUSH (3*HZ)
121#define IWL_RATE_WIN_FLUSH (HZ/2) 123#define IWL_RATE_WIN_FLUSH (HZ/2)
122#define IWL_RATE_HIGH_TH 11520 124#define IWL39_RATE_HIGH_TH 11520
123#define IWL_SUCCESS_UP_TH 8960 125#define IWL_SUCCESS_UP_TH 8960
124#define IWL_SUCCESS_DOWN_TH 10880 126#define IWL_SUCCESS_DOWN_TH 10880
125#define IWL_RATE_MIN_FAILURE_TH 8 127#define IWL_RATE_MIN_FAILURE_TH 8
@@ -165,7 +167,7 @@ static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
165 window->success_counter = 0; 167 window->success_counter = 0;
166 window->success_ratio = -1; 168 window->success_ratio = -1;
167 window->counter = 0; 169 window->counter = 0;
168 window->average_tpt = IWL_INV_TPT; 170 window->average_tpt = IWL_INVALID_VALUE;
169 window->stamp = 0; 171 window->stamp = 0;
170} 172}
171 173
@@ -181,20 +183,21 @@ static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
181 int unflushed = 0; 183 int unflushed = 0;
182 int i; 184 int i;
183 unsigned long flags; 185 unsigned long flags;
186 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
184 187
185 /* 188 /*
186 * For each rate, if we have collected data on that rate 189 * For each rate, if we have collected data on that rate
187 * and it has been more than IWL_RATE_WIN_FLUSH 190 * and it has been more than IWL_RATE_WIN_FLUSH
188 * since we flushed, clear out the gathered statistics 191 * since we flushed, clear out the gathered statistics
189 */ 192 */
190 for (i = 0; i < IWL_RATE_COUNT; i++) { 193 for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
191 if (!rs_sta->win[i].counter) 194 if (!rs_sta->win[i].counter)
192 continue; 195 continue;
193 196
194 spin_lock_irqsave(&rs_sta->lock, flags); 197 spin_lock_irqsave(&rs_sta->lock, flags);
195 if (time_after(jiffies, rs_sta->win[i].stamp + 198 if (time_after(jiffies, rs_sta->win[i].stamp +
196 IWL_RATE_WIN_FLUSH)) { 199 IWL_RATE_WIN_FLUSH)) {
197 IWL_DEBUG_RATE("flushing %d samples of rate " 200 IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
198 "index %d\n", 201 "index %d\n",
199 rs_sta->win[i].counter, i); 202 rs_sta->win[i].counter, i);
200 iwl3945_clear_window(&rs_sta->win[i]); 203 iwl3945_clear_window(&rs_sta->win[i]);
@@ -213,11 +216,12 @@ static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
213static void iwl3945_bg_rate_scale_flush(unsigned long data) 216static void iwl3945_bg_rate_scale_flush(unsigned long data)
214{ 217{
215 struct iwl3945_rs_sta *rs_sta = (void *)data; 218 struct iwl3945_rs_sta *rs_sta = (void *)data;
219 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
216 int unflushed = 0; 220 int unflushed = 0;
217 unsigned long flags; 221 unsigned long flags;
218 u32 packet_count, duration, pps; 222 u32 packet_count, duration, pps;
219 223
220 IWL_DEBUG_RATE("enter\n"); 224 IWL_DEBUG_RATE(priv, "enter\n");
221 225
222 unflushed = iwl3945_rate_scale_flush_windows(rs_sta); 226 unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
223 227
@@ -232,7 +236,7 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
232 duration = 236 duration =
233 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); 237 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
234 238
235 IWL_DEBUG_RATE("Tx'd %d packets in %dms\n", 239 IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
236 packet_count, duration); 240 packet_count, duration);
237 241
238 /* Determine packets per second */ 242 /* Determine packets per second */
@@ -252,7 +256,7 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
252 256
253 rs_sta->flush_time = msecs_to_jiffies(duration); 257 rs_sta->flush_time = msecs_to_jiffies(duration);
254 258
255 IWL_DEBUG_RATE("new flush period: %d msec ave %d\n", 259 IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
256 duration, packet_count); 260 duration, packet_count);
257 261
258 mod_timer(&rs_sta->rate_scale_flush, jiffies + 262 mod_timer(&rs_sta->rate_scale_flush, jiffies +
@@ -270,7 +274,7 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
270 274
271 spin_unlock_irqrestore(&rs_sta->lock, flags); 275 spin_unlock_irqrestore(&rs_sta->lock, flags);
272 276
273 IWL_DEBUG_RATE("leave\n"); 277 IWL_DEBUG_RATE(priv, "leave\n");
274} 278}
275 279
276/** 280/**
@@ -286,9 +290,10 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
286{ 290{
287 unsigned long flags; 291 unsigned long flags;
288 s32 fail_count; 292 s32 fail_count;
293 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
289 294
290 if (!retries) { 295 if (!retries) {
291 IWL_DEBUG_RATE("leave: retries == 0 -- should be at least 1\n"); 296 IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
292 return; 297 return;
293 } 298 }
294 299
@@ -329,7 +334,7 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
329 window->average_tpt = ((window->success_ratio * 334 window->average_tpt = ((window->success_ratio *
330 rs_sta->expected_tpt[index] + 64) / 128); 335 rs_sta->expected_tpt[index] + 64) / 128);
331 else 336 else
332 window->average_tpt = IWL_INV_TPT; 337 window->average_tpt = IWL_INVALID_VALUE;
333 338
334 spin_unlock_irqrestore(&rs_sta->lock, flags); 339 spin_unlock_irqrestore(&rs_sta->lock, flags);
335 340
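The hunk above keeps the existing window arithmetic and only renames the invalid-value constant: when enough samples exist, window->average_tpt is the success ratio scaled by the rate's expected throughput, with the +64 giving round-to-nearest before the divide by 128 (the success ratio is carried with a fixed-point factor of 128). A worked sketch of that step; the sample numbers are invented and the driver's success_ratio may carry an additional percentage factor:

    #include <stdio.h>

    /* success_ratio carries a fixed-point factor of 128, so 96 here is
     * read as 96/128 = 75%; expected_tpt is the rate's nominal throughput */
    static int average_tpt(int success_ratio, int expected_tpt)
    {
            return (success_ratio * expected_tpt + 64) / 128;
    }

    int main(void)
    {
            /* 75% success at an expected throughput of 104 -> 78 */
            printf("average_tpt = %d\n", average_tpt(96, 104));
            return 0;
    }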
@@ -339,10 +344,10 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
339 struct ieee80211_sta *sta, void *priv_sta) 344 struct ieee80211_sta *sta, void *priv_sta)
340{ 345{
341 struct iwl3945_rs_sta *rs_sta = priv_sta; 346 struct iwl3945_rs_sta *rs_sta = priv_sta;
342 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 347 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
343 int i; 348 int i;
344 349
345 IWL_DEBUG_RATE("enter\n"); 350 IWL_DEBUG_RATE(priv, "enter\n");
346 351
347 /* TODO: what is a good starting rate for STA? About middle? Maybe not 352 /* TODO: what is a good starting rate for STA? About middle? Maybe not
348 * the lowest or the highest rate.. Could consider using RSSI from 353 * the lowest or the highest rate.. Could consider using RSSI from
@@ -365,7 +370,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
365 } 370 }
366 371
367 372
368 IWL_DEBUG_RATE("leave\n"); 373 IWL_DEBUG_RATE(priv, "leave\n");
369} 374}
370 375
371static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 376static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -379,10 +384,11 @@ static void rs_free(void *priv)
379 return; 384 return;
380} 385}
381 386
382static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) 387static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
383{ 388{
384 struct iwl3945_rs_sta *rs_sta; 389 struct iwl3945_rs_sta *rs_sta;
385 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 390 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
391 struct iwl_priv *priv = iwl_priv;
386 int i; 392 int i;
387 393
388 /* 394 /*
@@ -390,11 +396,11 @@ static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
390 * as well just put all the information there. 396 * as well just put all the information there.
391 */ 397 */
392 398
393 IWL_DEBUG_RATE("enter\n"); 399 IWL_DEBUG_RATE(priv, "enter\n");
394 400
395 rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp); 401 rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp);
396 if (!rs_sta) { 402 if (!rs_sta) {
397 IWL_DEBUG_RATE("leave: ENOMEM\n"); 403 IWL_DEBUG_RATE(priv, "leave: ENOMEM\n");
398 return NULL; 404 return NULL;
399 } 405 }
400 406
@@ -402,6 +408,8 @@ static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
402 408
403 spin_lock_init(&rs_sta->lock); 409 spin_lock_init(&rs_sta->lock);
404 410
411 rs_sta->priv = priv;
412
405 rs_sta->start_rate = IWL_RATE_INVALID; 413 rs_sta->start_rate = IWL_RATE_INVALID;
406 414
407 /* default to just 802.11b */ 415 /* default to just 802.11b */
@@ -417,33 +425,34 @@ static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
417 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; 425 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
418 rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush; 426 rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush;
419 427
420 for (i = 0; i < IWL_RATE_COUNT; i++) 428 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
421 iwl3945_clear_window(&rs_sta->win[i]); 429 iwl3945_clear_window(&rs_sta->win[i]);
422 430
423 IWL_DEBUG_RATE("leave\n"); 431 IWL_DEBUG_RATE(priv, "leave\n");
424 432
425 return rs_sta; 433 return rs_sta;
426} 434}
427 435
428static void rs_free_sta(void *priv, struct ieee80211_sta *sta, 436static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
429 void *priv_sta) 437 void *priv_sta)
430{ 438{
431 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 439 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
432 struct iwl3945_rs_sta *rs_sta = priv_sta; 440 struct iwl3945_rs_sta *rs_sta = priv_sta;
441 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
433 442
434 psta->rs_sta = NULL; 443 psta->rs_sta = NULL;
435 444
436 IWL_DEBUG_RATE("enter\n"); 445 IWL_DEBUG_RATE(priv, "enter\n");
437 del_timer_sync(&rs_sta->rate_scale_flush); 446 del_timer_sync(&rs_sta->rate_scale_flush);
438 kfree(rs_sta); 447 kfree(rs_sta);
439 IWL_DEBUG_RATE("leave\n"); 448 IWL_DEBUG_RATE(priv, "leave\n");
440} 449}
441 450
442 451
443/** 452/**
444 * rs_tx_status - Update rate control values based on Tx results 453 * rs_tx_status - Update rate control values based on Tx results
445 * 454 *
446 * NOTE: Uses iwl3945_priv->retry_rate for the # of retries attempted by 455 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
447 * the hardware for each rate. 456 * the hardware for each rate.
448 */ 457 */
449static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, 458static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
@@ -453,22 +462,22 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
453 s8 retries = 0, current_count; 462 s8 retries = 0, current_count;
454 int scale_rate_index, first_index, last_index; 463 int scale_rate_index, first_index, last_index;
455 unsigned long flags; 464 unsigned long flags;
456 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate; 465 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
457 struct iwl3945_rs_sta *rs_sta = priv_sta; 466 struct iwl3945_rs_sta *rs_sta = priv_sta;
458 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 467 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
459 468
460 IWL_DEBUG_RATE("enter\n"); 469 IWL_DEBUG_RATE(priv, "enter\n");
461 470
462 retries = info->status.rates[0].count; 471 retries = info->status.rates[0].count;
463 472
464 first_index = sband->bitrates[info->status.rates[0].idx].hw_value; 473 first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
465 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { 474 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
466 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index); 475 IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
467 return; 476 return;
468 } 477 }
469 478
470 if (!priv_sta) { 479 if (!priv_sta) {
471 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 480 IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
472 return; 481 return;
473 } 482 }
474 483
@@ -502,7 +511,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
502 iwl3945_collect_tx_data(rs_sta, 511 iwl3945_collect_tx_data(rs_sta,
503 &rs_sta->win[scale_rate_index], 512 &rs_sta->win[scale_rate_index],
504 0, current_count, scale_rate_index); 513 0, current_count, scale_rate_index);
505 IWL_DEBUG_RATE("Update rate %d for %d retries.\n", 514 IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
506 scale_rate_index, current_count); 515 scale_rate_index, current_count);
507 516
508 retries -= current_count; 517 retries -= current_count;
@@ -512,7 +521,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
512 521
513 522
514 /* Update the last index window with success/failure based on ACK */ 523 /* Update the last index window with success/failure based on ACK */
515 IWL_DEBUG_RATE("Update rate %d with %s.\n", 524 IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
516 last_index, 525 last_index,
517 (info->flags & IEEE80211_TX_STAT_ACK) ? 526 (info->flags & IEEE80211_TX_STAT_ACK) ?
518 "success" : "failure"); 527 "success" : "failure");
@@ -537,7 +546,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
537 546
538 spin_unlock_irqrestore(&rs_sta->lock, flags); 547 spin_unlock_irqrestore(&rs_sta->lock, flags);
539 548
540 IWL_DEBUG_RATE("leave\n"); 549 IWL_DEBUG_RATE(priv, "leave\n");
541 550
542 return; 551 return;
543} 552}
@@ -547,6 +556,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
547{ 556{
548 u8 high = IWL_RATE_INVALID; 557 u8 high = IWL_RATE_INVALID;
549 u8 low = IWL_RATE_INVALID; 558 u8 low = IWL_RATE_INVALID;
559 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
550 560
551 /* 802.11A walks to the next literal adjacent rate in 561 /* 802.11A walks to the next literal adjacent rate in
552 * the rate table */ 562 * the rate table */
@@ -565,7 +575,8 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
565 575
566 /* Find the next rate that is in the rate mask */ 576 /* Find the next rate that is in the rate mask */
567 i = index + 1; 577 i = index + 1;
568 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { 578 for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
579 i++, mask <<= 1) {
569 if (rate_mask & mask) { 580 if (rate_mask & mask) {
570 high = i; 581 high = i;
571 break; 582 break;
@@ -585,7 +596,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
585 break; 596 break;
586 if (rate_mask & (1 << low)) 597 if (rate_mask & (1 << low))
587 break; 598 break;
588 IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low); 599 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
589 } 600 }
590 601
591 high = index; 602 high = index;
@@ -598,7 +609,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
598 break; 609 break;
599 if (rate_mask & (1 << high)) 610 if (rate_mask & (1 << high))
600 break; 611 break;
601 IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high); 612 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
602 } 613 }
603 614
604 return (high << 8) | low; 615 return (high << 8) | low;
@@ -631,19 +642,20 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
631 int index; 642 int index;
632 struct iwl3945_rs_sta *rs_sta = priv_sta; 643 struct iwl3945_rs_sta *rs_sta = priv_sta;
633 struct iwl3945_rate_scale_data *window = NULL; 644 struct iwl3945_rate_scale_data *window = NULL;
634 int current_tpt = IWL_INV_TPT; 645 int current_tpt = IWL_INVALID_VALUE;
635 int low_tpt = IWL_INV_TPT; 646 int low_tpt = IWL_INVALID_VALUE;
636 int high_tpt = IWL_INV_TPT; 647 int high_tpt = IWL_INVALID_VALUE;
637 u32 fail_count; 648 u32 fail_count;
638 s8 scale_action = 0; 649 s8 scale_action = 0;
639 unsigned long flags; 650 unsigned long flags;
640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 651 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
641 u16 fc; 652 u16 fc;
642 u16 rate_mask = 0; 653 u16 rate_mask = 0;
643 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 654 s8 max_rate_idx = -1;
655 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
644 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 656 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
645 657
646 IWL_DEBUG_RATE("enter\n"); 658 IWL_DEBUG_RATE(priv, "enter\n");
647 659
648 if (sta) 660 if (sta)
649 rate_mask = sta->supp_rates[sband->band]; 661 rate_mask = sta->supp_rates[sband->band];
@@ -654,7 +666,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
654 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 666 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
655 is_multicast_ether_addr(hdr->addr1) || 667 is_multicast_ether_addr(hdr->addr1) ||
656 !sta || !priv_sta) { 668 !sta || !priv_sta) {
657 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 669 IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
658 if (!rate_mask) 670 if (!rate_mask)
659 info->control.rates[0].idx = 671 info->control.rates[0].idx =
660 rate_lowest_index(sband, NULL); 672 rate_lowest_index(sband, NULL);
@@ -664,7 +676,14 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
664 return; 676 return;
665 } 677 }
666 678
667 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 679 /* get user max rate if set */
680 max_rate_idx = txrc->max_rate_idx;
681 if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
682 max_rate_idx += IWL_FIRST_OFDM_RATE;
683 if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
684 max_rate_idx = -1;
685
686 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
668 687
669 if (sband->band == IEEE80211_BAND_5GHZ) 688 if (sband->band == IEEE80211_BAND_5GHZ)
670 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; 689 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
@@ -674,7 +693,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
674 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 693 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
675 694
676 if (sta_id == IWL_INVALID_STATION) { 695 if (sta_id == IWL_INVALID_STATION) {
 677 IWL_DEBUG_RATE("LQ: ADD station %pM\n", 696 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
678 hdr->addr1); 697 hdr->addr1);
679 sta_id = iwl3945_add_station(priv, 698 sta_id = iwl3945_add_station(priv,
680 hdr->addr1, 0, CMD_ASYNC); 699 hdr->addr1, 0, CMD_ASYNC);
@@ -695,6 +714,12 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
695 rs_sta->start_rate = IWL_RATE_INVALID; 714 rs_sta->start_rate = IWL_RATE_INVALID;
696 } 715 }
697 716
717 /* force user max rate if set by user */
718 if ((max_rate_idx != -1) && (max_rate_idx < index)) {
719 if (rate_mask & (1 << max_rate_idx))
720 index = max_rate_idx;
721 }
722
698 window = &(rs_sta->win[index]); 723 window = &(rs_sta->win[index]);
699 724
700 fail_count = window->counter - window->success_counter; 725 fail_count = window->counter - window->success_counter;
@@ -703,7 +728,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
703 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { 728 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
704 spin_unlock_irqrestore(&rs_sta->lock, flags); 729 spin_unlock_irqrestore(&rs_sta->lock, flags);
705 730
706 IWL_DEBUG_RATE("Invalid average_tpt on rate %d: " 731 IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
707 "counter: %d, success_counter: %d, " 732 "counter: %d, success_counter: %d, "
708 "expected_tpt is %sNULL\n", 733 "expected_tpt is %sNULL\n",
709 index, 734 index,
@@ -721,6 +746,10 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
721 low = high_low & 0xff; 746 low = high_low & 0xff;
722 high = (high_low >> 8) & 0xff; 747 high = (high_low >> 8) & 0xff;
723 748
 749 /* If user set max rate, don't allow higher than the user constraint */
750 if ((max_rate_idx != -1) && (max_rate_idx < high))
751 high = IWL_RATE_INVALID;
752
724 if (low != IWL_RATE_INVALID) 753 if (low != IWL_RATE_INVALID)
725 low_tpt = rs_sta->win[low].average_tpt; 754 low_tpt = rs_sta->win[low].average_tpt;
726 755
@@ -732,29 +761,31 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
732 scale_action = 1; 761 scale_action = 1;
733 762
734 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { 763 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
735 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); 764 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
736 scale_action = -1; 765 scale_action = -1;
737 } else if ((low_tpt == IWL_INV_TPT) && (high_tpt == IWL_INV_TPT)) 766 } else if ((low_tpt == IWL_INVALID_VALUE) &&
767 (high_tpt == IWL_INVALID_VALUE))
738 scale_action = 1; 768 scale_action = 1;
739 else if ((low_tpt != IWL_INV_TPT) && (high_tpt != IWL_INV_TPT) && 769 else if ((low_tpt != IWL_INVALID_VALUE) &&
770 (high_tpt != IWL_INVALID_VALUE) &&
740 (low_tpt < current_tpt) && (high_tpt < current_tpt)) { 771 (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
741 IWL_DEBUG_RATE("No action -- low [%d] & high [%d] < " 772 IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
742 "current_tpt [%d]\n", 773 "current_tpt [%d]\n",
743 low_tpt, high_tpt, current_tpt); 774 low_tpt, high_tpt, current_tpt);
744 scale_action = 0; 775 scale_action = 0;
745 } else { 776 } else {
746 if (high_tpt != IWL_INV_TPT) { 777 if (high_tpt != IWL_INVALID_VALUE) {
747 if (high_tpt > current_tpt) 778 if (high_tpt > current_tpt)
748 scale_action = 1; 779 scale_action = 1;
749 else { 780 else {
750 IWL_DEBUG_RATE 781 IWL_DEBUG_RATE(priv,
751 ("decrease rate because of high tpt\n"); 782 "decrease rate because of high tpt\n");
752 scale_action = -1; 783 scale_action = -1;
753 } 784 }
754 } else if (low_tpt != IWL_INV_TPT) { 785 } else if (low_tpt != IWL_INVALID_VALUE) {
755 if (low_tpt > current_tpt) { 786 if (low_tpt > current_tpt) {
756 IWL_DEBUG_RATE 787 IWL_DEBUG_RATE(priv,
757 ("decrease rate because of low tpt\n"); 788 "decrease rate because of low tpt\n");
758 scale_action = -1; 789 scale_action = -1;
759 } else 790 } else
760 scale_action = 1; 791 scale_action = 1;
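The hunk above carries the heart of the 3945 rate-scaling algorithm: the throughput measured in the current rate's window is compared against the windows of the adjacent lower and higher rates, and the result is a step-down, hold, or step-up action. A minimal standalone sketch of that comparison, using a hypothetical scale_decision() helper with plain integers in place of the per-rate iwl3945_rate_scale_data windows and made-up constants standing in for IWL_INVALID_VALUE and IWL_RATE_DECREASE_TH, might look like this (illustrative only, not part of the patch):

/*
 * Illustrative sketch of the window-based scaling decision shown above.
 * The constants are hypothetical stand-ins; the driver additionally
 * applies the IWL_SUCCESS_DOWN_TH/IWL_SUCCESS_UP_TH checks and the
 * user max-rate cap before acting.
 */
#include <stdio.h>

#define INVALID_TPT  (-1)	/* stand-in for IWL_INVALID_VALUE */
#define DECREASE_TH  1920	/* stand-in for IWL_RATE_DECREASE_TH */

/* Return -1 to step down, 0 to hold, 1 to step up. */
static int scale_decision(int success_ratio, int current_tpt,
			  int low_tpt, int high_tpt)
{
	if (success_ratio < DECREASE_TH || !current_tpt)
		return -1;			/* too many failures */
	if (low_tpt == INVALID_TPT && high_tpt == INVALID_TPT)
		return 1;			/* no data yet: probe upward */
	if (low_tpt != INVALID_TPT && high_tpt != INVALID_TPT &&
	    low_tpt < current_tpt && high_tpt < current_tpt)
		return 0;			/* current rate is already best */
	if (high_tpt != INVALID_TPT)
		return high_tpt > current_tpt ? 1 : -1;
	if (low_tpt != INVALID_TPT)
		return low_tpt > current_tpt ? -1 : 1;
	return 1;
}

int main(void)
{
	/* Worked example: the higher rate measured better throughput. */
	printf("%d\n", scale_decision(9000, 3000, 1500, 4500));	/* prints 1 */
	return 0;
}

The driver then clamps the chosen index to the supported-rate mask and, as the new hunks in this patch add, to any user-configured maximum rate index.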
@@ -766,7 +797,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
766 scale_action = 0; 797 scale_action = 0;
767 } else if (scale_action == 1) { 798 } else if (scale_action == 1) {
768 if (window->success_ratio < IWL_SUCCESS_UP_TH) { 799 if (window->success_ratio < IWL_SUCCESS_UP_TH) {
769 IWL_DEBUG_RATE("No action -- success_ratio [%d] < " 800 IWL_DEBUG_RATE(priv, "No action -- success_ratio [%d] < "
770 "SUCCESS UP\n", window->success_ratio); 801 "SUCCESS UP\n", window->success_ratio);
771 scale_action = 0; 802 scale_action = 0;
772 } 803 }
@@ -789,7 +820,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
789 break; 820 break;
790 } 821 }
791 822
792 IWL_DEBUG_RATE("Selected %d (action %d) - low %d high %d\n", 823 IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
793 index, scale_action, low, high); 824 index, scale_action, low, high);
794 825
795 out: 826 out:
@@ -801,7 +832,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
801 else 832 else
802 info->control.rates[0].idx = rs_sta->last_txrate_idx; 833 info->control.rates[0].idx = rs_sta->last_txrate_idx;
803 834
804 IWL_DEBUG_RATE("leave: %d\n", index); 835 IWL_DEBUG_RATE(priv, "leave: %d\n", index);
805} 836}
806 837
807#ifdef CONFIG_MAC80211_DEBUGFS 838#ifdef CONFIG_MAC80211_DEBUGFS
@@ -825,7 +856,7 @@ static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
825 lq_sta->tx_packets, 856 lq_sta->tx_packets,
826 lq_sta->last_txrate_idx, 857 lq_sta->last_txrate_idx,
827 lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time)); 858 lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
828 for (j = 0; j < IWL_RATE_COUNT; j++) { 859 for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
829 desc += sprintf(buff+desc, 860 desc += sprintf(buff+desc,
830 "counter=%d success=%d %%=%d\n", 861 "counter=%d success=%d %%=%d\n",
831 lq_sta->win[j].counter, 862 lq_sta->win[j].counter,
@@ -877,18 +908,18 @@ static struct rate_control_ops rs_ops = {
877 908
878void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) 909void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
879{ 910{
880 struct iwl3945_priv *priv = hw->priv; 911 struct iwl_priv *priv = hw->priv;
881 s32 rssi = 0; 912 s32 rssi = 0;
882 unsigned long flags; 913 unsigned long flags;
883 struct iwl3945_rs_sta *rs_sta; 914 struct iwl3945_rs_sta *rs_sta;
884 struct ieee80211_sta *sta; 915 struct ieee80211_sta *sta;
885 struct iwl3945_sta_priv *psta; 916 struct iwl3945_sta_priv *psta;
886 917
887 IWL_DEBUG_RATE("enter\n"); 918 IWL_DEBUG_RATE(priv, "enter\n");
888 919
889 rcu_read_lock(); 920 rcu_read_lock();
890 921
891 sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr); 922 sta = ieee80211_find_sta(hw, priv->stations_39[sta_id].sta.sta.addr);
892 if (!sta) { 923 if (!sta) {
893 rcu_read_unlock(); 924 rcu_read_unlock();
894 return; 925 return;
@@ -924,11 +955,11 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
924 if (rssi == 0) 955 if (rssi == 0)
925 rssi = IWL_MIN_RSSI_VAL; 956 rssi = IWL_MIN_RSSI_VAL;
926 957
927 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RATE, "Network RSSI: %d\n", rssi); 958 IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
928 959
929 rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band); 960 rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
930 961
931 IWL_DEBUG_RATE("leave: rssi %d assign rate index: " 962 IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
932 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, 963 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
933 iwl3945_rates[rs_sta->start_rate].plcp); 964 iwl3945_rates[rs_sta->start_rate].plcp);
934 rcu_read_unlock(); 965 rcu_read_unlock();
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
deleted file mode 100644
index b5a66135ded..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
+++ /dev/null
@@ -1,206 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_rs_h__
28#define __iwl_3945_rs_h__
29
30struct iwl3945_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
33 u8 prev_ieee; /* previous rate in IEEE speeds */
34 u8 next_ieee; /* next rate in IEEE speeds */
35 u8 prev_rs; /* previous rate used in rs algo */
36 u8 next_rs; /* next rate used in rs algo */
37 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
38 u8 next_rs_tgg; /* next rate used in TGG rs algo */
39 u8 table_rs_index; /* index in rate scale table cmd */
40 u8 prev_table_rs; /* prev in rate table cmd */
41};
42
43/*
44 * These serve as indexes into
45 * struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT];
46 */
47enum {
48 IWL_RATE_1M_INDEX = 0,
49 IWL_RATE_2M_INDEX,
50 IWL_RATE_5M_INDEX,
51 IWL_RATE_11M_INDEX,
52 IWL_RATE_6M_INDEX,
53 IWL_RATE_9M_INDEX,
54 IWL_RATE_12M_INDEX,
55 IWL_RATE_18M_INDEX,
56 IWL_RATE_24M_INDEX,
57 IWL_RATE_36M_INDEX,
58 IWL_RATE_48M_INDEX,
59 IWL_RATE_54M_INDEX,
60 IWL_RATE_COUNT,
61 IWL_RATE_INVM_INDEX,
62 IWL_RATE_INVALID = IWL_RATE_INVM_INDEX
63};
64
65enum {
66 IWL_RATE_6M_INDEX_TABLE = 0,
67 IWL_RATE_9M_INDEX_TABLE,
68 IWL_RATE_12M_INDEX_TABLE,
69 IWL_RATE_18M_INDEX_TABLE,
70 IWL_RATE_24M_INDEX_TABLE,
71 IWL_RATE_36M_INDEX_TABLE,
72 IWL_RATE_48M_INDEX_TABLE,
73 IWL_RATE_54M_INDEX_TABLE,
74 IWL_RATE_1M_INDEX_TABLE,
75 IWL_RATE_2M_INDEX_TABLE,
76 IWL_RATE_5M_INDEX_TABLE,
77 IWL_RATE_11M_INDEX_TABLE,
78 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX,
79};
80
81enum {
82 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
83 IWL_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
84 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
85 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
86};
87
88/* #define vs. enum to keep from defaulting to 'large integer' */
89#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
90#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
91#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
92#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
93#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
94#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
95#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
96#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
97#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
98#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
99#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
100#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
101
102/* 3945 uCode API values for (legacy) bit rates, both OFDM and CCK */
103enum {
104 IWL_RATE_6M_PLCP = 13,
105 IWL_RATE_9M_PLCP = 15,
106 IWL_RATE_12M_PLCP = 5,
107 IWL_RATE_18M_PLCP = 7,
108 IWL_RATE_24M_PLCP = 9,
109 IWL_RATE_36M_PLCP = 11,
110 IWL_RATE_48M_PLCP = 1,
111 IWL_RATE_54M_PLCP = 3,
112 IWL_RATE_1M_PLCP = 10,
113 IWL_RATE_2M_PLCP = 20,
114 IWL_RATE_5M_PLCP = 55,
115 IWL_RATE_11M_PLCP = 110,
116};
117
118/* MAC header values for bit rates */
119enum {
120 IWL_RATE_6M_IEEE = 12,
121 IWL_RATE_9M_IEEE = 18,
122 IWL_RATE_12M_IEEE = 24,
123 IWL_RATE_18M_IEEE = 36,
124 IWL_RATE_24M_IEEE = 48,
125 IWL_RATE_36M_IEEE = 72,
126 IWL_RATE_48M_IEEE = 96,
127 IWL_RATE_54M_IEEE = 108,
128 IWL_RATE_1M_IEEE = 2,
129 IWL_RATE_2M_IEEE = 4,
130 IWL_RATE_5M_IEEE = 11,
131 IWL_RATE_11M_IEEE = 22,
132};
133
134#define IWL_CCK_BASIC_RATES_MASK \
135 (IWL_RATE_1M_MASK | \
136 IWL_RATE_2M_MASK)
137
138#define IWL_CCK_RATES_MASK \
139 (IWL_BASIC_RATES_MASK | \
140 IWL_RATE_5M_MASK | \
141 IWL_RATE_11M_MASK)
142
143#define IWL_OFDM_BASIC_RATES_MASK \
144 (IWL_RATE_6M_MASK | \
145 IWL_RATE_12M_MASK | \
146 IWL_RATE_24M_MASK)
147
148#define IWL_OFDM_RATES_MASK \
149 (IWL_OFDM_BASIC_RATES_MASK | \
150 IWL_RATE_9M_MASK | \
151 IWL_RATE_18M_MASK | \
152 IWL_RATE_36M_MASK | \
153 IWL_RATE_48M_MASK | \
154 IWL_RATE_54M_MASK)
155
156#define IWL_BASIC_RATES_MASK \
157 (IWL_OFDM_BASIC_RATES_MASK | \
158 IWL_CCK_BASIC_RATES_MASK)
159
160#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
161
162#define IWL_INV_TPT -1
163
164#define IWL_MIN_RSSI_VAL -100
165#define IWL_MAX_RSSI_VAL 0
166
167extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT];
168
169static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
170{
171 u8 rate = iwl3945_rates[rate_index].prev_ieee;
172
173 if (rate == IWL_RATE_INVALID)
174 rate = rate_index;
175 return rate;
176}
177
178/**
179 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
180 *
181 * The specific throughput table used is based on the type of network
 182 * the station is associated with, including A, B, G, and G w/ TGG protection
183 */
184extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
185
186/**
187 * iwl3945_rate_control_register - Register the rate control algorithm callbacks
188 *
189 * Since the rate control algorithm is hardware specific, there is no need
 190 * or reason to place it as a standalone module. The driver can call
191 * iwl3945_rate_control_register in order to register the rate control callbacks
192 * with the mac80211 subsystem. This should be performed prior to calling
193 * ieee80211_register_hw
194 *
195 */
196extern int iwl3945_rate_control_register(void);
197
198/**
199 * iwl3945_rate_control_unregister - Unregister the rate control callbacks
200 *
201 * This should be called after calling ieee80211_unregister_hw, but before
202 * the driver is unloaded.
203 */
204extern void iwl3945_rate_control_unregister(void);
205
206#endif
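The comments in the removed header describe when the rate-control callbacks must be registered and torn down relative to mac80211. A sketch of how those two entry points map onto the mac80211 rate-control API, assuming the rs_ops rate_control_ops table shown earlier in this patch (declared extern here purely for illustration; in the real driver it is a static table in iwl-3945-rs.c), might look as follows:

/*
 * Sketch of the registration pattern documented above; illustrative
 * only, not code from this patch.
 */
#include <net/mac80211.h>

extern struct rate_control_ops rs_ops;	/* hypothetical extern; see iwl-3945-rs.c */

int iwl3945_rate_control_register(void)
{
	/* Must run before ieee80211_register_hw(), as noted above. */
	return ieee80211_rate_control_register(&rs_ops);
}

void iwl3945_rate_control_unregister(void)
{
	/* Runs after ieee80211_unregister_hw(), before the module unloads. */
	ieee80211_rate_control_unregister(&rs_ops);
}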
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 45cfa1cf194..d49e48b9b03 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -38,10 +38,15 @@
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <net/mac80211.h> 39#include <net/mac80211.h>
40 40
41#include "iwl-3945-core.h" 41#include "iwl-fh.h"
42#include "iwl-3945-fh.h"
43#include "iwl-commands.h"
44#include "iwl-sta.h"
42#include "iwl-3945.h" 45#include "iwl-3945.h"
46#include "iwl-eeprom.h"
43#include "iwl-helpers.h" 47#include "iwl-helpers.h"
44#include "iwl-3945-rs.h" 48#include "iwl-core.h"
49#include "iwl-agn-rs.h"
45 50
46#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 51#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
47 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 52 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -63,7 +68,7 @@
63 * maps to IWL_RATE_INVALID 68 * maps to IWL_RATE_INVALID
64 * 69 *
65 */ 70 */
66const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT] = { 71const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
67 IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ 72 IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
68 IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ 73 IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
69 IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ 74 IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
@@ -91,7 +96,7 @@ const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT] = {
 91 * Use only for special debugging. This function is just a placeholder as-is, 96 * Use only for special debugging. This function is just a placeholder as-is,
92 * you'll need to provide the special bits! ... 97 * you'll need to provide the special bits! ...
93 * ... and set IWL_EVT_DISABLE to 1. */ 98 * ... and set IWL_EVT_DISABLE to 1. */
94void iwl3945_disable_events(struct iwl3945_priv *priv) 99void iwl3945_disable_events(struct iwl_priv *priv)
95{ 100{
96 int ret; 101 int ret;
97 int i; 102 int i;
@@ -150,34 +155,34 @@ void iwl3945_disable_events(struct iwl3945_priv *priv)
150 155
151 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 156 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
152 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 157 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
153 IWL_ERROR("Invalid event log pointer 0x%08X\n", base); 158 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
154 return; 159 return;
155 } 160 }
156 161
157 ret = iwl3945_grab_nic_access(priv); 162 ret = iwl_grab_nic_access(priv);
158 if (ret) { 163 if (ret) {
159 IWL_WARNING("Can not read from adapter at this time.\n"); 164 IWL_WARN(priv, "Can not read from adapter at this time.\n");
160 return; 165 return;
161 } 166 }
162 167
163 disable_ptr = iwl3945_read_targ_mem(priv, base + (4 * sizeof(u32))); 168 disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
164 array_size = iwl3945_read_targ_mem(priv, base + (5 * sizeof(u32))); 169 array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));
165 iwl3945_release_nic_access(priv); 170 iwl_release_nic_access(priv);
166 171
167 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { 172 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
168 IWL_DEBUG_INFO("Disabling selected uCode log events at 0x%x\n", 173 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
169 disable_ptr); 174 disable_ptr);
170 ret = iwl3945_grab_nic_access(priv); 175 ret = iwl_grab_nic_access(priv);
171 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) 176 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
172 iwl3945_write_targ_mem(priv, 177 iwl_write_targ_mem(priv,
173 disable_ptr + (i * sizeof(u32)), 178 disable_ptr + (i * sizeof(u32)),
174 evt_disable[i]); 179 evt_disable[i]);
175 180
176 iwl3945_release_nic_access(priv); 181 iwl_release_nic_access(priv);
177 } else { 182 } else {
178 IWL_DEBUG_INFO("Selected uCode log events may be disabled\n"); 183 IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
179 IWL_DEBUG_INFO(" by writing \"1\"s into disable bitmap\n"); 184 IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
180 IWL_DEBUG_INFO(" in SRAM at 0x%x, size %d u32s\n", 185 IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
181 disable_ptr, array_size); 186 disable_ptr, array_size);
182 } 187 }
183 188
@@ -193,40 +198,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
193 return -1; 198 return -1;
194} 199}
195 200
196/** 201#ifdef CONFIG_IWLWIFI_DEBUG
197 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
198 * @priv: eeprom and antenna fields are used to determine antenna flags
199 *
200 * priv->eeprom is used to determine if antenna AUX/MAIN are reversed
201 * priv->antenna specifies the antenna diversity mode:
202 *
203 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
204 * IWL_ANTENNA_MAIN - Force MAIN antenna
205 * IWL_ANTENNA_AUX - Force AUX antenna
206 */
207__le32 iwl3945_get_antenna_flags(const struct iwl3945_priv *priv)
208{
209 switch (priv->antenna) {
210 case IWL_ANTENNA_DIVERSITY:
211 return 0;
212
213 case IWL_ANTENNA_MAIN:
214 if (priv->eeprom.antenna_switch_type)
215 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
216 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
217
218 case IWL_ANTENNA_AUX:
219 if (priv->eeprom.antenna_switch_type)
220 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
221 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
222 }
223
224 /* bad antenna selector value */
225 IWL_ERROR("Bad antenna selector value (0x%x)\n", priv->antenna);
226 return 0; /* "diversity" is default if error */
227}
228
229#ifdef CONFIG_IWL3945_DEBUG
230#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 202#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
231 203
232static const char *iwl3945_get_tx_fail_reason(u32 status) 204static const char *iwl3945_get_tx_fail_reason(u32 status)
@@ -266,7 +238,7 @@ static inline const char *iwl3945_get_tx_fail_reason(u32 status)
 266 * for A and B mode we need to overwrite prev 238 * for A and B mode we need to overwrite prev
267 * value 239 * value
268 */ 240 */
269int iwl3945_rs_next_rate(struct iwl3945_priv *priv, int rate) 241int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
270{ 242{
271 int next_rate = iwl3945_get_prev_ieee_rate(rate); 243 int next_rate = iwl3945_get_prev_ieee_rate(rate);
272 244
@@ -279,7 +251,7 @@ int iwl3945_rs_next_rate(struct iwl3945_priv *priv, int rate)
279 break; 251 break;
280 case IEEE80211_BAND_2GHZ: 252 case IEEE80211_BAND_2GHZ:
281 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && 253 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
282 iwl3945_is_associated(priv)) { 254 iwl_is_associated(priv)) {
283 if (rate == IWL_RATE_11M_INDEX) 255 if (rate == IWL_RATE_11M_INDEX)
284 next_rate = IWL_RATE_5M_INDEX; 256 next_rate = IWL_RATE_5M_INDEX;
285 } 257 }
@@ -300,12 +272,12 @@ int iwl3945_rs_next_rate(struct iwl3945_priv *priv, int rate)
 300 * need to be reclaimed. As a result, some free space forms. If there is 272 * need to be reclaimed. As a result, some free space forms. If there is
301 * enough free space (> low mark), wake the stack that feeds us. 273 * enough free space (> low mark), wake the stack that feeds us.
302 */ 274 */
303static void iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv, 275static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
304 int txq_id, int index) 276 int txq_id, int index)
305{ 277{
306 struct iwl3945_tx_queue *txq = &priv->txq[txq_id]; 278 struct iwl_tx_queue *txq = &priv->txq[txq_id];
307 struct iwl3945_queue *q = &txq->q; 279 struct iwl_queue *q = &txq->q;
308 struct iwl3945_tx_info *tx_info; 280 struct iwl_tx_info *tx_info;
309 281
310 BUG_ON(txq_id == IWL_CMD_QUEUE_NUM); 282 BUG_ON(txq_id == IWL_CMD_QUEUE_NUM);
311 283
@@ -315,10 +287,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv,
315 tx_info = &txq->txb[txq->q.read_ptr]; 287 tx_info = &txq->txb[txq->q.read_ptr];
316 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); 288 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
317 tx_info->skb[0] = NULL; 289 tx_info->skb[0] = NULL;
318 iwl3945_hw_txq_free_tfd(priv, txq); 290 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
319 } 291 }
320 292
321 if (iwl3945_queue_space(q) > q->low_mark && (txq_id >= 0) && 293 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
322 (txq_id != IWL_CMD_QUEUE_NUM) && 294 (txq_id != IWL_CMD_QUEUE_NUM) &&
323 priv->mac80211_registered) 295 priv->mac80211_registered)
324 ieee80211_wake_queue(priv->hw, txq_id); 296 ieee80211_wake_queue(priv->hw, txq_id);
@@ -327,22 +299,22 @@ static void iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv,
327/** 299/**
328 * iwl3945_rx_reply_tx - Handle Tx response 300 * iwl3945_rx_reply_tx - Handle Tx response
329 */ 301 */
330static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv, 302static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
331 struct iwl3945_rx_mem_buffer *rxb) 303 struct iwl_rx_mem_buffer *rxb)
332{ 304{
333 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 305 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
334 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 306 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
335 int txq_id = SEQ_TO_QUEUE(sequence); 307 int txq_id = SEQ_TO_QUEUE(sequence);
336 int index = SEQ_TO_INDEX(sequence); 308 int index = SEQ_TO_INDEX(sequence);
337 struct iwl3945_tx_queue *txq = &priv->txq[txq_id]; 309 struct iwl_tx_queue *txq = &priv->txq[txq_id];
338 struct ieee80211_tx_info *info; 310 struct ieee80211_tx_info *info;
339 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 311 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
340 u32 status = le32_to_cpu(tx_resp->status); 312 u32 status = le32_to_cpu(tx_resp->status);
341 int rate_idx; 313 int rate_idx;
342 int fail; 314 int fail;
343 315
344 if ((index >= txq->q.n_bd) || (iwl3945_x2_queue_used(&txq->q, index) == 0)) { 316 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
345 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " 317 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
346 "is out of range [0-%d] %d %d\n", txq_id, 318 "is out of range [0-%d] %d %d\n", txq_id,
347 index, txq->q.n_bd, txq->q.write_ptr, 319 index, txq->q.n_bd, txq->q.write_ptr,
348 txq->q.read_ptr); 320 txq->q.read_ptr);
@@ -366,15 +338,15 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
366 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 338 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
367 IEEE80211_TX_STAT_ACK : 0; 339 IEEE80211_TX_STAT_ACK : 0;
368 340
369 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", 341 IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
370 txq_id, iwl3945_get_tx_fail_reason(status), status, 342 txq_id, iwl3945_get_tx_fail_reason(status), status,
371 tx_resp->rate, tx_resp->failure_frame); 343 tx_resp->rate, tx_resp->failure_frame);
372 344
373 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 345 IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
374 iwl3945_tx_queue_reclaim(priv, txq_id, index); 346 iwl3945_tx_queue_reclaim(priv, txq_id, index);
375 347
376 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 348 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
377 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); 349 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
378} 350}
379 351
380 352
@@ -387,14 +359,14 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
387 * 359 *
388 *****************************************************************************/ 360 *****************************************************************************/
389 361
390void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_buffer *rxb) 362void iwl3945_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
391{ 363{
392 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 364 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
393 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", 365 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
394 (int)sizeof(struct iwl3945_notif_statistics), 366 (int)sizeof(struct iwl3945_notif_statistics),
395 le32_to_cpu(pkt->len)); 367 le32_to_cpu(pkt->len));
396 368
397 memcpy(&priv->statistics, pkt->u.raw, sizeof(priv->statistics)); 369 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
398 370
399 iwl3945_led_background(priv); 371 iwl3945_led_background(priv);
400 372
@@ -406,7 +378,7 @@ void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_b
406 * Misc. internal state and helper functions 378 * Misc. internal state and helper functions
407 * 379 *
408 ******************************************************************************/ 380 ******************************************************************************/
409#ifdef CONFIG_IWL3945_DEBUG 381#ifdef CONFIG_IWLWIFI_DEBUG
410 382
411/** 383/**
412 * iwl3945_report_frame - dump frame to syslog during debug sessions 384 * iwl3945_report_frame - dump frame to syslog during debug sessions
@@ -415,8 +387,8 @@ void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_b
415 * including selective frame dumps. 387 * including selective frame dumps.
416 * group100 parameter selects whether to show 1 out of 100 good frames. 388 * group100 parameter selects whether to show 1 out of 100 good frames.
417 */ 389 */
418static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv, 390static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
419 struct iwl3945_rx_packet *pkt, 391 struct iwl_rx_packet *pkt,
420 struct ieee80211_hdr *header, int group100) 392 struct ieee80211_hdr *header, int group100)
421{ 393{
422 u32 to_us; 394 u32 to_us;
@@ -524,13 +496,13 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
524 * MAC addresses show just the last byte (for brevity), 496 * MAC addresses show just the last byte (for brevity),
525 * but you can hack it to show more, if you'd like to. */ 497 * but you can hack it to show more, if you'd like to. */
526 if (dataframe) 498 if (dataframe)
527 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " 499 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
528 "len=%u, rssi=%d, chnl=%d, rate=%d, \n", 500 "len=%u, rssi=%d, chnl=%d, rate=%d, \n",
529 title, le16_to_cpu(fc), header->addr1[5], 501 title, le16_to_cpu(fc), header->addr1[5],
530 length, rssi, channel, rate); 502 length, rssi, channel, rate);
531 else { 503 else {
532 /* src/dst addresses assume managed mode */ 504 /* src/dst addresses assume managed mode */
533 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " 505 IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, "
534 "src=0x%02x, rssi=%u, tim=%lu usec, " 506 "src=0x%02x, rssi=%u, tim=%lu usec, "
535 "phy=0x%02x, chnl=%d\n", 507 "phy=0x%02x, chnl=%d\n",
536 title, le16_to_cpu(fc), header->addr1[5], 508 title, le16_to_cpu(fc), header->addr1[5],
@@ -540,18 +512,27 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
540 } 512 }
541 } 513 }
542 if (print_dump) 514 if (print_dump)
543 iwl3945_print_hex_dump(IWL_DL_RX, data, length); 515 iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
516}
517
518static void iwl3945_dbg_report_frame(struct iwl_priv *priv,
519 struct iwl_rx_packet *pkt,
520 struct ieee80211_hdr *header, int group100)
521{
522 if (priv->debug_level & IWL_DL_RX)
523 _iwl3945_dbg_report_frame(priv, pkt, header, group100);
544} 524}
525
545#else 526#else
546static inline void iwl3945_dbg_report_frame(struct iwl3945_priv *priv, 527static inline void iwl3945_dbg_report_frame(struct iwl_priv *priv,
547 struct iwl3945_rx_packet *pkt, 528 struct iwl_rx_packet *pkt,
548 struct ieee80211_hdr *header, int group100) 529 struct ieee80211_hdr *header, int group100)
549{ 530{
550} 531}
551#endif 532#endif
552 533
553/* This is necessary only for a number of statistics, see the caller. */ 534/* This is necessary only for a number of statistics, see the caller. */
554static int iwl3945_is_network_packet(struct iwl3945_priv *priv, 535static int iwl3945_is_network_packet(struct iwl_priv *priv,
555 struct ieee80211_hdr *header) 536 struct ieee80211_hdr *header)
556{ 537{
557 /* Filter incoming packets to determine if they are targeted toward 538 /* Filter incoming packets to determine if they are targeted toward
@@ -568,11 +549,11 @@ static int iwl3945_is_network_packet(struct iwl3945_priv *priv,
568 } 549 }
569} 550}
570 551
571static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv, 552static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
572 struct iwl3945_rx_mem_buffer *rxb, 553 struct iwl_rx_mem_buffer *rxb,
573 struct ieee80211_rx_status *stats) 554 struct ieee80211_rx_status *stats)
574{ 555{
575 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data; 556 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
576#ifdef CONFIG_IWL3945_LEDS 557#ifdef CONFIG_IWL3945_LEDS
577 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 558 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
578#endif 559#endif
@@ -581,15 +562,15 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
581 short len = le16_to_cpu(rx_hdr->len); 562 short len = le16_to_cpu(rx_hdr->len);
582 563
583 /* We received data from the HW, so stop the watchdog */ 564 /* We received data from the HW, so stop the watchdog */
584 if (unlikely((len + IWL_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 565 if (unlikely((len + IWL39_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
585 IWL_DEBUG_DROP("Corruption detected!\n"); 566 IWL_DEBUG_DROP(priv, "Corruption detected!\n");
586 return; 567 return;
587 } 568 }
588 569
589 /* We only process data packets if the interface is open */ 570 /* We only process data packets if the interface is open */
590 if (unlikely(!priv->is_open)) { 571 if (unlikely(!priv->is_open)) {
591 IWL_DEBUG_DROP_LIMIT 572 IWL_DEBUG_DROP_LIMIT(priv,
592 ("Dropping packet while interface is not open.\n"); 573 "Dropping packet while interface is not open.\n");
593 return; 574 return;
594 } 575 }
595 576
@@ -597,8 +578,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
597 /* Set the size of the skb to the size of the frame */ 578 /* Set the size of the skb to the size of the frame */
598 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len)); 579 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len));
599 580
600 if (iwl3945_param_hwcrypto) 581 if (!iwl3945_mod_params.sw_crypto)
601 iwl3945_set_decrypted_flag(priv, rxb->skb, 582 iwl_set_decrypted_flag(priv,
583 (struct ieee80211_hdr *)rxb->skb->data,
602 le32_to_cpu(rx_end->status), stats); 584 le32_to_cpu(rx_end->status), stats);
603 585
604#ifdef CONFIG_IWL3945_LEDS 586#ifdef CONFIG_IWL3945_LEDS
@@ -611,12 +593,12 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
611 593
612#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 594#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
613 595
614static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv, 596static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
615 struct iwl3945_rx_mem_buffer *rxb) 597 struct iwl_rx_mem_buffer *rxb)
616{ 598{
617 struct ieee80211_hdr *header; 599 struct ieee80211_hdr *header;
618 struct ieee80211_rx_status rx_status; 600 struct ieee80211_rx_status rx_status;
619 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 601 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
620 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); 602 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
621 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 603 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
622 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 604 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
@@ -644,22 +626,21 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
644 rx_status.flag |= RX_FLAG_SHORTPRE; 626 rx_status.flag |= RX_FLAG_SHORTPRE;
645 627
646 if ((unlikely(rx_stats->phy_count > 20))) { 628 if ((unlikely(rx_stats->phy_count > 20))) {
 647 IWL_DEBUG_DROP 629 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
648 ("dsp size out of range [0,20]: " 630 rx_stats->phy_count);
649 "%d/n", rx_stats->phy_count);
650 return; 631 return;
651 } 632 }
652 633
653 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) 634 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
654 || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { 635 || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
655 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status); 636 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
656 return; 637 return;
657 } 638 }
658 639
659 640
660 641
661 /* Convert 3945's rssi indicator to dBm */ 642 /* Convert 3945's rssi indicator to dBm */
662 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET; 643 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
663 644
664 /* Set default noise value to -127 */ 645 /* Set default noise value to -127 */
665 if (priv->last_rx_noise == 0) 646 if (priv->last_rx_noise == 0)
@@ -691,7 +672,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
691 } 672 }
692 673
693 674
694 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", 675 IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
695 rx_status.signal, rx_status.noise, rx_status.qual, 676 rx_status.signal, rx_status.noise, rx_status.qual,
696 rx_stats_sig_avg, rx_stats_noise_diff); 677 rx_stats_sig_avg, rx_stats_noise_diff);
697 678
@@ -699,17 +680,14 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
699 680
700 network_packet = iwl3945_is_network_packet(priv, header); 681 network_packet = iwl3945_is_network_packet(priv, header);
701 682
702 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n", 683 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
703 network_packet ? '*' : ' ', 684 network_packet ? '*' : ' ',
704 le16_to_cpu(rx_hdr->channel), 685 le16_to_cpu(rx_hdr->channel),
705 rx_status.signal, rx_status.signal, 686 rx_status.signal, rx_status.signal,
706 rx_status.noise, rx_status.rate_idx); 687 rx_status.noise, rx_status.rate_idx);
707 688
708#ifdef CONFIG_IWL3945_DEBUG 689 /* Set "1" to report good data frames in groups of 100 */
709 if (iwl3945_debug_level & (IWL_DL_RX)) 690 iwl3945_dbg_report_frame(priv, pkt, header, 1);
710 /* Set "1" to report good data frames in groups of 100 */
711 iwl3945_dbg_report_frame(priv, pkt, header, 1);
712#endif
713 691
714 if (network_packet) { 692 if (network_packet) {
715 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); 693 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
@@ -721,24 +699,31 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
721 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); 699 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
722} 700}
723 701
724int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr, 702int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
725 dma_addr_t addr, u16 len) 703 struct iwl_tx_queue *txq,
704 dma_addr_t addr, u16 len, u8 reset, u8 pad)
726{ 705{
727 int count; 706 int count;
728 u32 pad; 707 struct iwl_queue *q;
729 struct iwl3945_tfd_frame *tfd = (struct iwl3945_tfd_frame *)ptr; 708 struct iwl3945_tfd *tfd, *tfd_tmp;
709
710 q = &txq->q;
711 tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
712 tfd = &tfd_tmp[q->write_ptr];
713
714 if (reset)
715 memset(tfd, 0, sizeof(*tfd));
730 716
731 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); 717 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
732 pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags));
733 718
734 if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { 719 if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
735 IWL_ERROR("Error can not send more than %d chunks\n", 720 IWL_ERR(priv, "Error can not send more than %d chunks\n",
736 NUM_TFD_CHUNKS); 721 NUM_TFD_CHUNKS);
737 return -EINVAL; 722 return -EINVAL;
738 } 723 }
739 724
740 tfd->pa[count].addr = cpu_to_le32(addr); 725 tfd->tbs[count].addr = cpu_to_le32(addr);
741 tfd->pa[count].len = cpu_to_le32(len); 726 tfd->tbs[count].len = cpu_to_le32(len);
742 727
743 count++; 728 count++;
744 729
@@ -753,10 +738,10 @@ int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr,
753 * 738 *
754 * Does NOT advance any indexes 739 * Does NOT advance any indexes
755 */ 740 */
756int iwl3945_hw_txq_free_tfd(struct iwl3945_priv *priv, struct iwl3945_tx_queue *txq) 741void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
757{ 742{
758 struct iwl3945_tfd_frame *bd_tmp = (struct iwl3945_tfd_frame *)&txq->bd[0]; 743 struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
759 struct iwl3945_tfd_frame *bd = &bd_tmp[txq->q.read_ptr]; 744 struct iwl3945_tfd *tfd = &tfd_tmp[txq->q.read_ptr];
760 struct pci_dev *dev = priv->pci_dev; 745 struct pci_dev *dev = priv->pci_dev;
761 int i; 746 int i;
762 int counter; 747 int counter;
@@ -764,21 +749,21 @@ int iwl3945_hw_txq_free_tfd(struct iwl3945_priv *priv, struct iwl3945_tx_queue *
764 /* classify bd */ 749 /* classify bd */
765 if (txq->q.id == IWL_CMD_QUEUE_NUM) 750 if (txq->q.id == IWL_CMD_QUEUE_NUM)
766 /* nothing to cleanup after for host commands */ 751 /* nothing to cleanup after for host commands */
767 return 0; 752 return;
768 753
769 /* sanity check */ 754 /* sanity check */
770 counter = TFD_CTL_COUNT_GET(le32_to_cpu(bd->control_flags)); 755 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
771 if (counter > NUM_TFD_CHUNKS) { 756 if (counter > NUM_TFD_CHUNKS) {
772 IWL_ERROR("Too many chunks: %i\n", counter); 757 IWL_ERR(priv, "Too many chunks: %i\n", counter);
 773 /* @todo issue fatal error, it is quite a serious situation */ 758 /* @todo issue fatal error, it is quite a serious situation */
774 return 0; 759 return;
775 } 760 }
776 761
777 /* unmap chunks if any */ 762 /* unmap chunks if any */
778 763
779 for (i = 1; i < counter; i++) { 764 for (i = 1; i < counter; i++) {
780 pci_unmap_single(dev, le32_to_cpu(bd->pa[i].addr), 765 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
781 le32_to_cpu(bd->pa[i].len), PCI_DMA_TODEVICE); 766 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
782 if (txq->txb[txq->q.read_ptr].skb[0]) { 767 if (txq->txb[txq->q.read_ptr].skb[0]) {
783 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0]; 768 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0];
784 if (txq->txb[txq->q.read_ptr].skb[0]) { 769 if (txq->txb[txq->q.read_ptr].skb[0]) {
@@ -788,10 +773,10 @@ int iwl3945_hw_txq_free_tfd(struct iwl3945_priv *priv, struct iwl3945_tx_queue *
788 } 773 }
789 } 774 }
790 } 775 }
791 return 0; 776 return ;
792} 777}
793 778
794u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr) 779u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *addr)
795{ 780{
796 int i, start = IWL_AP_ID; 781 int i, start = IWL_AP_ID;
797 int ret = IWL_INVALID_STATION; 782 int ret = IWL_INVALID_STATION;
@@ -802,18 +787,18 @@ u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr)
802 start = IWL_STA_ID; 787 start = IWL_STA_ID;
803 788
804 if (is_broadcast_ether_addr(addr)) 789 if (is_broadcast_ether_addr(addr))
805 return priv->hw_setting.bcast_sta_id; 790 return priv->hw_params.bcast_sta_id;
806 791
807 spin_lock_irqsave(&priv->sta_lock, flags); 792 spin_lock_irqsave(&priv->sta_lock, flags);
808 for (i = start; i < priv->hw_setting.max_stations; i++) 793 for (i = start; i < priv->hw_params.max_stations; i++)
809 if ((priv->stations[i].used) && 794 if ((priv->stations_39[i].used) &&
810 (!compare_ether_addr 795 (!compare_ether_addr
811 (priv->stations[i].sta.sta.addr, addr))) { 796 (priv->stations_39[i].sta.sta.addr, addr))) {
812 ret = i; 797 ret = i;
813 goto out; 798 goto out;
814 } 799 }
815 800
816 IWL_DEBUG_INFO("can not find STA %pM (total %d)\n", 801 IWL_DEBUG_INFO(priv, "can not find STA %pM (total %d)\n",
817 addr, priv->num_stations); 802 addr, priv->num_stations);
818 out: 803 out:
819 spin_unlock_irqrestore(&priv->sta_lock, flags); 804 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -824,12 +809,10 @@ u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr)
824 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: 809 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
825 * 810 *
826*/ 811*/
827void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv, 812void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, struct iwl_cmd *cmd,
828 struct iwl3945_cmd *cmd,
829 struct ieee80211_tx_info *info, 813 struct ieee80211_tx_info *info,
830 struct ieee80211_hdr *hdr, int sta_id, int tx_id) 814 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
831{ 815{
832 unsigned long flags;
833 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value; 816 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
834 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT - 1); 817 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT - 1);
835 u16 rate_mask; 818 u16 rate_mask;
@@ -838,25 +821,15 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
838 u8 data_retry_limit; 821 u8 data_retry_limit;
839 __le32 tx_flags; 822 __le32 tx_flags;
840 __le16 fc = hdr->frame_control; 823 __le16 fc = hdr->frame_control;
824 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
841 825
842 rate = iwl3945_rates[rate_index].plcp; 826 rate = iwl3945_rates[rate_index].plcp;
843 tx_flags = cmd->cmd.tx.tx_flags; 827 tx_flags = tx->tx_flags;
844 828
845 /* We need to figure out how to get the sta->supp_rates while 829 /* We need to figure out how to get the sta->supp_rates while
846 * in this running context */ 830 * in this running context */
847 rate_mask = IWL_RATES_MASK; 831 rate_mask = IWL_RATES_MASK;
848 832
849 spin_lock_irqsave(&priv->sta_lock, flags);
850
851 priv->stations[sta_id].current_rate.rate_n_flags = rate;
852
853 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
854 (sta_id != priv->hw_setting.bcast_sta_id) &&
855 (sta_id != IWL_MULTICAST_ID))
856 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate;
857
858 spin_unlock_irqrestore(&priv->sta_lock, flags);
859
860 if (tx_id >= IWL_CMD_QUEUE_NUM) 833 if (tx_id >= IWL_CMD_QUEUE_NUM)
861 rts_retry_limit = 3; 834 rts_retry_limit = 3;
862 else 835 else
@@ -888,25 +861,25 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
888 } 861 }
889 } 862 }
890 863
891 cmd->cmd.tx.rts_retry_limit = rts_retry_limit; 864 tx->rts_retry_limit = rts_retry_limit;
892 cmd->cmd.tx.data_retry_limit = data_retry_limit; 865 tx->data_retry_limit = data_retry_limit;
893 cmd->cmd.tx.rate = rate; 866 tx->rate = rate;
894 cmd->cmd.tx.tx_flags = tx_flags; 867 tx->tx_flags = tx_flags;
895 868
896 /* OFDM */ 869 /* OFDM */
897 cmd->cmd.tx.supp_rates[0] = 870 tx->supp_rates[0] =
898 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; 871 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
899 872
900 /* CCK */ 873 /* CCK */
901 cmd->cmd.tx.supp_rates[1] = (rate_mask & 0xF); 874 tx->supp_rates[1] = (rate_mask & 0xF);
902 875
903 IWL_DEBUG_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " 876 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
904 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, 877 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
905 cmd->cmd.tx.rate, le32_to_cpu(cmd->cmd.tx.tx_flags), 878 tx->rate, le32_to_cpu(tx->tx_flags),
906 cmd->cmd.tx.supp_rates[1], cmd->cmd.tx.supp_rates[0]); 879 tx->supp_rates[1], tx->supp_rates[0]);
907} 880}
908 881
909u8 iwl3945_sync_sta(struct iwl3945_priv *priv, int sta_id, u16 tx_rate, u8 flags) 882u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
910{ 883{
911 unsigned long flags_spin; 884 unsigned long flags_spin;
912 struct iwl3945_station_entry *station; 885 struct iwl3945_station_entry *station;
@@ -915,138 +888,133 @@ u8 iwl3945_sync_sta(struct iwl3945_priv *priv, int sta_id, u16 tx_rate, u8 flags
915 return IWL_INVALID_STATION; 888 return IWL_INVALID_STATION;
916 889
917 spin_lock_irqsave(&priv->sta_lock, flags_spin); 890 spin_lock_irqsave(&priv->sta_lock, flags_spin);
918 station = &priv->stations[sta_id]; 891 station = &priv->stations_39[sta_id];
919 892
920 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; 893 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
921 station->sta.rate_n_flags = cpu_to_le16(tx_rate); 894 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
922 station->current_rate.rate_n_flags = tx_rate;
923 station->sta.mode = STA_CONTROL_MODIFY_MSK; 895 station->sta.mode = STA_CONTROL_MODIFY_MSK;
924 896
925 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 897 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
926 898
927 iwl3945_send_add_station(priv, &station->sta, flags); 899 iwl_send_add_sta(priv,
928 IWL_DEBUG_RATE("SCALE sync station %d to rate %d\n", 900 (struct iwl_addsta_cmd *)&station->sta, flags);
901 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
929 sta_id, tx_rate); 902 sta_id, tx_rate);
930 return sta_id; 903 return sta_id;
931} 904}
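
iwl3945_sync_sta above follows a common ordering: mutate the station entry under the spinlock, drop the lock, and only then issue the add-station command. A rough userspace sketch of that ordering, using a pthread mutex as a stand-in for the spinlock; all ex_* names are illustrative, not the driver's.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct ex_station {
        uint16_t rate_n_flags;
        uint8_t  mode;
};

static pthread_mutex_t ex_sta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_station ex_stations[8];

/* stand-in for the firmware command; issued only after the lock is dropped */
static void ex_send_add_station(const struct ex_station *sta)
{
        printf("sync rate 0x%04x\n", sta->rate_n_flags);
}

static int ex_sync_sta(int sta_id, uint16_t tx_rate)
{
        struct ex_station copy;

        pthread_mutex_lock(&ex_sta_lock);
        ex_stations[sta_id].rate_n_flags = tx_rate;   /* update under lock */
        ex_stations[sta_id].mode = 1;
        copy = ex_stations[sta_id];                   /* snapshot for the command */
        pthread_mutex_unlock(&ex_sta_lock);

        ex_send_add_station(&copy);                   /* command sent lock-free */
        return sta_id;
}

int main(void)
{
        return ex_sync_sta(0, 0x0123) == 0 ? 0 : 1;
}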
932 905
933static int iwl3945_nic_set_pwr_src(struct iwl3945_priv *priv, int pwr_max) 906static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
934{ 907{
935 int rc; 908 int ret;
936 unsigned long flags; 909 unsigned long flags;
937 910
938 spin_lock_irqsave(&priv->lock, flags); 911 spin_lock_irqsave(&priv->lock, flags);
939 rc = iwl3945_grab_nic_access(priv); 912 ret = iwl_grab_nic_access(priv);
940 if (rc) { 913 if (ret) {
941 spin_unlock_irqrestore(&priv->lock, flags); 914 spin_unlock_irqrestore(&priv->lock, flags);
942 return rc; 915 return ret;
943 } 916 }
944 917
945 if (!pwr_max) { 918 if (src == IWL_PWR_SRC_VAUX) {
946 u32 val; 919 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
947 920 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
948 rc = pci_read_config_dword(priv->pci_dev,
949 PCI_POWER_SOURCE, &val);
950 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
951 iwl3945_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
952 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 921 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
953 ~APMG_PS_CTRL_MSK_PWR_SRC); 922 ~APMG_PS_CTRL_MSK_PWR_SRC);
954 iwl3945_release_nic_access(priv); 923 iwl_release_nic_access(priv);
955 924
956 iwl3945_poll_bit(priv, CSR_GPIO_IN, 925 iwl_poll_bit(priv, CSR_GPIO_IN,
957 CSR_GPIO_IN_VAL_VAUX_PWR_SRC, 926 CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
958 CSR_GPIO_IN_BIT_AUX_POWER, 5000); 927 CSR_GPIO_IN_BIT_AUX_POWER, 5000);
959 } else 928 } else {
960 iwl3945_release_nic_access(priv); 929 iwl_release_nic_access(priv);
930 }
961 } else { 931 } else {
962 iwl3945_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 932 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
963 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 933 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
964 ~APMG_PS_CTRL_MSK_PWR_SRC); 934 ~APMG_PS_CTRL_MSK_PWR_SRC);
965 935
966 iwl3945_release_nic_access(priv); 936 iwl_release_nic_access(priv);
967 iwl3945_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, 937 iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
968 CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ 938 CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
969 } 939 }
970 spin_unlock_irqrestore(&priv->lock, flags); 940 spin_unlock_irqrestore(&priv->lock, flags);
971 941
972 return rc; 942 return ret;
973} 943}
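
The VAUX/VMAIN switch above relies on iwl_set_bits_mask_prph(), which is assumed here to perform a masked read-modify-write on a peripheral register: keep only the bits allowed by the mask, then OR in the new value. A small sketch against a plain variable instead of a real register; the EX_ field values are invented.

#include <stdint.h>
#include <stdio.h>

#define EX_PWR_SRC_MSK   0x03000000u    /* assumed 2-bit power-source field */
#define EX_PWR_SRC_VMAIN 0x00000000u
#define EX_PWR_SRC_VAUX  0x02000000u

/* assumed semantics: new = (old & mask) | bits */
static uint32_t ex_set_bits_mask(uint32_t old, uint32_t bits, uint32_t mask)
{
        return (old & mask) | bits;
}

int main(void)
{
        uint32_t reg = 0xffffffffu;

        /* clear the power-source field, then select VAUX */
        reg = ex_set_bits_mask(reg, EX_PWR_SRC_VAUX, ~EX_PWR_SRC_MSK);
        printf("reg = 0x%08x\n", reg);
        return 0;
}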
974 944
975static int iwl3945_rx_init(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq) 945static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
976{ 946{
977 int rc; 947 int rc;
978 unsigned long flags; 948 unsigned long flags;
979 949
980 spin_lock_irqsave(&priv->lock, flags); 950 spin_lock_irqsave(&priv->lock, flags);
981 rc = iwl3945_grab_nic_access(priv); 951 rc = iwl_grab_nic_access(priv);
982 if (rc) { 952 if (rc) {
983 spin_unlock_irqrestore(&priv->lock, flags); 953 spin_unlock_irqrestore(&priv->lock, flags);
984 return rc; 954 return rc;
985 } 955 }
986 956
987 iwl3945_write_direct32(priv, FH_RCSR_RBD_BASE(0), rxq->dma_addr); 957 iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
988 iwl3945_write_direct32(priv, FH_RCSR_RPTR_ADDR(0), 958 iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
989 priv->hw_setting.shared_phys + 959 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
990 offsetof(struct iwl3945_shared, rx_read_ptr[0])); 960 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
991 iwl3945_write_direct32(priv, FH_RCSR_WPTR(0), 0); 961 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
992 iwl3945_write_direct32(priv, FH_RCSR_CONFIG(0), 962 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
993 ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | 963 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
994 ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | 964 FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
995 ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | 965 (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
996 ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | 966 FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
997 (RX_QUEUE_SIZE_LOG << ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | 967 (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
998 ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | 968 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
999 (1 << ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
1000 ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
1001 969
1002 /* fake read to flush all prev I/O */ 970 /* fake read to flush all prev I/O */
1003 iwl3945_read_direct32(priv, FH_RSSR_CTRL); 971 iwl_read_direct32(priv, FH39_RSSR_CTRL);
1004 972
1005 iwl3945_release_nic_access(priv); 973 iwl_release_nic_access(priv);
1006 spin_unlock_irqrestore(&priv->lock, flags); 974 spin_unlock_irqrestore(&priv->lock, flags);
1007 975
1008 return 0; 976 return 0;
1009} 977}
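
The FH39_RCSR_CONFIG write above composes one 32-bit configuration word from enable flags plus the RX queue size log shifted into its field. A standalone sketch of that composition; the EX_ flag bits and field positions are assumed only for illustration and do not match the real register layout.

#include <stdint.h>
#include <stdio.h>

#define EX_DMA_EN          (1u << 31)   /* assumed flag bits */
#define EX_RDRBD_EN        (1u << 30)
#define EX_WR_STTS_EN      (1u << 27)
#define EX_RBDC_SIZE_POS   20           /* assumed position of the ring-size field */
#define EX_IRQ_RBTH_POS    4            /* assumed position of the irq threshold */

int main(void)
{
        unsigned int rx_queue_size_log = 8;             /* 256-entry ring */
        uint32_t cfg;

        cfg = EX_DMA_EN | EX_RDRBD_EN | EX_WR_STTS_EN |
              (rx_queue_size_log << EX_RBDC_SIZE_POS) | /* ring size field */
              (1u << EX_IRQ_RBTH_POS);                  /* interrupt threshold */

        printf("rx config = 0x%08x\n", cfg);
        return 0;
}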
1010 978
1011static int iwl3945_tx_reset(struct iwl3945_priv *priv) 979static int iwl3945_tx_reset(struct iwl_priv *priv)
1012{ 980{
1013 int rc; 981 int rc;
1014 unsigned long flags; 982 unsigned long flags;
1015 983
1016 spin_lock_irqsave(&priv->lock, flags); 984 spin_lock_irqsave(&priv->lock, flags);
1017 rc = iwl3945_grab_nic_access(priv); 985 rc = iwl_grab_nic_access(priv);
1018 if (rc) { 986 if (rc) {
1019 spin_unlock_irqrestore(&priv->lock, flags); 987 spin_unlock_irqrestore(&priv->lock, flags);
1020 return rc; 988 return rc;
1021 } 989 }
1022 990
1023 /* bypass mode */ 991 /* bypass mode */
1024 iwl3945_write_prph(priv, ALM_SCD_MODE_REG, 0x2); 992 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
1025 993
1026 /* RA 0 is active */ 994 /* RA 0 is active */
1027 iwl3945_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); 995 iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
1028 996
1029 /* all 6 fifo are active */ 997 /* all 6 fifo are active */
1030 iwl3945_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); 998 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
1031 999
1032 iwl3945_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); 1000 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
1033 iwl3945_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); 1001 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
1034 iwl3945_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); 1002 iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
1035 iwl3945_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); 1003 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
1036 1004
1037 iwl3945_write_direct32(priv, FH_TSSR_CBB_BASE, 1005 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
1038 priv->hw_setting.shared_phys); 1006 priv->shared_phys);
1039 1007
1040 iwl3945_write_direct32(priv, FH_TSSR_MSG_CONFIG, 1008 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
1041 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | 1009 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
1042 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | 1010 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
1043 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | 1011 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
1044 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | 1012 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
1045 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | 1013 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
1046 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | 1014 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
1047 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); 1015 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
1048 1016
1049 iwl3945_release_nic_access(priv); 1017 iwl_release_nic_access(priv);
1050 spin_unlock_irqrestore(&priv->lock, flags); 1018 spin_unlock_irqrestore(&priv->lock, flags);
1051 1019
1052 return 0; 1020 return 0;
@@ -1057,7 +1025,7 @@ static int iwl3945_tx_reset(struct iwl3945_priv *priv)
1057 * 1025 *
1058 * Destroys all DMA structures and initialize them again 1026 * Destroys all DMA structures and initialize them again
1059 */ 1027 */
1060static int iwl3945_txq_ctx_reset(struct iwl3945_priv *priv) 1028static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
1061{ 1029{
1062 int rc; 1030 int rc;
1063 int txq_id, slots_num; 1031 int txq_id, slots_num;
@@ -1073,10 +1041,10 @@ static int iwl3945_txq_ctx_reset(struct iwl3945_priv *priv)
1073 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { 1041 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) {
1074 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 1042 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
1075 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 1043 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
1076 rc = iwl3945_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 1044 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
1077 txq_id); 1045 txq_id);
1078 if (rc) { 1046 if (rc) {
1079 IWL_ERROR("Tx %d queue init failed\n", txq_id); 1047 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
1080 goto error; 1048 goto error;
1081 } 1049 }
1082 } 1050 }
@@ -1088,111 +1056,140 @@ static int iwl3945_txq_ctx_reset(struct iwl3945_priv *priv)
1088 return rc; 1056 return rc;
1089} 1057}
1090 1058
1091int iwl3945_hw_nic_init(struct iwl3945_priv *priv) 1059static int iwl3945_apm_init(struct iwl_priv *priv)
1092{ 1060{
1093 u8 rev_id; 1061 int ret = 0;
1094 int rc;
1095 unsigned long flags;
1096 struct iwl3945_rx_queue *rxq = &priv->rxq;
1097 1062
1098 iwl3945_power_init_handle(priv); 1063 iwl_power_initialize(priv);
1099 1064
1100 spin_lock_irqsave(&priv->lock, flags); 1065 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1101 iwl3945_set_bit(priv, CSR_ANA_PLL_CFG, CSR39_ANA_PLL_CFG_VAL); 1066 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1102 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1103 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1104
1105 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1106 rc = iwl3945_poll_direct_bit(priv, CSR_GP_CNTRL,
1107 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1108 if (rc < 0) {
1109 spin_unlock_irqrestore(&priv->lock, flags);
1110 IWL_DEBUG_INFO("Failed to init the card\n");
1111 return rc;
1112 }
1113 1067
1114 rc = iwl3945_grab_nic_access(priv); 1068 /* disable L0s without affecting L1 (don't wait for ICH L0s bug W/A) */
1115 if (rc) { 1069 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1116 spin_unlock_irqrestore(&priv->lock, flags); 1070 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1117 return rc; 1071
1072 /* set "initialization complete" bit to move adapter
1073 * D0U* --> D0A* state */
1074 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1075
1076 iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
1077 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1078 if (ret < 0) {
1079 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1080 goto out;
1118 } 1081 }
1119 iwl3945_write_prph(priv, APMG_CLK_EN_REG, 1082
1120 APMG_CLK_VAL_DMA_CLK_RQT | 1083 ret = iwl_grab_nic_access(priv);
1121 APMG_CLK_VAL_BSM_CLK_RQT); 1084 if (ret)
1085 goto out;
1086
1087 /* enable DMA */
1088 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
1089 APMG_CLK_VAL_BSM_CLK_RQT);
1090
1122 udelay(20); 1091 udelay(20);
1123 iwl3945_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1124 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1125 iwl3945_release_nic_access(priv);
1126 spin_unlock_irqrestore(&priv->lock, flags);
1127 1092
1128 /* Determine HW type */ 1093 /* disable L1-Active */
1129 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); 1094 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1130 if (rc) 1095 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1131 return rc; 1096
1132 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); 1097 iwl_release_nic_access(priv);
1098out:
1099 return ret;
1100}
1101
1102static void iwl3945_nic_config(struct iwl_priv *priv)
1103{
1104 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1105 unsigned long flags;
1106 u8 rev_id = 0;
1133 1107
1134 iwl3945_nic_set_pwr_src(priv, 1);
1135 spin_lock_irqsave(&priv->lock, flags); 1108 spin_lock_irqsave(&priv->lock, flags);
1136 1109
1137 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 1110 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
1138 IWL_DEBUG_INFO("RTP type \n"); 1111 IWL_DEBUG_INFO(priv, "RTP type \n");
1139 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { 1112 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
1140 IWL_DEBUG_INFO("3945 RADIO-MB type\n"); 1113 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
1141 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1114 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1142 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); 1115 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
1143 } else { 1116 } else {
1144 IWL_DEBUG_INFO("3945 RADIO-MM type\n"); 1117 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
1145 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1118 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1146 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); 1119 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
1147 } 1120 }
1148 1121
1149 if (EEPROM_SKU_CAP_OP_MODE_MRC == priv->eeprom.sku_cap) { 1122 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
1150 IWL_DEBUG_INFO("SKU OP mode is mrc\n"); 1123 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
1151 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1124 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1152 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); 1125 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
1153 } else 1126 } else
1154 IWL_DEBUG_INFO("SKU OP mode is basic\n"); 1127 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
1155 1128
1156 if ((priv->eeprom.board_revision & 0xF0) == 0xD0) { 1129 if ((eeprom->board_revision & 0xF0) == 0xD0) {
1157 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", 1130 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
1158 priv->eeprom.board_revision); 1131 eeprom->board_revision);
1159 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1132 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1160 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 1133 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
1161 } else { 1134 } else {
1162 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", 1135 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
1163 priv->eeprom.board_revision); 1136 eeprom->board_revision);
1164 iwl3945_clear_bit(priv, CSR_HW_IF_CONFIG_REG, 1137 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
1165 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 1138 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
1166 } 1139 }
1167 1140
1168 if (priv->eeprom.almgor_m_version <= 1) { 1141 if (eeprom->almgor_m_version <= 1) {
1169 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1142 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1170 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); 1143 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
1171 IWL_DEBUG_INFO("Card M type A version is 0x%X\n", 1144 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
1172 priv->eeprom.almgor_m_version); 1145 eeprom->almgor_m_version);
1173 } else { 1146 } else {
1174 IWL_DEBUG_INFO("Card M type B version is 0x%X\n", 1147 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
1175 priv->eeprom.almgor_m_version); 1148 eeprom->almgor_m_version);
1176 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1149 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1177 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); 1150 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
1178 } 1151 }
1179 spin_unlock_irqrestore(&priv->lock, flags); 1152 spin_unlock_irqrestore(&priv->lock, flags);
1180 1153
1181 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) 1154 if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
1182 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n"); 1155 IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
1156
1157 if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
1158 IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
1159}
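
iwl3945_nic_config above turns a handful of EEPROM fields into hardware config bits: sku_cap flag bits, the high nibble of board_revision, and the almgor_m_version threshold. A compact sketch of that decoding against a fake EEPROM record; the EX_ flag values and the field contents are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define EX_SKU_CAP_SW_RF_KILL (1u << 0)  /* assumed flag bits */
#define EX_SKU_CAP_HW_RF_KILL (1u << 1)

struct ex_eeprom {
        uint16_t sku_cap;
        uint16_t board_revision;
        uint8_t  almgor_m_version;
};

static void ex_decode(const struct ex_eeprom *e)
{
        /* high nibble of the board revision selects the board type */
        if ((e->board_revision & 0xF0) == 0xD0)
                printf("board type D, revision 0x%X\n", e->board_revision);
        else
                printf("other board type, revision 0x%X\n", e->board_revision);

        /* low version numbers are silicon type A, the rest type B */
        printf("silicon type %c\n", e->almgor_m_version <= 1 ? 'A' : 'B');

        if (e->sku_cap & EX_SKU_CAP_SW_RF_KILL)
                printf("SW RF kill supported\n");
        if (e->sku_cap & EX_SKU_CAP_HW_RF_KILL)
                printf("HW RF kill supported\n");
}

int main(void)
{
        struct ex_eeprom e = { .sku_cap = 3, .board_revision = 0xD3,
                               .almgor_m_version = 2 };
        ex_decode(&e);
        return 0;
}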
1160
1161int iwl3945_hw_nic_init(struct iwl_priv *priv)
1162{
1163 u8 rev_id;
1164 int rc;
1165 unsigned long flags;
1166 struct iwl_rx_queue *rxq = &priv->rxq;
1167
1168 spin_lock_irqsave(&priv->lock, flags);
1169 priv->cfg->ops->lib->apm_ops.init(priv);
1170 spin_unlock_irqrestore(&priv->lock, flags);
1171
1172 /* Determine HW type */
1173 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
1174 if (rc)
1175 return rc;
1176 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
1177
1178 rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
1179 if (rc)
1180 return rc;
1183 1181
1184 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) 1182 priv->cfg->ops->lib->apm_ops.config(priv);
1185 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
1186 1183
1187 /* Allocate the RX queue, or reset if it is already allocated */ 1184 /* Allocate the RX queue, or reset if it is already allocated */
1188 if (!rxq->bd) { 1185 if (!rxq->bd) {
1189 rc = iwl3945_rx_queue_alloc(priv); 1186 rc = iwl_rx_queue_alloc(priv);
1190 if (rc) { 1187 if (rc) {
1191 IWL_ERROR("Unable to initialize Rx queue\n"); 1188 IWL_ERR(priv, "Unable to initialize Rx queue\n");
1192 return -ENOMEM; 1189 return -ENOMEM;
1193 } 1190 }
1194 } else 1191 } else
1195 iwl3945_rx_queue_reset(priv, rxq); 1192 iwl_rx_queue_reset(priv, rxq);
1196 1193
1197 iwl3945_rx_replenish(priv); 1194 iwl3945_rx_replenish(priv);
1198 1195
@@ -1202,16 +1199,16 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
1202 1199
1203 /* Look at using this instead: 1200 /* Look at using this instead:
1204 rxq->need_update = 1; 1201 rxq->need_update = 1;
1205 iwl3945_rx_queue_update_write_ptr(priv, rxq); 1202 iwl_rx_queue_update_write_ptr(priv, rxq);
1206 */ 1203 */
1207 1204
1208 rc = iwl3945_grab_nic_access(priv); 1205 rc = iwl_grab_nic_access(priv);
1209 if (rc) { 1206 if (rc) {
1210 spin_unlock_irqrestore(&priv->lock, flags); 1207 spin_unlock_irqrestore(&priv->lock, flags);
1211 return rc; 1208 return rc;
1212 } 1209 }
1213 iwl3945_write_direct32(priv, FH_RCSR_WPTR(0), rxq->write & ~7); 1210 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
1214 iwl3945_release_nic_access(priv); 1211 iwl_release_nic_access(priv);
1215 1212
1216 spin_unlock_irqrestore(&priv->lock, flags); 1213 spin_unlock_irqrestore(&priv->lock, flags);
1217 1214
@@ -1229,116 +1226,121 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
1229 * 1226 *
1230 * Destroy all TX DMA queues and structures 1227 * Destroy all TX DMA queues and structures
1231 */ 1228 */
1232void iwl3945_hw_txq_ctx_free(struct iwl3945_priv *priv) 1229void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1233{ 1230{
1234 int txq_id; 1231 int txq_id;
1235 1232
1236 /* Tx queues */ 1233 /* Tx queues */
1237 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) 1234 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++)
1238 iwl3945_tx_queue_free(priv, &priv->txq[txq_id]); 1235 iwl_tx_queue_free(priv, txq_id);
1239} 1236}
1240 1237
1241void iwl3945_hw_txq_ctx_stop(struct iwl3945_priv *priv) 1238void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1242{ 1239{
1243 int queue; 1240 int txq_id;
1244 unsigned long flags; 1241 unsigned long flags;
1245 1242
1246 spin_lock_irqsave(&priv->lock, flags); 1243 spin_lock_irqsave(&priv->lock, flags);
1247 if (iwl3945_grab_nic_access(priv)) { 1244 if (iwl_grab_nic_access(priv)) {
1248 spin_unlock_irqrestore(&priv->lock, flags); 1245 spin_unlock_irqrestore(&priv->lock, flags);
1249 iwl3945_hw_txq_ctx_free(priv); 1246 iwl3945_hw_txq_ctx_free(priv);
1250 return; 1247 return;
1251 } 1248 }
1252 1249
1253 /* stop SCD */ 1250 /* stop SCD */
1254 iwl3945_write_prph(priv, ALM_SCD_MODE_REG, 0); 1251 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
1255 1252
1256 /* reset TFD queues */ 1253 /* reset TFD queues */
1257 for (queue = TFD_QUEUE_MIN; queue < TFD_QUEUE_MAX; queue++) { 1254 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) {
1258 iwl3945_write_direct32(priv, FH_TCSR_CONFIG(queue), 0x0); 1255 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1259 iwl3945_poll_direct_bit(priv, FH_TSSR_TX_STATUS, 1256 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1260 ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(queue), 1257 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1261 1000); 1258 1000);
1262 } 1259 }
1263 1260
1264 iwl3945_release_nic_access(priv); 1261 iwl_release_nic_access(priv);
1265 spin_unlock_irqrestore(&priv->lock, flags); 1262 spin_unlock_irqrestore(&priv->lock, flags);
1266 1263
1267 iwl3945_hw_txq_ctx_free(priv); 1264 iwl3945_hw_txq_ctx_free(priv);
1268} 1265}
1269 1266
1270int iwl3945_hw_nic_stop_master(struct iwl3945_priv *priv) 1267static int iwl3945_apm_stop_master(struct iwl_priv *priv)
1271{ 1268{
1272 int rc = 0; 1269 int ret = 0;
1273 u32 reg_val;
1274 unsigned long flags; 1270 unsigned long flags;
1275 1271
1276 spin_lock_irqsave(&priv->lock, flags); 1272 spin_lock_irqsave(&priv->lock, flags);
1277 1273
1278 /* set stop master bit */ 1274 /* set stop master bit */
1279 iwl3945_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 1275 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1280 1276
1281 reg_val = iwl3945_read32(priv, CSR_GP_CNTRL); 1277 iwl_poll_direct_bit(priv, CSR_RESET,
1278 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1282 1279
1283 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE == 1280 if (ret < 0)
1284 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE)) 1281 goto out;
1285 IWL_DEBUG_INFO("Card in power save, master is already "
1286 "stopped\n");
1287 else {
1288 rc = iwl3945_poll_direct_bit(priv, CSR_RESET,
1289 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1290 if (rc < 0) {
1291 spin_unlock_irqrestore(&priv->lock, flags);
1292 return rc;
1293 }
1294 }
1295 1282
1283out:
1296 spin_unlock_irqrestore(&priv->lock, flags); 1284 spin_unlock_irqrestore(&priv->lock, flags);
1297 IWL_DEBUG_INFO("stop master\n"); 1285 IWL_DEBUG_INFO(priv, "stop master\n");
1298 1286
1299 return rc; 1287 return ret;
1300} 1288}
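
iwl3945_apm_stop_master above sets the stop-master bit and then waits for the master-disabled bit via iwl_poll_direct_bit(), presumably a bounded poll with a microsecond budget. A sketch of such a poll loop against a dummy register; the ex_ names and the real helper's exact signature are assumptions.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static volatile uint32_t ex_csr_reset;          /* dummy register */

/* Poll until (reg & bits) == bits, giving up after 'timeout' microseconds.
 * Returns elapsed microseconds on success, -1 on timeout. */
static int ex_poll_bit(volatile uint32_t *reg, uint32_t bits, int timeout)
{
        int t = 0;

        do {
                if ((*reg & bits) == bits)
                        return t;
                usleep(10);
                t += 10;
        } while (t < timeout);

        return -1;
}

int main(void)
{
        ex_csr_reset = 1u << 8;                  /* pretend the HW already stopped */
        printf("poll -> %d\n", ex_poll_bit(&ex_csr_reset, 1u << 8, 100));
        return 0;
}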
1301 1289
1302int iwl3945_hw_nic_reset(struct iwl3945_priv *priv) 1290static void iwl3945_apm_stop(struct iwl_priv *priv)
1291{
1292 unsigned long flags;
1293
1294 iwl3945_apm_stop_master(priv);
1295
1296 spin_lock_irqsave(&priv->lock, flags);
1297
1298 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1299
1300 udelay(10);
1301 /* clear "init complete" bit to move adapter D0A* --> D0U state */
1302 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1303 spin_unlock_irqrestore(&priv->lock, flags);
1304}
1305
1306static int iwl3945_apm_reset(struct iwl_priv *priv)
1303{ 1307{
1304 int rc; 1308 int rc;
1305 unsigned long flags; 1309 unsigned long flags;
1306 1310
1307 iwl3945_hw_nic_stop_master(priv); 1311 iwl3945_apm_stop_master(priv);
1308 1312
1309 spin_lock_irqsave(&priv->lock, flags); 1313 spin_lock_irqsave(&priv->lock, flags);
1310 1314
1311 iwl3945_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 1315 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1316 udelay(10);
1317
1318 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1312 1319
1313 iwl3945_poll_direct_bit(priv, CSR_GP_CNTRL, 1320 iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
1314 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 1321 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1315 1322
1316 rc = iwl3945_grab_nic_access(priv); 1323 rc = iwl_grab_nic_access(priv);
1317 if (!rc) { 1324 if (!rc) {
1318 iwl3945_write_prph(priv, APMG_CLK_CTRL_REG, 1325 iwl_write_prph(priv, APMG_CLK_CTRL_REG,
1319 APMG_CLK_VAL_BSM_CLK_RQT); 1326 APMG_CLK_VAL_BSM_CLK_RQT);
1320 1327
1321 udelay(10); 1328 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
1322 1329 iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
1323 iwl3945_set_bit(priv, CSR_GP_CNTRL,
1324 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1325
1326 iwl3945_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
1327 iwl3945_write_prph(priv, APMG_RTC_INT_STT_REG,
1328 0xFFFFFFFF); 1330 0xFFFFFFFF);
1329 1331
1330 /* enable DMA */ 1332 /* enable DMA */
1331 iwl3945_write_prph(priv, APMG_CLK_EN_REG, 1333 iwl_write_prph(priv, APMG_CLK_EN_REG,
1332 APMG_CLK_VAL_DMA_CLK_RQT | 1334 APMG_CLK_VAL_DMA_CLK_RQT |
1333 APMG_CLK_VAL_BSM_CLK_RQT); 1335 APMG_CLK_VAL_BSM_CLK_RQT);
1334 udelay(10); 1336 udelay(10);
1335 1337
1336 iwl3945_set_bits_prph(priv, APMG_PS_CTRL_REG, 1338 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
1337 APMG_PS_CTRL_VAL_RESET_REQ); 1339 APMG_PS_CTRL_VAL_RESET_REQ);
1338 udelay(5); 1340 udelay(5);
1339 iwl3945_clear_bits_prph(priv, APMG_PS_CTRL_REG, 1341 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
1340 APMG_PS_CTRL_VAL_RESET_REQ); 1342 APMG_PS_CTRL_VAL_RESET_REQ);
1341 iwl3945_release_nic_access(priv); 1343 iwl_release_nic_access(priv);
1342 } 1344 }
1343 1345
1344 /* Clear the 'host command active' bit... */ 1346 /* Clear the 'host command active' bit... */
@@ -1367,33 +1369,34 @@ static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
1367 return ((temperature < -260) || (temperature > 25)) ? 1 : 0; 1369 return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
1368} 1370}
1369 1371
1370int iwl3945_hw_get_temperature(struct iwl3945_priv *priv) 1372int iwl3945_hw_get_temperature(struct iwl_priv *priv)
1371{ 1373{
1372 return iwl3945_read32(priv, CSR_UCODE_DRV_GP2); 1374 return iwl_read32(priv, CSR_UCODE_DRV_GP2);
1373} 1375}
1374 1376
1375/** 1377/**
1376 * iwl3945_hw_reg_txpower_get_temperature 1378 * iwl3945_hw_reg_txpower_get_temperature
1377 * get the current temperature by reading from NIC 1379 * get the current temperature by reading from NIC
1378*/ 1380*/
1379static int iwl3945_hw_reg_txpower_get_temperature(struct iwl3945_priv *priv) 1381static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1380{ 1382{
1383 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1381 int temperature; 1384 int temperature;
1382 1385
1383 temperature = iwl3945_hw_get_temperature(priv); 1386 temperature = iwl3945_hw_get_temperature(priv);
1384 1387
1385 /* driver's okay range is -260 to +25. 1388 /* driver's okay range is -260 to +25.
1386 * human readable okay range is 0 to +285 */ 1389 * human readable okay range is 0 to +285 */
1387 IWL_DEBUG_INFO("Temperature: %d\n", temperature + IWL_TEMP_CONVERT); 1390 IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
1388 1391
1389 /* handle insane temp reading */ 1392 /* handle insane temp reading */
1390 if (iwl3945_hw_reg_temp_out_of_range(temperature)) { 1393 if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
1391 IWL_ERROR("Error bad temperature value %d\n", temperature); 1394 IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
1392 1395
1393 /* if really really hot(?), 1396 /* if really really hot(?),
1394 * substitute the 3rd band/group's temp measured at factory */ 1397 * substitute the 3rd band/group's temp measured at factory */
1395 if (priv->last_temperature > 100) 1398 if (priv->last_temperature > 100)
1396 temperature = priv->eeprom.groups[2].temperature; 1399 temperature = eeprom->groups[2].temperature;
1397 else /* else use most recent "sane" value from driver */ 1400 else /* else use most recent "sane" value from driver */
1398 temperature = priv->last_temperature; 1401 temperature = priv->last_temperature;
1399 } 1402 }
@@ -1412,7 +1415,7 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl3945_priv *priv)
1412 * records new temperature in tx_mgr->temperature. 1415 * records new temperature in tx_mgr->temperature.
1413 * replaces tx_mgr->last_temperature *only* if calib needed 1416 * replaces tx_mgr->last_temperature *only* if calib needed
1414 * (assumes caller will actually do the calibration!). */ 1417 * (assumes caller will actually do the calibration!). */
1415static int is_temp_calib_needed(struct iwl3945_priv *priv) 1418static int is_temp_calib_needed(struct iwl_priv *priv)
1416{ 1419{
1417 int temp_diff; 1420 int temp_diff;
1418 1421
@@ -1421,20 +1424,20 @@ static int is_temp_calib_needed(struct iwl3945_priv *priv)
1421 1424
1422 /* get absolute value */ 1425 /* get absolute value */
1423 if (temp_diff < 0) { 1426 if (temp_diff < 0) {
1424 IWL_DEBUG_POWER("Getting cooler, delta %d,\n", temp_diff); 1427 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
1425 temp_diff = -temp_diff; 1428 temp_diff = -temp_diff;
1426 } else if (temp_diff == 0) 1429 } else if (temp_diff == 0)
1427 IWL_DEBUG_POWER("Same temp,\n"); 1430 IWL_DEBUG_POWER(priv, "Same temp,\n");
1428 else 1431 else
1429 IWL_DEBUG_POWER("Getting warmer, delta %d,\n", temp_diff); 1432 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
1430 1433
1431 /* if we don't need calibration, *don't* update last_temperature */ 1434 /* if we don't need calibration, *don't* update last_temperature */
1432 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) { 1435 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
1433 IWL_DEBUG_POWER("Timed thermal calib not needed\n"); 1436 IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
1434 return 0; 1437 return 0;
1435 } 1438 }
1436 1439
1437 IWL_DEBUG_POWER("Timed thermal calib needed\n"); 1440 IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
1438 1441
1439 /* assume that caller will actually do calib ... 1442 /* assume that caller will actually do calib ...
1440 * update the "last temperature" value */ 1443 * update the "last temperature" value */
@@ -1627,9 +1630,9 @@ static inline u8 iwl3945_hw_reg_fix_power_index(int index)
1627 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) 1630 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1628 * or 6 Mbit (OFDM) rates. 1631 * or 6 Mbit (OFDM) rates.
1629 */ 1632 */
1630static void iwl3945_hw_reg_set_scan_power(struct iwl3945_priv *priv, u32 scan_tbl_index, 1633static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
1631 s32 rate_index, const s8 *clip_pwrs, 1634 s32 rate_index, const s8 *clip_pwrs,
1632 struct iwl3945_channel_info *ch_info, 1635 struct iwl_channel_info *ch_info,
1633 int band_index) 1636 int band_index)
1634{ 1637{
1635 struct iwl3945_scan_power_info *scan_power_info; 1638 struct iwl3945_scan_power_info *scan_power_info;
@@ -1646,7 +1649,7 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl3945_priv *priv, u32 scan_tb
1646 /* further limit to user's max power preference. 1649 /* further limit to user's max power preference.
1647 * FIXME: Other spectrum management power limitations do not 1650 * FIXME: Other spectrum management power limitations do not
1648 * seem to apply?? */ 1651 * seem to apply?? */
1649 power = min(power, priv->user_txpower_limit); 1652 power = min(power, priv->tx_power_user_lmt);
1650 scan_power_info->requested_power = power; 1653 scan_power_info->requested_power = power;
1651 1654
1652 /* find difference between new scan *power* and current "normal" 1655 /* find difference between new scan *power* and current "normal"
@@ -1678,32 +1681,32 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl3945_priv *priv, u32 scan_tb
1678} 1681}
1679 1682
1680/** 1683/**
1681 * iwl3945_hw_reg_send_txpower - fill in Tx Power command with gain settings 1684 * iwl3945_send_tx_power - fill in Tx Power command with gain settings
1682 * 1685 *
1683 * Configures power settings for all rates for the current channel, 1686 * Configures power settings for all rates for the current channel,
1684 * using values from channel info struct, and send to NIC 1687 * using values from channel info struct, and send to NIC
1685 */ 1688 */
1686int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv) 1689static int iwl3945_send_tx_power(struct iwl_priv *priv)
1687{ 1690{
1688 int rate_idx, i; 1691 int rate_idx, i;
1689 const struct iwl3945_channel_info *ch_info = NULL; 1692 const struct iwl_channel_info *ch_info = NULL;
1690 struct iwl3945_txpowertable_cmd txpower = { 1693 struct iwl3945_txpowertable_cmd txpower = {
1691 .channel = priv->active_rxon.channel, 1694 .channel = priv->active_rxon.channel,
1692 }; 1695 };
1693 1696
1694 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1697 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1695 ch_info = iwl3945_get_channel_info(priv, 1698 ch_info = iwl_get_channel_info(priv,
1696 priv->band, 1699 priv->band,
1697 le16_to_cpu(priv->active_rxon.channel)); 1700 le16_to_cpu(priv->active_rxon.channel));
1698 if (!ch_info) { 1701 if (!ch_info) {
1699 IWL_ERROR 1702 IWL_ERR(priv,
1700 ("Failed to get channel info for channel %d [%d]\n", 1703 "Failed to get channel info for channel %d [%d]\n",
1701 le16_to_cpu(priv->active_rxon.channel), priv->band); 1704 le16_to_cpu(priv->active_rxon.channel), priv->band);
1702 return -EINVAL; 1705 return -EINVAL;
1703 } 1706 }
1704 1707
1705 if (!is_channel_valid(ch_info)) { 1708 if (!is_channel_valid(ch_info)) {
1706 IWL_DEBUG_POWER("Not calling TX_PWR_TABLE_CMD on " 1709 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
1707 "non-Tx channel.\n"); 1710 "non-Tx channel.\n");
1708 return 0; 1711 return 0;
1709 } 1712 }
@@ -1711,12 +1714,12 @@ int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv)
1711 /* fill cmd with power settings for all rates for current channel */ 1714 /* fill cmd with power settings for all rates for current channel */
1712 /* Fill OFDM rate */ 1715 /* Fill OFDM rate */
1713 for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0; 1716 for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
1714 rate_idx <= IWL_LAST_OFDM_RATE; rate_idx++, i++) { 1717 rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
1715 1718
1716 txpower.power[i].tpc = ch_info->power_info[i].tpc; 1719 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1717 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; 1720 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1718 1721
1719 IWL_DEBUG_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", 1722 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1720 le16_to_cpu(txpower.channel), 1723 le16_to_cpu(txpower.channel),
1721 txpower.band, 1724 txpower.band,
1722 txpower.power[i].tpc.tx_gain, 1725 txpower.power[i].tpc.tx_gain,
@@ -1729,7 +1732,7 @@ int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv)
1729 txpower.power[i].tpc = ch_info->power_info[i].tpc; 1732 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1730 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; 1733 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1731 1734
1732 IWL_DEBUG_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", 1735 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1733 le16_to_cpu(txpower.channel), 1736 le16_to_cpu(txpower.channel),
1734 txpower.band, 1737 txpower.band,
1735 txpower.power[i].tpc.tx_gain, 1738 txpower.power[i].tpc.tx_gain,
@@ -1737,8 +1740,9 @@ int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv)
1737 txpower.power[i].rate); 1740 txpower.power[i].rate);
1738 } 1741 }
1739 1742
1740 return iwl3945_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, 1743 return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1741 sizeof(struct iwl3945_txpowertable_cmd), &txpower); 1744 sizeof(struct iwl3945_txpowertable_cmd),
1745 &txpower);
1742 1746
1743} 1747}
1744 1748
@@ -1758,8 +1762,8 @@ int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv)
1758 * properly fill out the scan powers, and actual h/w gain settings, 1762 * properly fill out the scan powers, and actual h/w gain settings,
1759 * and send changes to NIC 1763 * and send changes to NIC
1760 */ 1764 */
1761static int iwl3945_hw_reg_set_new_power(struct iwl3945_priv *priv, 1765static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
1762 struct iwl3945_channel_info *ch_info) 1766 struct iwl_channel_info *ch_info)
1763{ 1767{
1764 struct iwl3945_channel_power_info *power_info; 1768 struct iwl3945_channel_power_info *power_info;
1765 int power_changed = 0; 1769 int power_changed = 0;
@@ -1768,7 +1772,7 @@ static int iwl3945_hw_reg_set_new_power(struct iwl3945_priv *priv,
1768 int power; 1772 int power;
1769 1773
1770 /* Get this chnlgrp's rate-to-max/clip-powers table */ 1774 /* Get this chnlgrp's rate-to-max/clip-powers table */
1771 clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; 1775 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
1772 1776
1773 /* Get this channel's rate-to-current-power settings table */ 1777 /* Get this channel's rate-to-current-power settings table */
1774 power_info = ch_info->power_info; 1778 power_info = ch_info->power_info;
@@ -1821,7 +1825,7 @@ static int iwl3945_hw_reg_set_new_power(struct iwl3945_priv *priv,
1821 * based strictly on regulatory (eeprom and spectrum mgt) limitations 1825 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1822 * (no consideration for h/w clipping limitations). 1826 * (no consideration for h/w clipping limitations).
1823 */ 1827 */
1824static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl3945_channel_info *ch_info) 1828static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
1825{ 1829{
1826 s8 max_power; 1830 s8 max_power;
1827 1831
@@ -1849,9 +1853,10 @@ static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl3945_channel_info *ch_i
1849 * 1853 *
1850 * If RxOn is "associated", this sends the new Txpower to NIC! 1854 * If RxOn is "associated", this sends the new Txpower to NIC!
1851 */ 1855 */
1852static int iwl3945_hw_reg_comp_txpower_temp(struct iwl3945_priv *priv) 1856static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1853{ 1857{
1854 struct iwl3945_channel_info *ch_info = NULL; 1858 struct iwl_channel_info *ch_info = NULL;
1859 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1855 int delta_index; 1860 int delta_index;
1856 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ 1861 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1857 u8 a_band; 1862 u8 a_band;
@@ -1867,7 +1872,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl3945_priv *priv)
1867 a_band = is_channel_a_band(ch_info); 1872 a_band = is_channel_a_band(ch_info);
1868 1873
1869 /* Get this chnlgrp's factory calibration temperature */ 1874 /* Get this chnlgrp's factory calibration temperature */
1870 ref_temp = (s16)priv->eeprom.groups[ch_info->group_index]. 1875 ref_temp = (s16)eeprom->groups[ch_info->group_index].
1871 temperature; 1876 temperature;
1872 1877
1873 /* get power index adjustment based on current and factory 1878 /* get power index adjustment based on current and factory
@@ -1893,7 +1898,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl3945_priv *priv)
1893 } 1898 }
1894 1899
1895 /* Get this chnlgrp's rate-to-max/clip-powers table */ 1900 /* Get this chnlgrp's rate-to-max/clip-powers table */
1896 clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; 1901 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
1897 1902
1898 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ 1903 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1899 for (scan_tbl_index = 0; 1904 for (scan_tbl_index = 0;
@@ -1907,24 +1912,24 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl3945_priv *priv)
1907 } 1912 }
1908 1913
1909 /* send Txpower command for current channel to ucode */ 1914 /* send Txpower command for current channel to ucode */
1910 return iwl3945_hw_reg_send_txpower(priv); 1915 return priv->cfg->ops->lib->send_tx_power(priv);
1911} 1916}
1912 1917
1913int iwl3945_hw_reg_set_txpower(struct iwl3945_priv *priv, s8 power) 1918int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1914{ 1919{
1915 struct iwl3945_channel_info *ch_info; 1920 struct iwl_channel_info *ch_info;
1916 s8 max_power; 1921 s8 max_power;
1917 u8 a_band; 1922 u8 a_band;
1918 u8 i; 1923 u8 i;
1919 1924
1920 if (priv->user_txpower_limit == power) { 1925 if (priv->tx_power_user_lmt == power) {
1921 IWL_DEBUG_POWER("Requested Tx power same as current " 1926 IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
1922 "limit: %ddBm.\n", power); 1927 "limit: %ddBm.\n", power);
1923 return 0; 1928 return 0;
1924 } 1929 }
1925 1930
1926 IWL_DEBUG_POWER("Setting upper limit clamp to %ddBm.\n", power); 1931 IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
1927 priv->user_txpower_limit = power; 1932 priv->tx_power_user_lmt = power;
1928 1933
1929 /* set up new Tx powers for each and every channel, 2.4 and 5.x */ 1934 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1930 1935
@@ -1953,7 +1958,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl3945_priv *priv, s8 power)
1953} 1958}
1954 1959
1955/* will add 3945 channel switch cmd handling later */ 1960/* will add 3945 channel switch cmd handling later */
1956int iwl3945_hw_channel_switch(struct iwl3945_priv *priv, u16 channel) 1961int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1957{ 1962{
1958 return 0; 1963 return 0;
1959} 1964}
@@ -1968,7 +1973,7 @@ int iwl3945_hw_channel_switch(struct iwl3945_priv *priv, u16 channel)
1968 * -- send new set of gain settings to NIC 1973 * -- send new set of gain settings to NIC
1969 * NOTE: This should continue working, even when we're not associated, 1974 * NOTE: This should continue working, even when we're not associated,
1970 * so we can keep our internal table of scan powers current. */ 1975 * so we can keep our internal table of scan powers current. */
1971void iwl3945_reg_txpower_periodic(struct iwl3945_priv *priv) 1976void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1972{ 1977{
1973 /* This will kick in the "brute force" 1978 /* This will kick in the "brute force"
1974 * iwl3945_hw_reg_comp_txpower_temp() below */ 1979 * iwl3945_hw_reg_comp_txpower_temp() below */
@@ -1987,7 +1992,7 @@ void iwl3945_reg_txpower_periodic(struct iwl3945_priv *priv)
1987 1992
1988static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) 1993static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
1989{ 1994{
1990 struct iwl3945_priv *priv = container_of(work, struct iwl3945_priv, 1995 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1991 thermal_periodic.work); 1996 thermal_periodic.work);
1992 1997
1993 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1998 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -2009,10 +2014,11 @@ static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
2009 * on A-band, EEPROM's "group frequency" entries represent the top 2014 * on A-band, EEPROM's "group frequency" entries represent the top
2010 * channel in each group 1-4. Group 5 All B/G channels are in group 0. 2015 * channel in each group 1-4. Group 5 All B/G channels are in group 0.
2011 */ 2016 */
2012static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl3945_priv *priv, 2017static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
2013 const struct iwl3945_channel_info *ch_info) 2018 const struct iwl_channel_info *ch_info)
2014{ 2019{
2015 struct iwl3945_eeprom_txpower_group *ch_grp = &priv->eeprom.groups[0]; 2020 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
2021 struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
2016 u8 group; 2022 u8 group;
2017 u16 group_index = 0; /* based on factory calib frequencies */ 2023 u16 group_index = 0; /* based on factory calib frequencies */
2018 u8 grp_channel; 2024 u8 grp_channel;
@@ -2032,7 +2038,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl3945_priv *priv,
2032 } else 2038 } else
2033 group_index = 0; /* 2.4 GHz, group 0 */ 2039 group_index = 0; /* 2.4 GHz, group 0 */
2034 2040
2035 IWL_DEBUG_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, 2041 IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
2036 group_index); 2042 group_index);
2037 return group_index; 2043 return group_index;
2038} 2044}
@@ -2043,11 +2049,12 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl3945_priv *priv,
2043 * Interpolate to get nominal (i.e. at factory calibration temperature) index 2049 * Interpolate to get nominal (i.e. at factory calibration temperature) index
2044 * into radio/DSP gain settings table for requested power. 2050 * into radio/DSP gain settings table for requested power.
2045 */ 2051 */
2046static int iwl3945_hw_reg_get_matched_power_index(struct iwl3945_priv *priv, 2052static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
2047 s8 requested_power, 2053 s8 requested_power,
2048 s32 setting_index, s32 *new_index) 2054 s32 setting_index, s32 *new_index)
2049{ 2055{
2050 const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL; 2056 const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
2057 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
2051 s32 index0, index1; 2058 s32 index0, index1;
2052 s32 power = 2 * requested_power; 2059 s32 power = 2 * requested_power;
2053 s32 i; 2060 s32 i;
@@ -2056,7 +2063,7 @@ static int iwl3945_hw_reg_get_matched_power_index(struct iwl3945_priv *priv,
2056 s32 res; 2063 s32 res;
2057 s32 denominator; 2064 s32 denominator;
2058 2065
2059 chnl_grp = &priv->eeprom.groups[setting_index]; 2066 chnl_grp = &eeprom->groups[setting_index];
2060 samples = chnl_grp->samples; 2067 samples = chnl_grp->samples;
2061 for (i = 0; i < 5; i++) { 2068 for (i = 0; i < 5; i++) {
2062 if (power == samples[i].power) { 2069 if (power == samples[i].power) {
@@ -2091,22 +2098,23 @@ static int iwl3945_hw_reg_get_matched_power_index(struct iwl3945_priv *priv,
2091 return 0; 2098 return 0;
2092} 2099}
2093 2100
2094static void iwl3945_hw_reg_init_channel_groups(struct iwl3945_priv *priv) 2101static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
2095{ 2102{
2096 u32 i; 2103 u32 i;
2097 s32 rate_index; 2104 s32 rate_index;
2105 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
2098 const struct iwl3945_eeprom_txpower_group *group; 2106 const struct iwl3945_eeprom_txpower_group *group;
2099 2107
2100 IWL_DEBUG_POWER("Initializing factory calib info from EEPROM\n"); 2108 IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
2101 2109
2102 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) { 2110 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
2103 s8 *clip_pwrs; /* table of power levels for each rate */ 2111 s8 *clip_pwrs; /* table of power levels for each rate */
2104 s8 satur_pwr; /* saturation power for each chnl group */ 2112 s8 satur_pwr; /* saturation power for each chnl group */
2105 group = &priv->eeprom.groups[i]; 2113 group = &eeprom->groups[i];
2106 2114
2107 /* sanity check on factory saturation power value */ 2115 /* sanity check on factory saturation power value */
2108 if (group->saturation_power < 40) { 2116 if (group->saturation_power < 40) {
2109 IWL_WARNING("Error: saturation power is %d, " 2117 IWL_WARN(priv, "Error: saturation power is %d, "
2110 "less than minimum expected 40\n", 2118 "less than minimum expected 40\n",
2111 group->saturation_power); 2119 group->saturation_power);
2112 return; 2120 return;
@@ -2121,7 +2129,7 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl3945_priv *priv)
2121 * power peaks, without too much distortion (clipping). 2129 * power peaks, without too much distortion (clipping).
2122 */ 2130 */
2123 /* we'll fill in this array with h/w max power levels */ 2131 /* we'll fill in this array with h/w max power levels */
2124 clip_pwrs = (s8 *) priv->clip_groups[i].clip_powers; 2132 clip_pwrs = (s8 *) priv->clip39_groups[i].clip_powers;
2125 2133
2126 /* divide factory saturation power by 2 to find -3dB level */ 2134 /* divide factory saturation power by 2 to find -3dB level */
2127 satur_pwr = (s8) (group->saturation_power >> 1); 2135 satur_pwr = (s8) (group->saturation_power >> 1);
@@ -2171,10 +2179,11 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl3945_priv *priv)
2171 * 2179 *
2172 * This does *not* write values to NIC, just sets up our internal table. 2180 * This does *not* write values to NIC, just sets up our internal table.
2173 */ 2181 */
2174int iwl3945_txpower_set_from_eeprom(struct iwl3945_priv *priv) 2182int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2175{ 2183{
2176 struct iwl3945_channel_info *ch_info = NULL; 2184 struct iwl_channel_info *ch_info = NULL;
2177 struct iwl3945_channel_power_info *pwr_info; 2185 struct iwl3945_channel_power_info *pwr_info;
2186 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
2178 int delta_index; 2187 int delta_index;
2179 u8 rate_index; 2188 u8 rate_index;
2180 u8 scan_tbl_index; 2189 u8 scan_tbl_index;
@@ -2204,15 +2213,15 @@ int iwl3945_txpower_set_from_eeprom(struct iwl3945_priv *priv)
2204 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info); 2213 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
2205 2214
2206 /* Get this chnlgrp's rate->max/clip-powers table */ 2215 /* Get this chnlgrp's rate->max/clip-powers table */
2207 clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; 2216 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
2208 2217
2209 /* calculate power index *adjustment* value according to 2218 /* calculate power index *adjustment* value according to
2210 * diff between current temperature and factory temperature */ 2219 * diff between current temperature and factory temperature */
2211 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, 2220 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
2212 priv->eeprom.groups[ch_info->group_index]. 2221 eeprom->groups[ch_info->group_index].
2213 temperature); 2222 temperature);
2214 2223
2215 IWL_DEBUG_POWER("Delta index for channel %d: %d [%d]\n", 2224 IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
2216 ch_info->channel, delta_index, temperature + 2225 ch_info->channel, delta_index, temperature +
2217 IWL_TEMP_CONVERT); 2226 IWL_TEMP_CONVERT);
2218 2227
@@ -2235,7 +2244,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl3945_priv *priv)
2235 ch_info->group_index, 2244 ch_info->group_index,
2236 &power_idx); 2245 &power_idx);
2237 if (rc) { 2246 if (rc) {
2238 IWL_ERROR("Invalid power index\n"); 2247 IWL_ERR(priv, "Invalid power index\n");
2239 return rc; 2248 return rc;
2240 } 2249 }
2241 pwr_info->base_power_index = (u8) power_idx; 2250 pwr_info->base_power_index = (u8) power_idx;
@@ -2295,75 +2304,90 @@ int iwl3945_txpower_set_from_eeprom(struct iwl3945_priv *priv)
2295 return 0; 2304 return 0;
2296} 2305}
2297 2306
2298int iwl3945_hw_rxq_stop(struct iwl3945_priv *priv) 2307int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2299{ 2308{
2300 int rc; 2309 int rc;
2301 unsigned long flags; 2310 unsigned long flags;
2302 2311
2303 spin_lock_irqsave(&priv->lock, flags); 2312 spin_lock_irqsave(&priv->lock, flags);
2304 rc = iwl3945_grab_nic_access(priv); 2313 rc = iwl_grab_nic_access(priv);
2305 if (rc) { 2314 if (rc) {
2306 spin_unlock_irqrestore(&priv->lock, flags); 2315 spin_unlock_irqrestore(&priv->lock, flags);
2307 return rc; 2316 return rc;
2308 } 2317 }
2309 2318
2310 iwl3945_write_direct32(priv, FH_RCSR_CONFIG(0), 0); 2319 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2311 rc = iwl3945_poll_direct_bit(priv, FH_RSSR_STATUS, 2320 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2312 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 2321 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2313 if (rc < 0) 2322 if (rc < 0)
2314 IWL_ERROR("Can't stop Rx DMA.\n"); 2323 IWL_ERR(priv, "Can't stop Rx DMA.\n");
2315 2324
2316 iwl3945_release_nic_access(priv); 2325 iwl_release_nic_access(priv);
2317 spin_unlock_irqrestore(&priv->lock, flags); 2326 spin_unlock_irqrestore(&priv->lock, flags);
2318 2327
2319 return 0; 2328 return 0;
2320} 2329}
2321 2330
2322int iwl3945_hw_tx_queue_init(struct iwl3945_priv *priv, struct iwl3945_tx_queue *txq) 2331int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2323{ 2332{
2324 int rc; 2333 int rc;
2325 unsigned long flags; 2334 unsigned long flags;
2326 int txq_id = txq->q.id; 2335 int txq_id = txq->q.id;
2327 2336
2328 struct iwl3945_shared *shared_data = priv->hw_setting.shared_virt; 2337 struct iwl3945_shared *shared_data = priv->shared_virt;
2329 2338
2330 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); 2339 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2331 2340
2332 spin_lock_irqsave(&priv->lock, flags); 2341 spin_lock_irqsave(&priv->lock, flags);
2333 rc = iwl3945_grab_nic_access(priv); 2342 rc = iwl_grab_nic_access(priv);
2334 if (rc) { 2343 if (rc) {
2335 spin_unlock_irqrestore(&priv->lock, flags); 2344 spin_unlock_irqrestore(&priv->lock, flags);
2336 return rc; 2345 return rc;
2337 } 2346 }
2338 iwl3945_write_direct32(priv, FH_CBCC_CTRL(txq_id), 0); 2347 iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
2339 iwl3945_write_direct32(priv, FH_CBCC_BASE(txq_id), 0); 2348 iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
2340 2349
2341 iwl3945_write_direct32(priv, FH_TCSR_CONFIG(txq_id), 2350 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
2342 ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | 2351 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2343 ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | 2352 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2344 ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | 2353 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
2345 ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | 2354 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
2346 ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); 2355 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
2347 iwl3945_release_nic_access(priv); 2356 iwl_release_nic_access(priv);
2348 2357
2349 /* fake read to flush all prev. writes */ 2358 /* fake read to flush all prev. writes */
2350 iwl3945_read32(priv, FH_TSSR_CBB_BASE); 2359 iwl_read32(priv, FH39_TSSR_CBB_BASE);
2351 spin_unlock_irqrestore(&priv->lock, flags); 2360 spin_unlock_irqrestore(&priv->lock, flags);
2352 2361
2353 return 0; 2362 return 0;
2354} 2363}
2355 2364
2356int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv) 2365/*
2366 * HCMD utils
2367 */
2368static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2357{ 2369{
2358 struct iwl3945_shared *shared_data = priv->hw_setting.shared_virt; 2370 switch (cmd_id) {
2371 case REPLY_RXON:
2372 return sizeof(struct iwl3945_rxon_cmd);
2373 case POWER_TABLE_CMD:
2374 return sizeof(struct iwl3945_powertable_cmd);
2375 default:
2376 return len;
2377 }
2378}
2359 2379
2360 return le32_to_cpu(shared_data->rx_read_ptr[0]); 2380static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
2381{
2382 u16 size = (u16)sizeof(struct iwl3945_addsta_cmd);
2383 memcpy(data, cmd, size);
2384 return size;
2361} 2385}
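
The new HCMD utils above let the 3945 report fixed payload sizes for a few commands (RXON and the power table) while passing the caller's length through for everything else, and serialize the add-station command with a plain memcpy. A sketch of the same size-dispatch idea; command IDs and struct sizes here are made up.

#include <stdint.h>
#include <stdio.h>

enum ex_cmd_id { EX_REPLY_RXON = 0x10, EX_POWER_TABLE_CMD = 0x77 };

struct ex_rxon_cmd  { uint8_t payload[72]; };    /* assumed, device-specific layout */
struct ex_power_cmd { uint8_t payload[40]; };

/* Most commands keep the caller-supplied length; a few use fixed sizes. */
static uint16_t ex_get_hcmd_size(uint8_t cmd_id, uint16_t len)
{
        switch (cmd_id) {
        case EX_REPLY_RXON:
                return sizeof(struct ex_rxon_cmd);
        case EX_POWER_TABLE_CMD:
                return sizeof(struct ex_power_cmd);
        default:
                return len;
        }
}

int main(void)
{
        printf("rxon=%u, other=%u\n",
               ex_get_hcmd_size(EX_REPLY_RXON, 200),
               ex_get_hcmd_size(0x42, 200));
        return 0;
}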
2362 2386
2363/** 2387/**
2364 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table 2388 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
2365 */ 2389 */
2366int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv) 2390int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2367{ 2391{
2368 int rc, i, index, prev_index; 2392 int rc, i, index, prev_index;
2369 struct iwl3945_rate_scaling_cmd rate_cmd = { 2393 struct iwl3945_rate_scaling_cmd rate_cmd = {
@@ -2384,7 +2408,7 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2384 2408
2385 switch (priv->band) { 2409 switch (priv->band) {
2386 case IEEE80211_BAND_5GHZ: 2410 case IEEE80211_BAND_5GHZ:
2387 IWL_DEBUG_RATE("Select A mode rate scale\n"); 2411 IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
2388 /* If one of the following CCK rates is used, 2412 /* If one of the following CCK rates is used,
2389 * have it fall back to the 6M OFDM rate */ 2413 * have it fall back to the 6M OFDM rate */
2390 for (i = IWL_RATE_1M_INDEX_TABLE; 2414 for (i = IWL_RATE_1M_INDEX_TABLE;
@@ -2402,12 +2426,12 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2402 break; 2426 break;
2403 2427
2404 case IEEE80211_BAND_2GHZ: 2428 case IEEE80211_BAND_2GHZ:
2405 IWL_DEBUG_RATE("Select B/G mode rate scale\n"); 2429 IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
2406 /* If an OFDM rate is used, have it fall back to the 2430 /* If an OFDM rate is used, have it fall back to the
2407 * 1M CCK rates */ 2431 * 1M CCK rates */
2408 2432
2409 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && 2433 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2410 iwl3945_is_associated(priv)) { 2434 iwl_is_associated(priv)) {
2411 2435
2412 index = IWL_FIRST_CCK_RATE; 2436 index = IWL_FIRST_CCK_RATE;
2413 for (i = IWL_RATE_6M_INDEX_TABLE; 2437 for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2428,47 +2452,48 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2428 2452
2429 /* Update the rate scaling for control frame Tx */ 2453 /* Update the rate scaling for control frame Tx */
2430 rate_cmd.table_id = 0; 2454 rate_cmd.table_id = 0;
2431 rc = iwl3945_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2455 rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2432 &rate_cmd); 2456 &rate_cmd);
2433 if (rc) 2457 if (rc)
2434 return rc; 2458 return rc;
2435 2459
2436 /* Update the rate scaling for data frame Tx */ 2460 /* Update the rate scaling for data frame Tx */
2437 rate_cmd.table_id = 1; 2461 rate_cmd.table_id = 1;
2438 return iwl3945_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2462 return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2439 &rate_cmd); 2463 &rate_cmd);
2440} 2464}
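iwl3945_init_hw_rate_table() walks the rate table and, depending on the band, redirects fallbacks: in 5 GHz the CCK entries fall back to the 6M OFDM rate, in 2.4 GHz the OFDM entries can fall back toward 1M CCK, and the result is pushed down with two REPLY_RATE_SCALE commands. A small sketch of building such a fallback chain, with an invented 12-rate table and an assumed CCK/OFDM split (indices are illustrative, not the driver's IWL_RATE_* values):

#include <stdio.h>

#define NUM_RATES 12

/* Hypothetical per-rate fallback entry. */
struct rate_entry {
	int next_index;   /* rate to fall back to on Tx failure */
	int prev_index;   /* rate to climb back up to on success */
};

/* Chain rates [first..last] linearly, clamping at the ends. */
static void build_fallback_chain(struct rate_entry *tbl, int first, int last)
{
	for (int i = first; i <= last; i++) {
		tbl[i].next_index = (i > first) ? i - 1 : first;
		tbl[i].prev_index = (i < last) ? i + 1 : last;
	}
}

int main(void)
{
	struct rate_entry tbl[NUM_RATES];

	/* assume rates 0..3 are CCK, 4..11 are OFDM (4 playing "6M OFDM") */
	build_fallback_chain(tbl, 0, NUM_RATES - 1);

	/* 5 GHz style adjustment: CCK is unusable, so redirect its fallback
	 * to the lowest OFDM rate, which then falls back to itself. */
	for (int i = 0; i <= 3; i++)
		tbl[i].next_index = 4;
	tbl[4].next_index = 4;

	for (int i = 0; i < NUM_RATES; i++)
		printf("rate %2d -> falls back to %2d\n", i, tbl[i].next_index);
	return 0;
}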
2441 2465
2442/* Called when initializing driver */ 2466/* Called when initializing driver */
2443int iwl3945_hw_set_hw_setting(struct iwl3945_priv *priv) 2467int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2444{ 2468{
2445 memset((void *)&priv->hw_setting, 0, 2469 memset((void *)&priv->hw_params, 0,
2446 sizeof(struct iwl3945_driver_hw_info)); 2470 sizeof(struct iwl_hw_params));
2447 2471
2448 priv->hw_setting.shared_virt = 2472 priv->shared_virt =
2449 pci_alloc_consistent(priv->pci_dev, 2473 pci_alloc_consistent(priv->pci_dev,
2450 sizeof(struct iwl3945_shared), 2474 sizeof(struct iwl3945_shared),
2451 &priv->hw_setting.shared_phys); 2475 &priv->shared_phys);
2452 2476
2453 if (!priv->hw_setting.shared_virt) { 2477 if (!priv->shared_virt) {
2454 IWL_ERROR("failed to allocate pci memory\n"); 2478 IWL_ERR(priv, "failed to allocate pci memory\n");
2455 mutex_unlock(&priv->mutex); 2479 mutex_unlock(&priv->mutex);
2456 return -ENOMEM; 2480 return -ENOMEM;
2457 } 2481 }
2458 2482
2459 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE; 2483 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2460 priv->hw_setting.max_pkt_size = 2342; 2484 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K;
2461 priv->hw_setting.tx_cmd_len = sizeof(struct iwl3945_tx_cmd); 2485 priv->hw_params.max_pkt_size = 2342;
2462 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE; 2486 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2463 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG; 2487 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2464 priv->hw_setting.max_stations = IWL3945_STATION_COUNT; 2488 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2465 priv->hw_setting.bcast_sta_id = IWL3945_BROADCAST_ID; 2489 priv->hw_params.bcast_sta_id = IWL3945_BROADCAST_ID;
2490
2491 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2466 2492
2467 priv->hw_setting.tx_ant_num = 2;
2468 return 0; 2493 return 0;
2469} 2494}
2470 2495
2471unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv, 2496unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
2472 struct iwl3945_frame *frame, u8 rate) 2497 struct iwl3945_frame *frame, u8 rate)
2473{ 2498{
2474 struct iwl3945_tx_beacon_cmd *tx_beacon_cmd; 2499 struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
@@ -2477,7 +2502,7 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
2477 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; 2502 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2478 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 2503 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2479 2504
2480 tx_beacon_cmd->tx.sta_id = priv->hw_setting.bcast_sta_id; 2505 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
2481 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2506 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2482 2507
2483 frame_size = iwl3945_fill_beacon_frame(priv, 2508 frame_size = iwl3945_fill_beacon_frame(priv,
@@ -2501,37 +2526,261 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
2501 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size; 2526 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
2502} 2527}
2503 2528
2504void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv) 2529void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
2505{ 2530{
2506 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx; 2531 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
2507 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; 2532 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
2508} 2533}
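iwl3945_hw_rx_handler_setup() only fills two slots in the priv->rx_handlers[] array, so the interrupt path can dispatch each notification from the device by its command ID. A compact sketch of that table-driven dispatch; the IDs and handler bodies below are made up for illustration and do not correspond to the real REPLY_* values:

#include <stdio.h>

#define REPLY_MAX 256

struct packet { unsigned char cmd_id; };

typedef void (*rx_handler_t)(const struct packet *pkt);

static rx_handler_t rx_handlers[REPLY_MAX];

static void handle_tx_reply(const struct packet *pkt)
{
	printf("TX reply, cmd 0x%02x\n", pkt->cmd_id);
}

static void handle_rx_frame(const struct packet *pkt)
{
	printf("RX frame, cmd 0x%02x\n", pkt->cmd_id);
}

/* Hardware-specific setup: only the IDs this device produces get handlers. */
static void rx_handler_setup(void)
{
	rx_handlers[0x1c] = handle_tx_reply;   /* illustrative "REPLY_TX" id */
	rx_handlers[0x1b] = handle_rx_frame;   /* illustrative "REPLY_RX" id */
}

/* Generic dispatch: unknown notifications are simply reported and ignored. */
static void dispatch(const struct packet *pkt)
{
	if (rx_handlers[pkt->cmd_id])
		rx_handlers[pkt->cmd_id](pkt);
	else
		printf("unhandled notification 0x%02x\n", pkt->cmd_id);
}

int main(void)
{
	struct packet p1 = { 0x1c }, p2 = { 0x55 };

	rx_handler_setup();
	dispatch(&p1);
	dispatch(&p2);
	return 0;
}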
2509 2534
2510void iwl3945_hw_setup_deferred_work(struct iwl3945_priv *priv) 2535void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
2511{ 2536{
2512 INIT_DELAYED_WORK(&priv->thermal_periodic, 2537 INIT_DELAYED_WORK(&priv->thermal_periodic,
2513 iwl3945_bg_reg_txpower_periodic); 2538 iwl3945_bg_reg_txpower_periodic);
2514} 2539}
2515 2540
2516void iwl3945_hw_cancel_deferred_work(struct iwl3945_priv *priv) 2541void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
2517{ 2542{
2518 cancel_delayed_work(&priv->thermal_periodic); 2543 cancel_delayed_work(&priv->thermal_periodic);
2519} 2544}
2520 2545
2521static struct iwl_3945_cfg iwl3945_bg_cfg = { 2546/* check contents of special bootstrap uCode SRAM */
2547static int iwl3945_verify_bsm(struct iwl_priv *priv)
2548 {
2549 __le32 *image = priv->ucode_boot.v_addr;
2550 u32 len = priv->ucode_boot.len;
2551 u32 reg;
2552 u32 val;
2553
2554 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2555
2556 /* verify BSM SRAM contents */
2557 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
2558 for (reg = BSM_SRAM_LOWER_BOUND;
2559 reg < BSM_SRAM_LOWER_BOUND + len;
2560 reg += sizeof(u32), image++) {
2561 val = iwl_read_prph(priv, reg);
2562 if (val != le32_to_cpu(*image)) {
2563 IWL_ERR(priv, "BSM uCode verification failed at "
2564 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
2565 BSM_SRAM_LOWER_BOUND,
2566 reg - BSM_SRAM_LOWER_BOUND, len,
2567 val, le32_to_cpu(*image));
2568 return -EIO;
2569 }
2570 }
2571
2572 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
2573
2574 return 0;
2575}
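iwl3945_verify_bsm() reads the bootstrap SRAM back one word at a time and compares it against the little-endian host image, failing with -EIO on the first mismatch. A user-space sketch of the same check is below; the "SRAM" is just a second buffer, and the byte-order handling of the real le32_to_cpu() conversion is left out for simplicity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Compare a device copy against the host image, word by word.
 * Returns 0 on match, -1 with a diagnostic on the first mismatch. */
static int verify_image(const uint32_t *sram, const uint32_t *image,
			size_t len_bytes)
{
	for (size_t off = 0; off < len_bytes; off += sizeof(uint32_t)) {
		uint32_t dev = sram[off / 4];
		uint32_t host = image[off / 4];
		if (dev != host) {
			fprintf(stderr,
				"mismatch at +%zu: is 0x%08x, s/b 0x%08x\n",
				off, (unsigned)dev, (unsigned)host);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t image[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	uint32_t sram[4];

	memcpy(sram, image, sizeof(sram));  /* pretend the DMA copy worked */
	sram[2] ^= 0x1;                     /* inject a fault to show the error path */

	return verify_image(sram, image, sizeof(image)) ? 1 : 0;
}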
2576
2577
2578/******************************************************************************
2579 *
2580 * EEPROM related functions
2581 *
2582 ******************************************************************************/
2583
2584/*
2585 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2586 * embedded controller) as EEPROM reader; each read is a series of pulses
2587 * to/from the EEPROM chip, not a single event, so even reads could conflict
2588 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2589 * simply claims ownership, which should be safe when this function is called
2590 * (i.e. before loading uCode!).
2591 */
2592static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
2593{
2594 _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2595 return 0;
2596}
2597
2598
2599static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
2600{
2601 return;
2602}
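As the comment above explains, the driver claims the EEPROM simply by clearing the OWNER bits in CSR_EEPROM_GP, which is safe only because uCode is not yet running when this is called. A hedged sketch of that read-modify-write pattern against a register image; the register value and bit mask are placeholders, and a real driver would go through iwl_read32()/iwl_write32() rather than a plain variable:

#include <stdint.h>
#include <stdio.h>

/* Placeholder register image standing in for an MMIO register. */
static uint32_t eeprom_gp_reg = 0x000001C5;

#define EEPROM_GP_IF_OWNER_MSK  0x00000180u  /* assumed owner bit field */

/* Claim the EEPROM for the host driver by clearing the owner bits.
 * Only safe while the embedded controller is idle (before uCode load). */
static void eeprom_acquire_semaphore(void)
{
	eeprom_gp_reg &= ~EEPROM_GP_IF_OWNER_MSK;
}

int main(void)
{
	printf("before: 0x%08X\n", eeprom_gp_reg);
	eeprom_acquire_semaphore();
	printf("after:  0x%08X\n", eeprom_gp_reg);
	return 0;
}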
2603
2604 /**
2605 * iwl3945_load_bsm - Load bootstrap instructions
2606 *
2607 * BSM operation:
2608 *
2609 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2610 * in special SRAM that does not power down during RFKILL. When powering back
2611 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2612 * the bootstrap program into the on-board processor, and starts it.
2613 *
2614 * The bootstrap program loads (via DMA) instructions and data for a new
2615 * program from host DRAM locations indicated by the host driver in the
2616 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2617 * automatically.
2618 *
2619 * When initializing the NIC, the host driver points the BSM to the
2620 * "initialize" uCode image. This uCode sets up some internal data, then
2621 * notifies host via "initialize alive" that it is complete.
2622 *
2623 * The host then replaces the BSM_DRAM_* pointer values to point to the
2624 * normal runtime uCode instructions and a backup uCode data cache buffer
2625 * (filled initially with starting data values for the on-board processor),
2626 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2627 * which begins normal operation.
2628 *
2629 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2630 * the backup data cache in DRAM before SRAM is powered down.
2631 *
2632 * When powering back up, the BSM loads the bootstrap program. This reloads
2633 * the runtime uCode instructions and the backup data cache into SRAM,
2634 * and re-launches the runtime uCode from where it left off.
2635 */
2636static int iwl3945_load_bsm(struct iwl_priv *priv)
2637{
2638 __le32 *image = priv->ucode_boot.v_addr;
2639 u32 len = priv->ucode_boot.len;
2640 dma_addr_t pinst;
2641 dma_addr_t pdata;
2642 u32 inst_len;
2643 u32 data_len;
2644 int rc;
2645 int i;
2646 u32 done;
2647 u32 reg_offset;
2648
2649 IWL_DEBUG_INFO(priv, "Begin load bsm\n");
2650
2651 /* make sure bootstrap program is no larger than BSM's SRAM size */
2652 if (len > IWL39_MAX_BSM_SIZE)
2653 return -EINVAL;
2654
2655 /* Tell bootstrap uCode where to find the "Initialize" uCode
2656 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
2657 * NOTE: iwl3945_initialize_alive_start() will replace these values,
2658 * after the "initialize" uCode has run, to point to
2659 * runtime/protocol instructions and backup data cache. */
2660 pinst = priv->ucode_init.p_addr;
2661 pdata = priv->ucode_init_data.p_addr;
2662 inst_len = priv->ucode_init.len;
2663 data_len = priv->ucode_init_data.len;
2664
2665 rc = iwl_grab_nic_access(priv);
2666 if (rc)
2667 return rc;
2668
2669 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2670 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2671 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2672 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2673
2674 /* Fill BSM memory with bootstrap instructions */
2675 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2676 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2677 reg_offset += sizeof(u32), image++)
2678 _iwl_write_prph(priv, reg_offset,
2679 le32_to_cpu(*image));
2680
2681 rc = iwl3945_verify_bsm(priv);
2682 if (rc) {
2683 iwl_release_nic_access(priv);
2684 return rc;
2685 }
2686
2687 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2688 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
2689 iwl_write_prph(priv, BSM_WR_MEM_DST_REG,
2690 IWL39_RTC_INST_LOWER_BOUND);
2691 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2692
2693 /* Load bootstrap code into instruction SRAM now,
2694 * to prepare to load "initialize" uCode */
2695 iwl_write_prph(priv, BSM_WR_CTRL_REG,
2696 BSM_WR_CTRL_REG_BIT_START);
2697
2698 /* Wait for load of bootstrap uCode to finish */
2699 for (i = 0; i < 100; i++) {
2700 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
2701 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2702 break;
2703 udelay(10);
2704 }
2705 if (i < 100)
2706 IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
2707 else {
2708 IWL_ERR(priv, "BSM write did not complete!\n");
2709 return -EIO;
2710 }
2711
2712 /* Enable future boot loads whenever power management unit triggers it
2713 * (e.g. when powering back up after power-save shutdown) */
2714 iwl_write_prph(priv, BSM_WR_CTRL_REG,
2715 BSM_WR_CTRL_REG_BIT_START_EN);
2716
2717 iwl_release_nic_access(priv);
2718
2719 return 0;
2720}
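After kicking the BSM with BSM_WR_CTRL_REG_BIT_START, iwl3945_load_bsm() busy-polls up to 100 times with a 10 microsecond delay, waiting for the start bit to clear before declaring the bootstrap load complete. The same bounded-poll idiom, sketched as a standalone helper with a simulated register (the real driver reads it through iwl_read_prph() and sleeps with udelay()):

#include <stdbool.h>
#include <stdio.h>

#define START_BIT  0x1u

/* Simulated device register: clears its start bit after a few reads. */
static unsigned int fake_ctrl_reg = START_BIT;

static unsigned int read_ctrl_reg(void)
{
	static int countdown = 5;
	if (countdown-- == 0)
		fake_ctrl_reg &= ~START_BIT;
	return fake_ctrl_reg;
}

/* Poll until START_BIT clears or max_tries is exhausted; true on success. */
static bool wait_for_completion_polled(int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if (!(read_ctrl_reg() & START_BIT)) {
			printf("done after %d polls\n", i);
			return true;
		}
		/* a driver would udelay(10) here instead of spinning hot */
	}
	return false;
}

int main(void)
{
	if (!wait_for_completion_polled(100))
		fprintf(stderr, "device did not complete\n");
	return 0;
}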
2721
2722static struct iwl_lib_ops iwl3945_lib = {
2723 .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
2724 .txq_free_tfd = iwl3945_hw_txq_free_tfd,
2725 .txq_init = iwl3945_hw_tx_queue_init,
2726 .load_ucode = iwl3945_load_bsm,
2727 .apm_ops = {
2728 .init = iwl3945_apm_init,
2729 .reset = iwl3945_apm_reset,
2730 .stop = iwl3945_apm_stop,
2731 .config = iwl3945_nic_config,
2732 .set_pwr_src = iwl3945_set_pwr_src,
2733 },
2734 .eeprom_ops = {
2735 .regulatory_bands = {
2736 EEPROM_REGULATORY_BAND_1_CHANNELS,
2737 EEPROM_REGULATORY_BAND_2_CHANNELS,
2738 EEPROM_REGULATORY_BAND_3_CHANNELS,
2739 EEPROM_REGULATORY_BAND_4_CHANNELS,
2740 EEPROM_REGULATORY_BAND_5_CHANNELS,
2741 EEPROM_REGULATORY_BAND_NO_FAT,
2742 EEPROM_REGULATORY_BAND_NO_FAT,
2743 },
2744 .verify_signature = iwlcore_eeprom_verify_signature,
2745 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
2746 .release_semaphore = iwl3945_eeprom_release_semaphore,
2747 .query_addr = iwlcore_eeprom_query_addr,
2748 },
2749 .send_tx_power = iwl3945_send_tx_power,
2750};
2751
2752static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2753 .get_hcmd_size = iwl3945_get_hcmd_size,
2754 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2755};
2756
2757static struct iwl_ops iwl3945_ops = {
2758 .lib = &iwl3945_lib,
2759 .utils = &iwl3945_hcmd_utils,
2760};
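The iwl_ops/iwl_lib_ops tables introduced here are what lets the shared iwlcore layer call hardware-specific code, e.g. ops->lib->load_ucode(), without knowing whether it is driving a 3945 or a 4965. A minimal sketch of that indirection is below; the struct layout and function names are illustrative and deliberately simpler than the real iwlcore API:

#include <stdio.h>

struct device;  /* stands in for struct iwl_priv */

/* Hardware-specific hooks the core layer calls through. */
struct lib_ops {
	int  (*load_ucode)(struct device *dev);
	void (*rx_handler_setup)(struct device *dev);
};

struct driver_ops {
	const struct lib_ops *lib;
};

/* One back-end ("3945-like") implementation of the hooks. */
static int demo_load_ucode(struct device *dev)
{
	(void)dev;
	printf("back-end: loading bootstrap image\n");
	return 0;
}

static void demo_rx_handler_setup(struct device *dev)
{
	(void)dev;
	printf("back-end: installing RX handlers\n");
}

static const struct lib_ops demo_lib = {
	.load_ucode       = demo_load_ucode,
	.rx_handler_setup = demo_rx_handler_setup,
};

static const struct driver_ops demo_ops = { .lib = &demo_lib };

/* Core-layer code: knows only the ops table, not the hardware family. */
static int core_bring_up(struct device *dev, const struct driver_ops *ops)
{
	ops->lib->rx_handler_setup(dev);
	return ops->lib->load_ucode(dev);
}

int main(void)
{
	return core_bring_up(NULL, &demo_ops);
}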
2761
2762static struct iwl_cfg iwl3945_bg_cfg = {
2522 .name = "3945BG", 2763 .name = "3945BG",
2523 .fw_name_pre = IWL3945_FW_PRE, 2764 .fw_name_pre = IWL3945_FW_PRE,
2524 .ucode_api_max = IWL3945_UCODE_API_MAX, 2765 .ucode_api_max = IWL3945_UCODE_API_MAX,
2525 .ucode_api_min = IWL3945_UCODE_API_MIN, 2766 .ucode_api_min = IWL3945_UCODE_API_MIN,
2526 .sku = IWL_SKU_G, 2767 .sku = IWL_SKU_G,
2768 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2769 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2770 .ops = &iwl3945_ops,
2771 .mod_params = &iwl3945_mod_params
2527}; 2772};
2528 2773
2529static struct iwl_3945_cfg iwl3945_abg_cfg = { 2774static struct iwl_cfg iwl3945_abg_cfg = {
2530 .name = "3945ABG", 2775 .name = "3945ABG",
2531 .fw_name_pre = IWL3945_FW_PRE, 2776 .fw_name_pre = IWL3945_FW_PRE,
2532 .ucode_api_max = IWL3945_UCODE_API_MAX, 2777 .ucode_api_max = IWL3945_UCODE_API_MAX,
2533 .ucode_api_min = IWL3945_UCODE_API_MIN, 2778 .ucode_api_min = IWL3945_UCODE_API_MIN,
2534 .sku = IWL_SKU_A|IWL_SKU_G, 2779 .sku = IWL_SKU_A|IWL_SKU_G,
2780 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2781 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2782 .ops = &iwl3945_ops,
2783 .mod_params = &iwl3945_mod_params
2535}; 2784};
2536 2785
2537struct pci_device_id iwl3945_hw_card_ids[] = { 2786struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 2c0ddc5110c..ab7aaf6872c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -43,11 +43,13 @@
43/* Hardware specific file defines the PCI IDs table for that hardware module */ 43/* Hardware specific file defines the PCI IDs table for that hardware module */
44extern struct pci_device_id iwl3945_hw_card_ids[]; 44extern struct pci_device_id iwl3945_hw_card_ids[];
45 45
46#define DRV_NAME "iwl3945"
47#include "iwl-csr.h" 46#include "iwl-csr.h"
48#include "iwl-prph.h" 47#include "iwl-prph.h"
48#include "iwl-fh.h"
49#include "iwl-3945-hw.h" 49#include "iwl-3945-hw.h"
50#include "iwl-3945-debug.h" 50#include "iwl-debug.h"
51#include "iwl-power.h"
52#include "iwl-dev.h"
51#include "iwl-3945-led.h" 53#include "iwl-3945-led.h"
52 54
53/* Highest firmware API version supported */ 55/* Highest firmware API version supported */
@@ -74,8 +76,7 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
74#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) 76#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
75 77
76/* Module parameters accessible from iwl-*.c */ 78/* Module parameters accessible from iwl-*.c */
77extern int iwl3945_param_hwcrypto; 79extern struct iwl_mod_params iwl3945_mod_params;
78extern int iwl3945_param_queues_num;
79 80
80struct iwl3945_sta_priv { 81struct iwl3945_sta_priv {
81 struct iwl3945_rs_sta *rs_sta; 82 struct iwl3945_rs_sta *rs_sta;
@@ -95,7 +96,6 @@ enum iwl3945_antenna {
95 * else RTS for data/management frames where MPDU is larger 96 * else RTS for data/management frames where MPDU is larger
96 * than RTS value. 97 * than RTS value.
97 */ 98 */
98#define IWL_RX_BUF_SIZE 3000U
99#define DEFAULT_RTS_THRESHOLD 2347U 99#define DEFAULT_RTS_THRESHOLD 2347U
100#define MIN_RTS_THRESHOLD 0U 100#define MIN_RTS_THRESHOLD 0U
101#define MAX_RTS_THRESHOLD 2347U 101#define MAX_RTS_THRESHOLD 2347U
@@ -105,136 +105,7 @@ enum iwl3945_antenna {
105#define DEFAULT_SHORT_RETRY_LIMIT 7U 105#define DEFAULT_SHORT_RETRY_LIMIT 7U
106#define DEFAULT_LONG_RETRY_LIMIT 4U 106#define DEFAULT_LONG_RETRY_LIMIT 4U
107 107
108struct iwl3945_rx_mem_buffer { 108#include "iwl-agn-rs.h"
109 dma_addr_t dma_addr;
110 struct sk_buff *skb;
111 struct list_head list;
112};
113
114/*
115 * Generic queue structure
116 *
117 * Contains common data for Rx and Tx queues
118 */
119struct iwl3945_queue {
120 int n_bd; /* number of BDs in this queue */
121 int write_ptr; /* 1-st empty entry (index) host_w*/
122 int read_ptr; /* last used entry (index) host_r*/
123 dma_addr_t dma_addr; /* physical addr for BD's */
124 int n_window; /* safe queue window */
125 u32 id;
126 int low_mark; /* low watermark, resume queue if free
127 * space more than this */
128 int high_mark; /* high watermark, stop queue if free
129 * space less than this */
130} __attribute__ ((packed));
131
132int iwl3945_queue_space(const struct iwl3945_queue *q);
133int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i);
134
135#define MAX_NUM_OF_TBS (20)
136
137/* One for each TFD */
138struct iwl3945_tx_info {
139 struct sk_buff *skb[MAX_NUM_OF_TBS];
140};
141
142/**
143 * struct iwl3945_tx_queue - Tx Queue for DMA
144 * @q: generic Rx/Tx queue descriptor
145 * @bd: base of circular buffer of TFDs
146 * @cmd: array of command/Tx buffers
147 * @dma_addr_cmd: physical address of cmd/tx buffer array
148 * @txb: array of per-TFD driver data
149 * @need_update: indicates need to update read/write index
150 *
151 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
152 * descriptors) and required locking structures.
153 */
154struct iwl3945_tx_queue {
155 struct iwl3945_queue q;
156 struct iwl3945_tfd_frame *bd;
157 struct iwl3945_cmd *cmd;
158 dma_addr_t dma_addr_cmd;
159 struct iwl3945_tx_info *txb;
160 int need_update;
161 int active;
162};
163
164#define IWL_NUM_SCAN_RATES (2)
165
166struct iwl3945_channel_tgd_info {
167 u8 type;
168 s8 max_power;
169};
170
171struct iwl3945_channel_tgh_info {
172 s64 last_radar_time;
173};
174
175/* current Tx power values to use, one for each rate for each channel.
176 * requested power is limited by:
177 * -- regulatory EEPROM limits for this channel
178 * -- hardware capabilities (clip-powers)
179 * -- spectrum management
180 * -- user preference (e.g. iwconfig)
181 * when requested power is set, base power index must also be set. */
182struct iwl3945_channel_power_info {
183 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
184 s8 power_table_index; /* actual (compenst'd) index into gain table */
185 s8 base_power_index; /* gain index for power at factory temp. */
186 s8 requested_power; /* power (dBm) requested for this chnl/rate */
187};
188
189/* current scan Tx power values to use, one for each scan rate for each
190 * channel. */
191struct iwl3945_scan_power_info {
192 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
193 s8 power_table_index; /* actual (compenst'd) index into gain table */
194 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
195};
196
197/*
198 * One for each channel, holds all channel setup data
199 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
200 * with one another!
201 */
202#define IWL4965_MAX_RATE (33)
203
204struct iwl3945_channel_info {
205 struct iwl3945_channel_tgd_info tgd;
206 struct iwl3945_channel_tgh_info tgh;
207 struct iwl3945_eeprom_channel eeprom; /* EEPROM regulatory limit */
208 struct iwl3945_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for
209 * FAT channel */
210
211 u8 channel; /* channel number */
212 u8 flags; /* flags copied from EEPROM */
213 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
214 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */
215 s8 min_power; /* always 0 */
216 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
217
218 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
219 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
220 enum ieee80211_band band;
221
222 /* Radio/DSP gain settings for each "normal" data Tx rate.
223 * These include, in addition to RF and DSP gain, a few fields for
224 * remembering/modifying gain settings (indexes). */
225 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
226
227 /* Radio/DSP gain settings for each scan rate, for directed scans. */
228 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
229};
230
231struct iwl3945_clip_group {
232 /* maximum power level to prevent clipping for each rate, derived by
233 * us from this band's saturation power in EEPROM */
234 const s8 clip_powers[IWL_MAX_RATES];
235};
236
237#include "iwl-3945-rs.h"
238 109
239#define IWL_TX_FIFO_AC0 0 110#define IWL_TX_FIFO_AC0 0
240#define IWL_TX_FIFO_AC1 1 111#define IWL_TX_FIFO_AC1 1
@@ -247,33 +118,6 @@ struct iwl3945_clip_group {
247/* Minimum number of queues. MAX_NUM is defined in hw specific files */ 118/* Minimum number of queues. MAX_NUM is defined in hw specific files */
248#define IWL_MIN_NUM_QUEUES 4 119#define IWL_MIN_NUM_QUEUES 4
249 120
250/* Power management (not Tx power) structures */
251
252struct iwl3945_power_vec_entry {
253 struct iwl3945_powertable_cmd cmd;
254 u8 no_dtim;
255};
256#define IWL_POWER_RANGE_0 (0)
257#define IWL_POWER_RANGE_1 (1)
258
259#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
260#define IWL_POWER_INDEX_3 0x03
261#define IWL_POWER_INDEX_5 0x05
262#define IWL_POWER_AC 0x06
263#define IWL_POWER_BATTERY 0x07
264#define IWL_POWER_LIMIT 0x07
265#define IWL_POWER_MASK 0x0F
266#define IWL_POWER_ENABLED 0x10
267#define IWL_POWER_LEVEL(x) ((x) & IWL_POWER_MASK)
268
269struct iwl3945_power_mgr {
270 spinlock_t lock;
271 struct iwl3945_power_vec_entry pwr_range_0[IWL_POWER_AC];
272 struct iwl3945_power_vec_entry pwr_range_1[IWL_POWER_AC];
273 u8 active_index;
274 u32 dtim_val;
275};
276
277#define IEEE80211_DATA_LEN 2304 121#define IEEE80211_DATA_LEN 2304
278#define IEEE80211_4ADDR_LEN 30 122#define IEEE80211_4ADDR_LEN 30
279#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) 123#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
@@ -289,81 +133,10 @@ struct iwl3945_frame {
289 struct list_head list; 133 struct list_head list;
290}; 134};
291 135
292#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf)
293#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8)
294#define SEQ_TO_INDEX(x) ((u8)(x & 0xff))
295#define INDEX_TO_SEQ(x) ((u8)(x & 0xff))
296#define SEQ_HUGE_FRAME (0x4000)
297#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
298#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 136#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
299#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 137#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
300#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 138#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
301 139
302enum {
303 /* CMD_SIZE_NORMAL = 0, */
304 CMD_SIZE_HUGE = (1 << 0),
305 /* CMD_SYNC = 0, */
306 CMD_ASYNC = (1 << 1),
307 /* CMD_NO_SKB = 0, */
308 CMD_WANT_SKB = (1 << 2),
309};
310
311struct iwl3945_cmd;
312struct iwl3945_priv;
313
314struct iwl3945_cmd_meta {
315 struct iwl3945_cmd_meta *source;
316 union {
317 struct sk_buff *skb;
318 int (*callback)(struct iwl3945_priv *priv,
319 struct iwl3945_cmd *cmd, struct sk_buff *skb);
320 } __attribute__ ((packed)) u;
321
322 /* The CMD_SIZE_HUGE flag bit indicates that the command
323 * structure is stored at the end of the shared queue memory. */
324 u32 flags;
325
326} __attribute__ ((packed));
327
328/**
329 * struct iwl3945_cmd
330 *
331 * For allocation of the command and tx queues, this establishes the overall
332 * size of the largest command we send to uCode, except for a scan command
333 * (which is relatively huge; space is allocated separately).
334 */
335struct iwl3945_cmd {
336 struct iwl3945_cmd_meta meta;
337 struct iwl3945_cmd_header hdr;
338 union {
339 struct iwl3945_addsta_cmd addsta;
340 struct iwl3945_led_cmd led;
341 u32 flags;
342 u8 val8;
343 u16 val16;
344 u32 val32;
345 struct iwl3945_bt_cmd bt;
346 struct iwl3945_rxon_time_cmd rxon_time;
347 struct iwl3945_powertable_cmd powertable;
348 struct iwl3945_qosparam_cmd qosparam;
349 struct iwl3945_tx_cmd tx;
350 struct iwl3945_tx_beacon_cmd tx_beacon;
351 struct iwl3945_rxon_assoc_cmd rxon_assoc;
352 u8 *indirect;
353 u8 payload[360];
354 } __attribute__ ((packed)) cmd;
355} __attribute__ ((packed));
356
357struct iwl3945_host_cmd {
358 u8 id;
359 u16 len;
360 struct iwl3945_cmd_meta meta;
361 const void *data;
362};
363
364#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl3945_cmd) - \
365 sizeof(struct iwl3945_cmd_meta))
366
367/* 140/*
368 * RX related structures and functions 141 * RX related structures and functions
369 */ 142 */
@@ -374,33 +147,6 @@ struct iwl3945_host_cmd {
374#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 147#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
375#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 148#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
376 149
377/**
378 * struct iwl3945_rx_queue - Rx queue
379 * @processed: Internal index to last handled Rx packet
380 * @read: Shared index to newest available Rx buffer
381 * @write: Shared index to oldest written Rx packet
382 * @free_count: Number of pre-allocated buffers in rx_free
383 * @rx_free: list of free SKBs for use
384 * @rx_used: List of Rx buffers with no SKB
385 * @need_update: flag to indicate we need to update read/write index
386 *
387 * NOTE: rx_free and rx_used are used as a FIFO for iwl3945_rx_mem_buffers
388 */
389struct iwl3945_rx_queue {
390 __le32 *bd;
391 dma_addr_t dma_addr;
392 struct iwl3945_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
393 struct iwl3945_rx_mem_buffer *queue[RX_QUEUE_SIZE];
394 u32 processed;
395 u32 read;
396 u32 write;
397 u32 free_count;
398 struct list_head rx_free;
399 struct list_head rx_used;
400 int need_update;
401 spinlock_t lock;
402};
403
404#define IWL_SUPPORTED_RATES_IE_LEN 8 150#define IWL_SUPPORTED_RATES_IE_LEN 8
405 151
406#define SCAN_INTERVAL 100 152#define SCAN_INTERVAL 100
@@ -430,87 +176,9 @@ struct iwl3945_rx_queue {
430#define IWL_INVALID_RATE 0xFF 176#define IWL_INVALID_RATE 0xFF
431#define IWL_INVALID_VALUE -1 177#define IWL_INVALID_VALUE -1
432 178
433struct iwl3945_tid_data {
434 u16 seq_number;
435};
436
437struct iwl3945_hw_key {
438 enum ieee80211_key_alg alg;
439 int keylen;
440 u8 key[32];
441};
442
443union iwl3945_ht_rate_supp {
444 u16 rates;
445 struct {
446 u8 siso_rate;
447 u8 mimo_rate;
448 };
449};
450
451union iwl3945_qos_capabity {
452 struct {
453 u8 edca_count:4; /* bit 0-3 */
454 u8 q_ack:1; /* bit 4 */
455 u8 queue_request:1; /* bit 5 */
456 u8 txop_request:1; /* bit 6 */
457 u8 reserved:1; /* bit 7 */
458 } q_AP;
459 struct {
460 u8 acvo_APSD:1; /* bit 0 */
461 u8 acvi_APSD:1; /* bit 1 */
462 u8 ac_bk_APSD:1; /* bit 2 */
463 u8 ac_be_APSD:1; /* bit 3 */
464 u8 q_ack:1; /* bit 4 */
465 u8 max_len:2; /* bit 5-6 */
466 u8 more_data_ack:1; /* bit 7 */
467 } q_STA;
468 u8 val;
469};
470
471/* QoS structures */
472struct iwl3945_qos_info {
473 int qos_active;
474 union iwl3945_qos_capabity qos_cap;
475 struct iwl3945_qosparam_cmd def_qos_parm;
476};
477
478#define STA_PS_STATUS_WAKE 0 179#define STA_PS_STATUS_WAKE 0
479#define STA_PS_STATUS_SLEEP 1 180#define STA_PS_STATUS_SLEEP 1
480 181
481struct iwl3945_station_entry {
482 struct iwl3945_addsta_cmd sta;
483 struct iwl3945_tid_data tid[MAX_TID_COUNT];
484 union {
485 struct {
486 u8 rate;
487 u8 flags;
488 } s;
489 u16 rate_n_flags;
490 } current_rate;
491 u8 used;
492 u8 ps_status;
493 struct iwl3945_hw_key keyinfo;
494};
495
496/* one for each uCode image (inst/data, boot/init/runtime) */
497struct fw_desc {
498 void *v_addr; /* access by driver */
499 dma_addr_t p_addr; /* access by card's busmaster DMA */
500 u32 len; /* bytes */
501};
502
503/* uCode file layout */
504struct iwl3945_ucode {
505 __le32 ver; /* major/minor/API/serial */
506 __le32 inst_size; /* bytes of runtime instructions */
507 __le32 data_size; /* bytes of runtime data */
508 __le32 init_size; /* bytes of initialization instructions */
509 __le32 init_data_size; /* bytes of initialization data */
510 __le32 boot_size; /* bytes of bootstrap instructions */
511 u8 data[0]; /* data in same order as "size" elements */
512};
513
514struct iwl3945_ibss_seq { 182struct iwl3945_ibss_seq {
515 u8 mac[ETH_ALEN]; 183 u8 mac[ETH_ALEN];
516 u16 seq_num; 184 u16 seq_num;
@@ -519,34 +187,6 @@ struct iwl3945_ibss_seq {
519 struct list_head list; 187 struct list_head list;
520}; 188};
521 189
522/**
523 * struct iwl3945_driver_hw_info
524 * @max_txq_num: Max # Tx queues supported
525 * @tx_cmd_len: Size of Tx command (but not including frame itself)
526 * @tx_ant_num: Number of TX antennas
527 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
528 * @rx_buf_size:
529 * @max_pkt_size:
530 * @max_rxq_log: Log-base-2 of max_rxq_size
531 * @max_stations:
532 * @bcast_sta_id:
533 * @shared_virt: Pointer to driver/uCode shared Tx Byte Counts and Rx status
534 * @shared_phys: Physical Pointer to Tx Byte Counts and Rx status
535 */
536struct iwl3945_driver_hw_info {
537 u16 max_txq_num;
538 u16 tx_cmd_len;
539 u16 tx_ant_num;
540 u16 max_rxq_size;
541 u32 rx_buf_size;
542 u32 max_pkt_size;
543 u16 max_rxq_log;
544 u8 max_stations;
545 u8 bcast_sta_id;
546 void *shared_virt;
547 dma_addr_t shared_phys;
548};
549
550#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\ 190#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
551 x->u.rx_frame.stats.payload + \ 191 x->u.rx_frame.stats.payload + \
552 x->u.rx_frame.stats.phy_count)) 192 x->u.rx_frame.stats.phy_count))
@@ -564,40 +204,30 @@ struct iwl3945_driver_hw_info {
564 * 204 *
565 *****************************************************************************/ 205 *****************************************************************************/
566struct iwl3945_addsta_cmd; 206struct iwl3945_addsta_cmd;
567extern int iwl3945_send_add_station(struct iwl3945_priv *priv, 207extern int iwl3945_send_add_station(struct iwl_priv *priv,
568 struct iwl3945_addsta_cmd *sta, u8 flags); 208 struct iwl3945_addsta_cmd *sta, u8 flags);
569extern u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *bssid, 209extern u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *bssid,
570 int is_ap, u8 flags); 210 int is_ap, u8 flags);
571extern int iwl3945_power_init_handle(struct iwl3945_priv *priv); 211extern int iwl3945_power_init_handle(struct iwl_priv *priv);
572extern int iwl3945_eeprom_init(struct iwl3945_priv *priv); 212extern int iwl3945_eeprom_init(struct iwl_priv *priv);
573extern int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv);
574extern void iwl3945_rx_queue_reset(struct iwl3945_priv *priv,
575 struct iwl3945_rx_queue *rxq);
576extern int iwl3945_calc_db_from_ratio(int sig_ratio); 213extern int iwl3945_calc_db_from_ratio(int sig_ratio);
577extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm); 214extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
578extern int iwl3945_tx_queue_init(struct iwl3945_priv *priv, 215extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
579 struct iwl3945_tx_queue *txq, int count, u32 id); 216 struct iwl_tx_queue *txq, int count, u32 id);
580extern void iwl3945_rx_replenish(void *data); 217extern void iwl3945_rx_replenish(void *data);
581extern void iwl3945_tx_queue_free(struct iwl3945_priv *priv, struct iwl3945_tx_queue *txq); 218extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
582extern int iwl3945_send_cmd_pdu(struct iwl3945_priv *priv, u8 id, u16 len, 219extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
583 const void *data); 220 const void *data);
584extern int __must_check iwl3945_send_cmd(struct iwl3945_priv *priv, 221extern int __must_check iwl3945_send_cmd(struct iwl_priv *priv,
585 struct iwl3945_host_cmd *cmd); 222 struct iwl_host_cmd *cmd);
586extern unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv, 223extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
587 struct ieee80211_hdr *hdr,int left); 224 struct ieee80211_hdr *hdr,int left);
588extern int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv,
589 struct iwl3945_rx_queue *q);
590extern int iwl3945_send_statistics_request(struct iwl3945_priv *priv);
591extern void iwl3945_set_decrypted_flag(struct iwl3945_priv *priv, struct sk_buff *skb,
592 u32 decrypt_res,
593 struct ieee80211_rx_status *stats);
594extern const u8 iwl3945_broadcast_addr[ETH_ALEN];
595 225
596/* 226/*
597 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't 227 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
598 * call this... todo... fix that. 228 * call this... todo... fix that.
599*/ 229*/
600extern u8 iwl3945_sync_station(struct iwl3945_priv *priv, int sta_id, 230extern u8 iwl3945_sync_station(struct iwl_priv *priv, int sta_id,
601 u16 tx_rate, u8 flags); 231 u16 tx_rate, u8 flags);
602 232
603/****************************************************************************** 233/******************************************************************************
@@ -616,36 +246,37 @@ extern u8 iwl3945_sync_station(struct iwl3945_priv *priv, int sta_id,
616 * iwl3945_mac_ <-- mac80211 callback 246 * iwl3945_mac_ <-- mac80211 callback
617 * 247 *
618 ****************************************************************************/ 248 ****************************************************************************/
619extern void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv); 249extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
620extern void iwl3945_hw_setup_deferred_work(struct iwl3945_priv *priv); 250extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
621extern void iwl3945_hw_cancel_deferred_work(struct iwl3945_priv *priv); 251extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
622extern int iwl3945_hw_rxq_stop(struct iwl3945_priv *priv); 252extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
623extern int iwl3945_hw_set_hw_setting(struct iwl3945_priv *priv); 253extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
624extern int iwl3945_hw_nic_init(struct iwl3945_priv *priv); 254extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
625extern int iwl3945_hw_nic_stop_master(struct iwl3945_priv *priv); 255extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
626extern void iwl3945_hw_txq_ctx_free(struct iwl3945_priv *priv); 256extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
627extern void iwl3945_hw_txq_ctx_stop(struct iwl3945_priv *priv); 257extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
628extern int iwl3945_hw_nic_reset(struct iwl3945_priv *priv); 258extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
629extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *tfd, 259extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
630 dma_addr_t addr, u16 len); 260 struct iwl_tx_queue *txq,
631extern int iwl3945_hw_txq_free_tfd(struct iwl3945_priv *priv, struct iwl3945_tx_queue *txq); 261 dma_addr_t addr, u16 len,
632extern int iwl3945_hw_get_temperature(struct iwl3945_priv *priv); 262 u8 reset, u8 pad);
633extern int iwl3945_hw_tx_queue_init(struct iwl3945_priv *priv, 263extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
634 struct iwl3945_tx_queue *txq); 264 struct iwl_tx_queue *txq);
635extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv, 265extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
266extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
267 struct iwl_tx_queue *txq);
268extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
636 struct iwl3945_frame *frame, u8 rate); 269 struct iwl3945_frame *frame, u8 rate);
637extern int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv); 270void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, struct iwl_cmd *cmd,
638extern void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
639 struct iwl3945_cmd *cmd,
640 struct ieee80211_tx_info *info, 271 struct ieee80211_tx_info *info,
641 struct ieee80211_hdr *hdr, 272 struct ieee80211_hdr *hdr,
642 int sta_id, int tx_id); 273 int sta_id, int tx_id);
643extern int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv); 274extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
644extern int iwl3945_hw_reg_set_txpower(struct iwl3945_priv *priv, s8 power); 275extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
645extern void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, 276extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
646 struct iwl3945_rx_mem_buffer *rxb); 277 struct iwl_rx_mem_buffer *rxb);
647extern void iwl3945_disable_events(struct iwl3945_priv *priv); 278extern void iwl3945_disable_events(struct iwl_priv *priv);
648extern int iwl4965_get_temperature(const struct iwl3945_priv *priv); 279extern int iwl4965_get_temperature(const struct iwl_priv *priv);
649 280
650/** 281/**
651 * iwl3945_hw_find_station - Find station id for a given BSSID 282 * iwl3945_hw_find_station - Find station id for a given BSSID
@@ -655,302 +286,26 @@ extern int iwl4965_get_temperature(const struct iwl3945_priv *priv);
655 * not yet been merged into a single common layer for managing the 286 * not yet been merged into a single common layer for managing the
656 * station tables. 287 * station tables.
657 */ 288 */
658extern u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *bssid); 289extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
659 290
660extern int iwl3945_hw_channel_switch(struct iwl3945_priv *priv, u16 channel); 291extern int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel);
661 292
662/* 293/*
663 * Forward declare iwl-3945.c functions for iwl-base.c 294 * Forward declare iwl-3945.c functions for iwl-base.c
664 */ 295 */
665extern __le32 iwl3945_get_antenna_flags(const struct iwl3945_priv *priv); 296extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
666extern int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv); 297extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
667extern void iwl3945_reg_txpower_periodic(struct iwl3945_priv *priv); 298extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
668extern int iwl3945_txpower_set_from_eeprom(struct iwl3945_priv *priv); 299extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
669extern u8 iwl3945_sync_sta(struct iwl3945_priv *priv, int sta_id, 300extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
670 u16 tx_rate, u8 flags); 301 u16 tx_rate, u8 flags);
671 302
303extern const struct iwl_channel_info *iwl3945_get_channel_info(
304 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
672 305
673#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 306extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
674
675enum {
676 MEASUREMENT_READY = (1 << 0),
677 MEASUREMENT_ACTIVE = (1 << 1),
678};
679
680#endif
681
682#ifdef CONFIG_IWL3945_RFKILL
683struct iwl3945_priv;
684
685void iwl3945_rfkill_set_hw_state(struct iwl3945_priv *priv);
686void iwl3945_rfkill_unregister(struct iwl3945_priv *priv);
687int iwl3945_rfkill_init(struct iwl3945_priv *priv);
688#else
689static inline void iwl3945_rfkill_set_hw_state(struct iwl3945_priv *priv) {}
690static inline void iwl3945_rfkill_unregister(struct iwl3945_priv *priv) {}
691static inline int iwl3945_rfkill_init(struct iwl3945_priv *priv) { return 0; }
692#endif
693
694#define IWL_MAX_NUM_QUEUES IWL39_MAX_NUM_QUEUES
695
696struct iwl3945_priv {
697
698 /* ieee device used by generic ieee processing code */
699 struct ieee80211_hw *hw;
700 struct ieee80211_channel *ieee_channels;
701 struct ieee80211_rate *ieee_rates;
702 struct iwl_3945_cfg *cfg; /* device configuration */
703
704 /* temporary frame storage list */
705 struct list_head free_frames;
706 int frames_count;
707
708 enum ieee80211_band band;
709 int alloc_rxb_skb;
710
711 void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv,
712 struct iwl3945_rx_mem_buffer *rxb);
713
714 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
715
716#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
717 /* spectrum measurement report caching */
718 struct iwl3945_spectrum_notification measure_report;
719 u8 measurement_status;
720#endif
721 /* ucode beacon time */
722 u32 ucode_beacon_time;
723
724 /* we allocate array of iwl3945_channel_info for NIC's valid channels.
725 * Access via channel # using indirect index array */
726 struct iwl3945_channel_info *channel_info; /* channel info array */
727 u8 channel_count; /* # of channels */
728
729 /* each calibration channel group in the EEPROM has a derived
730 * clip setting for each rate. */
731 const struct iwl3945_clip_group clip_groups[5];
732
733 /* thermal calibration */
734 s32 temperature; /* degrees Kelvin */
735 s32 last_temperature;
736
737 /* Scan related variables */
738 unsigned long last_scan_jiffies;
739 unsigned long next_scan_jiffies;
740 unsigned long scan_start;
741 unsigned long scan_pass_start;
742 unsigned long scan_start_tsf;
743 int scan_bands;
744 int one_direct_scan;
745 u8 direct_ssid_len;
746 u8 direct_ssid[IW_ESSID_MAX_SIZE];
747 struct iwl3945_scan_cmd *scan;
748
749 /* spinlock */
750 spinlock_t lock; /* protect general shared data */
751 spinlock_t hcmd_lock; /* protect hcmd */
752 struct mutex mutex;
753
754 /* basic pci-network driver stuff */
755 struct pci_dev *pci_dev;
756
757 /* pci hardware address support */
758 void __iomem *hw_base;
759
760 /* uCode images, save to reload in case of failure */
761 u32 ucode_ver; /* ucode version, copy of
762 iwl3945_ucode.ver */
763 struct fw_desc ucode_code; /* runtime inst */
764 struct fw_desc ucode_data; /* runtime data original */
765 struct fw_desc ucode_data_backup; /* runtime data save/restore */
766 struct fw_desc ucode_init; /* initialization inst */
767 struct fw_desc ucode_init_data; /* initialization data */
768 struct fw_desc ucode_boot; /* bootstrap inst */
769
770
771 struct iwl3945_rxon_time_cmd rxon_timing;
772
773 /* We declare this const so it can only be
774 * changed via explicit cast within the
775 * routines that actually update the physical
776 * hardware */
777 const struct iwl3945_rxon_cmd active_rxon;
778 struct iwl3945_rxon_cmd staging_rxon;
779
780 int error_recovering;
781 struct iwl3945_rxon_cmd recovery_rxon;
782
783 /* 1st responses from initialize and runtime uCode images.
784 * 4965's initialize alive response contains some calibration data. */
785 struct iwl3945_init_alive_resp card_alive_init;
786 struct iwl3945_alive_resp card_alive;
787
788#ifdef CONFIG_IWL3945_RFKILL
789 struct rfkill *rfkill;
790#endif
791
792#ifdef CONFIG_IWL3945_LEDS
793 struct iwl3945_led led[IWL_LED_TRG_MAX];
794 unsigned long last_blink_time;
795 u8 last_blink_rate;
796 u8 allow_blinking;
797 unsigned int rxtxpackets;
798 u64 led_tpt;
799#endif
800
801
802 u16 active_rate;
803 u16 active_rate_basic;
804
805 u32 sta_supp_rates;
806
807 u8 call_post_assoc_from_beacon;
808 /* Rate scaling data */
809 s8 data_retry_limit;
810 u8 retry_rate;
811
812 wait_queue_head_t wait_command_queue;
813
814 int activity_timer_active;
815
816 /* Rx and Tx DMA processing queues */
817 struct iwl3945_rx_queue rxq;
818 struct iwl3945_tx_queue txq[IWL_MAX_NUM_QUEUES];
819
820 unsigned long status;
821
822 int last_rx_rssi; /* From Rx packet statisitics */
823 int last_rx_noise; /* From beacon statistics */
824
825 struct iwl3945_power_mgr power_data;
826
827 struct iwl3945_notif_statistics statistics;
828 unsigned long last_statistics_time;
829
830 /* context information */
831 u16 rates_mask;
832
833 u32 power_mode;
834 u32 antenna;
835 u8 bssid[ETH_ALEN];
836 u16 rts_threshold;
837 u8 mac_addr[ETH_ALEN];
838
839 /*station table variables */
840 spinlock_t sta_lock;
841 int num_stations;
842 struct iwl3945_station_entry stations[IWL_STATION_COUNT];
843
844 /* Indication if ieee80211_ops->open has been called */
845 u8 is_open;
846 307
847 u8 mac80211_registered; 308/* Requires full declaration of iwl_priv before including */
848 309#include "iwl-io.h"
849 /* Rx'd packet timing information */
850 u32 last_beacon_time;
851 u64 last_tsf;
852
853 /* eeprom */
854 struct iwl3945_eeprom eeprom;
855
856 enum nl80211_iftype iw_mode;
857
858 struct sk_buff *ibss_beacon;
859
860 /* Last Rx'd beacon timestamp */
861 u32 timestamp0;
862 u32 timestamp1;
863 u16 beacon_int;
864 struct iwl3945_driver_hw_info hw_setting;
865 struct ieee80211_vif *vif;
866
867 /* Current association information needed to configure the
868 * hardware */
869 u16 assoc_id;
870 u16 assoc_capability;
871 u8 ps_mode;
872
873 struct iwl3945_qos_info qos_data;
874
875 struct workqueue_struct *workqueue;
876
877 struct work_struct up;
878 struct work_struct restart;
879 struct work_struct calibrated_work;
880 struct work_struct scan_completed;
881 struct work_struct rx_replenish;
882 struct work_struct rf_kill;
883 struct work_struct abort_scan;
884 struct work_struct update_link_led;
885 struct work_struct auth_work;
886 struct work_struct report_work;
887 struct work_struct request_scan;
888 struct work_struct beacon_update;
889
890 struct tasklet_struct irq_tasklet;
891
892 struct delayed_work init_alive_start;
893 struct delayed_work alive_start;
894 struct delayed_work activity_timer;
895 struct delayed_work thermal_periodic;
896 struct delayed_work gather_stats;
897 struct delayed_work scan_check;
898
899#define IWL_DEFAULT_TX_POWER 0x0F
900 s8 user_txpower_limit;
901 s8 max_channel_txpower_limit;
902
903
904#ifdef CONFIG_IWL3945_DEBUG
905 /* debugging info */
906 u32 framecnt_to_us;
907 atomic_t restrict_refcnt;
908#endif
909}; /*iwl3945_priv */
910
911static inline int iwl3945_is_associated(struct iwl3945_priv *priv)
912{
913 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
914}
915
916static inline int is_channel_valid(const struct iwl3945_channel_info *ch_info)
917{
918 if (ch_info == NULL)
919 return 0;
920 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
921}
922
923static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info)
924{
925 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
926}
927
928static inline u8 is_channel_a_band(const struct iwl3945_channel_info *ch_info)
929{
930 return ch_info->band == IEEE80211_BAND_5GHZ;
931}
932
933static inline u8 is_channel_bg_band(const struct iwl3945_channel_info *ch_info)
934{
935 return ch_info->band == IEEE80211_BAND_2GHZ;
936}
937
938static inline int is_channel_passive(const struct iwl3945_channel_info *ch)
939{
940 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
941}
942
943static inline int is_channel_ibss(const struct iwl3945_channel_info *ch)
944{
945 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
946}
947
948extern const struct iwl3945_channel_info *iwl3945_get_channel_info(
949 const struct iwl3945_priv *priv, enum ieee80211_band band, u16 channel);
950
951extern int iwl3945_rs_next_rate(struct iwl3945_priv *priv, int rate);
952
953/* Requires full declaration of iwl3945_priv before including */
954#include "iwl-3945-io.h"
955 310
956#endif 311#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 6649f7b5565..a71a489096f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -89,64 +89,43 @@
89#define LONG_SLOT_TIME 20 89#define LONG_SLOT_TIME 20
90 90
91/* RSSI to dBm */ 91/* RSSI to dBm */
92#define IWL_RSSI_OFFSET 44 92#define IWL49_RSSI_OFFSET 44
93
94 93
95 94
96/* PCI registers */ 95/* PCI registers */
97#define PCI_CFG_RETRY_TIMEOUT 0x041 96#define PCI_CFG_RETRY_TIMEOUT 0x041
98#define PCI_CFG_POWER_SOURCE 0x0C8
99#define PCI_REG_WUM8 0x0E8
100#define PCI_CFG_LINK_CTRL 0x0F0
101 97
102/* PCI register values */ 98/* PCI register values */
103#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 99#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
104#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 100#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
105#define PCI_CFG_CMD_REG_INT_DIS_MSK 0x04
106#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
107
108 101
109#define IWL_NUM_SCAN_RATES (2) 102#define IWL_NUM_SCAN_RATES (2)
110 103
111#define IWL_DEFAULT_TX_RETRY 15 104#define IWL_DEFAULT_TX_RETRY 15
112 105
113#define RX_QUEUE_SIZE 256
114#define RX_QUEUE_MASK 255
115#define RX_QUEUE_SIZE_LOG 8
116
117#define TFD_TX_CMD_SLOTS 256
118#define TFD_CMD_SLOTS 32
119
120/*
121 * RX related structures and functions
122 */
123#define RX_FREE_BUFFERS 64
124#define RX_LOW_WATERMARK 8
125
126/* Size of one Rx buffer in host DRAM */
127#define IWL_RX_BUF_SIZE_4K (4 * 1024)
128#define IWL_RX_BUF_SIZE_8K (8 * 1024)
129 106
130/* Sizes and addresses for instruction and data memory (SRAM) in 107/* Sizes and addresses for instruction and data memory (SRAM) in
131 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 108 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
132#define RTC_INST_LOWER_BOUND (0x000000) 109#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
133#define IWL49_RTC_INST_UPPER_BOUND (0x018000) 110#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
134 111
135#define RTC_DATA_LOWER_BOUND (0x800000) 112#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
136#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) 113#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
137 114
138#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 115#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
139#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) 116 IWL49_RTC_INST_LOWER_BOUND)
117#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
118 IWL49_RTC_DATA_LOWER_BOUND)
140 119
141#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE 120#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
142#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE 121#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
143 122
144/* Size of uCode instruction memory in bootstrap state machine */ 123/* Size of uCode instruction memory in bootstrap state machine */
145#define IWL_MAX_BSM_SIZE BSM_SRAM_SIZE 124#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
146 125
147static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr) 126static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
148{ 127{
149 return (addr >= RTC_DATA_LOWER_BOUND) && 128 return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
150 (addr < IWL49_RTC_DATA_UPPER_BOUND); 129 (addr < IWL49_RTC_DATA_UPPER_BOUND);
151} 130}
152 131
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 5a72bc0377d..bd0140be774 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -76,7 +76,7 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
76 u32 reg; 76 u32 reg;
77 u32 val; 77 u32 val;
78 78
79 IWL_DEBUG_INFO("Begin verify bsm\n"); 79 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
80 80
81 /* verify BSM SRAM contents */ 81 /* verify BSM SRAM contents */
82 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); 82 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
@@ -85,7 +85,7 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
85 reg += sizeof(u32), image++) { 85 reg += sizeof(u32), image++) {
86 val = iwl_read_prph(priv, reg); 86 val = iwl_read_prph(priv, reg);
87 if (val != le32_to_cpu(*image)) { 87 if (val != le32_to_cpu(*image)) {
88 IWL_ERROR("BSM uCode verification failed at " 88 IWL_ERR(priv, "BSM uCode verification failed at "
89 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", 89 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
90 BSM_SRAM_LOWER_BOUND, 90 BSM_SRAM_LOWER_BOUND,
91 reg - BSM_SRAM_LOWER_BOUND, len, 91 reg - BSM_SRAM_LOWER_BOUND, len,
@@ -94,7 +94,7 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
94 } 94 }
95 } 95 }
96 96
97 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n"); 97 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
98 98
99 return 0; 99 return 0;
100} 100}
@@ -144,12 +144,12 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
144 u32 reg_offset; 144 u32 reg_offset;
145 int ret; 145 int ret;
146 146
147 IWL_DEBUG_INFO("Begin load bsm\n"); 147 IWL_DEBUG_INFO(priv, "Begin load bsm\n");
148 148
149 priv->ucode_type = UCODE_RT; 149 priv->ucode_type = UCODE_RT;
150 150
151 /* make sure bootstrap program is no larger than BSM's SRAM size */ 151 /* make sure bootstrap program is no larger than BSM's SRAM size */
152 if (len > IWL_MAX_BSM_SIZE) 152 if (len > IWL49_MAX_BSM_SIZE)
153 return -EINVAL; 153 return -EINVAL;
154 154
155 /* Tell bootstrap uCode where to find the "Initialize" uCode 155 /* Tell bootstrap uCode where to find the "Initialize" uCode
@@ -186,7 +186,7 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
186 186
187 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ 187 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
188 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); 188 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
189 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND); 189 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
190 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); 190 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
191 191
192 /* Load bootstrap code into instruction SRAM now, 192 /* Load bootstrap code into instruction SRAM now,
@@ -201,9 +201,9 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
201 udelay(10); 201 udelay(10);
202 } 202 }
203 if (i < 100) 203 if (i < 100)
204 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i); 204 IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
205 else { 205 else {
206 IWL_ERROR("BSM write did not complete!\n"); 206 IWL_ERR(priv, "BSM write did not complete!\n");
207 return -EIO; 207 return -EIO;
208 } 208 }
209 209
@@ -257,7 +257,7 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
257 257
258 spin_unlock_irqrestore(&priv->lock, flags); 258 spin_unlock_irqrestore(&priv->lock, flags);
259 259
260 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); 260 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
261 261
262 return ret; 262 return ret;
263} 263}
@@ -279,7 +279,7 @@ static void iwl4965_init_alive_start(struct iwl_priv *priv)
279 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { 279 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
280 /* We had an error bringing up the hardware, so take it 280 /* We had an error bringing up the hardware, so take it
281 * all the way back down so we can try again */ 281 * all the way back down so we can try again */
282 IWL_DEBUG_INFO("Initialize Alive failed.\n"); 282 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
283 goto restart; 283 goto restart;
284 } 284 }
285 285
@@ -289,7 +289,7 @@ static void iwl4965_init_alive_start(struct iwl_priv *priv)
289 if (iwl_verify_ucode(priv)) { 289 if (iwl_verify_ucode(priv)) {
290 /* Runtime instruction load was bad; 290 /* Runtime instruction load was bad;
291 * take it all the way back down so we can try again */ 291 * take it all the way back down so we can try again */
292 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); 292 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
293 goto restart; 293 goto restart;
294 } 294 }
295 295
@@ -299,11 +299,11 @@ static void iwl4965_init_alive_start(struct iwl_priv *priv)
299 /* Send pointers to protocol/runtime uCode image ... init code will 299 /* Send pointers to protocol/runtime uCode image ... init code will
300 * load and launch runtime uCode, which will send us another "Alive" 300 * load and launch runtime uCode, which will send us another "Alive"
301 * notification. */ 301 * notification. */
302 IWL_DEBUG_INFO("Initialization Alive received.\n"); 302 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
303 if (iwl4965_set_ucode_ptrs(priv)) { 303 if (iwl4965_set_ucode_ptrs(priv)) {
304 /* Runtime instruction load won't happen; 304 /* Runtime instruction load won't happen;
305 * take it all the way back down so we can try again */ 305 * take it all the way back down so we can try again */
306 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); 306 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
307 goto restart; 307 goto restart;
308 } 308 }
309 return; 309 return;
@@ -354,7 +354,7 @@ static int iwl4965_apm_init(struct iwl_priv *priv)
354 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL, 354 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
355 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 355 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
356 if (ret < 0) { 356 if (ret < 0) {
357 IWL_DEBUG_INFO("Failed to init the card\n"); 357 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
358 goto out; 358 goto out;
359 } 359 }
360 360
@@ -381,27 +381,20 @@ out:
381static void iwl4965_nic_config(struct iwl_priv *priv) 381static void iwl4965_nic_config(struct iwl_priv *priv)
382{ 382{
383 unsigned long flags; 383 unsigned long flags;
384 u32 val;
385 u16 radio_cfg; 384 u16 radio_cfg;
386 u16 link; 385 u16 lctl;
387 386
388 spin_lock_irqsave(&priv->lock, flags); 387 spin_lock_irqsave(&priv->lock, flags);
389 388
390 if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) { 389 lctl = iwl_pcie_link_ctl(priv);
391 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
392 /* Enable No Snoop field */
393 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
394 val & ~(1 << 11));
395 }
396
397 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);
398 390
399 /* L1 is enabled by BIOS */ 391 /* HW bug W/A - negligible power consumption */
400 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN) 392 /* L1-ASPM is enabled by BIOS */
401 /* disable L0S disabled L1A enabled */ 393 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
394 /* L1-ASPM enabled: disable L0S */
402 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 395 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
403 else 396 else
404 /* L0S enabled L1A disabled */ 397 /* L1-ASPM disabled: enable L0S */
405 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 398 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
406 399
407 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 400 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
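Both nic_config changes in this patch (here for the 4965, later for the 5000) drop the open-coded pci_read_config_word() in favour of an iwl_pcie_link_ctl() helper and key the L0S decision off the PCIe Link Control L1-ASPM bit, as the new comments spell out: L1 enabled means L0S stays off, and vice versa. A self-contained sketch of that decision using the standard PCIe bit positions (the macro names below are illustrative, not the driver's CSR definitions):

#include <stdbool.h>
#include <stdint.h>

#define LNKCTL_ASPM_L0S	0x1	/* PCIe Link Control: L0s entry enabled */
#define LNKCTL_ASPM_L1	0x2	/* PCIe Link Control: L1 entry enabled */

/* Mirror of the branch above: if the BIOS left L1-ASPM enabled, L0S should
 * stay disabled; otherwise L0S may be enabled. */
static bool want_l0s(uint16_t lnkctl)
{
	return !(lnkctl & LNKCTL_ASPM_L1);
}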
@@ -437,7 +430,7 @@ static int iwl4965_apm_stop_master(struct iwl_priv *priv)
437 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 430 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
438 431
439 spin_unlock_irqrestore(&priv->lock, flags); 432 spin_unlock_irqrestore(&priv->lock, flags);
440 IWL_DEBUG_INFO("stop master\n"); 433 IWL_DEBUG_INFO(priv, "stop master\n");
441 434
442 return 0; 435 return 0;
443} 436}
@@ -523,9 +516,10 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
523 cmd.diff_gain_c = 0; 516 cmd.diff_gain_c = 0;
524 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 517 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
525 sizeof(cmd), &cmd)) 518 sizeof(cmd), &cmd))
526 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n"); 519 IWL_ERR(priv,
520 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
527 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 521 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
528 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 522 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
529 } 523 }
530} 524}
531 525
@@ -557,7 +551,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
557 data->delta_gain_code[i] = 0; 551 data->delta_gain_code[i] = 0;
558 } 552 }
559 } 553 }
560 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n", 554 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
561 data->delta_gain_code[0], 555 data->delta_gain_code[0],
562 data->delta_gain_code[1], 556 data->delta_gain_code[1],
563 data->delta_gain_code[2]); 557 data->delta_gain_code[2]);
@@ -575,7 +569,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
575 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 569 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
576 sizeof(cmd), &cmd); 570 sizeof(cmd), &cmd);
577 if (ret) 571 if (ret)
578 IWL_DEBUG_CALIB("fail sending cmd " 572 IWL_DEBUG_CALIB(priv, "fail sending cmd "
579 "REPLY_PHY_CALIBRATION_CMD \n"); 573 "REPLY_PHY_CALIBRATION_CMD \n");
580 574
581 /* TODO we might want recalculate 575 /* TODO we might want recalculate
@@ -668,7 +662,7 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
668 662
669 txq->sched_retry = scd_retry; 663 txq->sched_retry = scd_retry;
670 664
671 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n", 665 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
672 active ? "Activate" : "Deactivate", 666 active ? "Activate" : "Deactivate",
673 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 667 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
674} 668}
@@ -804,8 +798,9 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
804 798
805 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) || 799 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
806 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 800 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
807 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 801 IWL_ERR(priv,
808 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES); 802 "invalid queues_num, should be between %d and %d\n",
803 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
809 return -EINVAL; 804 return -EINVAL;
810 } 805 }
811 806
@@ -813,6 +808,7 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
813 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; 808 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
814 priv->hw_params.scd_bc_tbls_size = 809 priv->hw_params.scd_bc_tbls_size =
815 IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl); 810 IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl);
811 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
816 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 812 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
817 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 813 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
818 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; 814 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
@@ -820,6 +816,8 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
820 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE; 816 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
821 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ); 817 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);
822 818
819 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
820
823 priv->hw_params.tx_chains_num = 2; 821 priv->hw_params.tx_chains_num = 2;
824 priv->hw_params.rx_chains_num = 2; 822 priv->hw_params.rx_chains_num = 2;
825 priv->hw_params.valid_tx_ant = ANT_A | ANT_B; 823 priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
@@ -902,7 +900,6 @@ static s32 iwl4965_get_tx_atten_grp(u16 channel)
902 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) 900 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
903 return CALIB_CH_GROUP_4; 901 return CALIB_CH_GROUP_4;
904 902
905 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
906 return -1; 903 return -1;
907} 904}
908 905
@@ -956,7 +953,7 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
956 953
957 s = iwl4965_get_sub_band(priv, channel); 954 s = iwl4965_get_sub_band(priv, channel);
958 if (s >= EEPROM_TX_POWER_BANDS) { 955 if (s >= EEPROM_TX_POWER_BANDS) {
959 IWL_ERROR("Tx Power can not find channel %d\n", channel); 956 IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
960 return -1; 957 return -1;
961 } 958 }
962 959
@@ -964,7 +961,7 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
964 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num; 961 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
965 chan_info->ch_num = (u8) channel; 962 chan_info->ch_num = (u8) channel;
966 963
967 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", 964 IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
968 channel, s, ch_i1, ch_i2); 965 channel, s, ch_i1, ch_i2);
969 966
970 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { 967 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
@@ -994,19 +991,19 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
994 m1->pa_det, ch_i2, 991 m1->pa_det, ch_i2,
995 m2->pa_det); 992 m2->pa_det);
996 993
997 IWL_DEBUG_TXPOWER 994 IWL_DEBUG_TXPOWER(priv,
998 ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, 995 "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
999 m1->actual_pow, m2->actual_pow, omeas->actual_pow); 996 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
1000 IWL_DEBUG_TXPOWER 997 IWL_DEBUG_TXPOWER(priv,
1001 ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, 998 "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
1002 m1->gain_idx, m2->gain_idx, omeas->gain_idx); 999 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
1003 IWL_DEBUG_TXPOWER 1000 IWL_DEBUG_TXPOWER(priv,
1004 ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, 1001 "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
1005 m1->pa_det, m2->pa_det, omeas->pa_det); 1002 m1->pa_det, m2->pa_det, omeas->pa_det);
1006 IWL_DEBUG_TXPOWER 1003 IWL_DEBUG_TXPOWER(priv,
1007 ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m, 1004 "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
1008 m1->temperature, m2->temperature, 1005 m1->temperature, m2->temperature,
1009 omeas->temperature); 1006 omeas->temperature);
1010 } 1007 }
1011 } 1008 }
1012 1009
@@ -1303,12 +1300,12 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1303 s32 factory_actual_pwr[2]; 1300 s32 factory_actual_pwr[2];
1304 s32 power_index; 1301 s32 power_index;
1305 1302
1306 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units 1303 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
1307 * are used for indexing into txpower table) */ 1304 * are used for indexing into txpower table) */
1308 user_target_power = 2 * priv->tx_power_user_lmt; 1305 user_target_power = 2 * priv->tx_power_user_lmt;
1309 1306
1310 /* Get current (RXON) channel, band, width */ 1307 /* Get current (RXON) channel, band, width */
1311 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band, 1308 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_fat %d\n", channel, band,
1312 is_fat); 1309 is_fat);
1313 1310
1314 ch_info = iwl_get_channel_info(priv, priv->band, channel); 1311 ch_info = iwl_get_channel_info(priv, priv->band, channel);
@@ -1319,10 +1316,13 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1319 /* get txatten group, used to select 1) thermal txpower adjustment 1316 /* get txatten group, used to select 1) thermal txpower adjustment
1320 * and 2) mimo txpower balance between Tx chains. */ 1317 * and 2) mimo txpower balance between Tx chains. */
1321 txatten_grp = iwl4965_get_tx_atten_grp(channel); 1318 txatten_grp = iwl4965_get_tx_atten_grp(channel);
1322 if (txatten_grp < 0) 1319 if (txatten_grp < 0) {
1320 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
1321 channel);
1323 return -EINVAL; 1322 return -EINVAL;
1323 }
1324 1324
1325 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n", 1325 IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
1326 channel, txatten_grp); 1326 channel, txatten_grp);
1327 1327
1328 if (is_fat) { 1328 if (is_fat) {
@@ -1372,7 +1372,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1372 voltage_compensation = 1372 voltage_compensation =
1373 iwl4965_get_voltage_compensation(voltage, init_voltage); 1373 iwl4965_get_voltage_compensation(voltage, init_voltage);
1374 1374
1375 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", 1375 IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
1376 init_voltage, 1376 init_voltage,
1377 voltage, voltage_compensation); 1377 voltage, voltage_compensation);
1378 1378
@@ -1403,13 +1403,13 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1403 factory_gain_index[c] = measurement->gain_idx; 1403 factory_gain_index[c] = measurement->gain_idx;
1404 factory_actual_pwr[c] = measurement->actual_pow; 1404 factory_actual_pwr[c] = measurement->actual_pow;
1405 1405
1406 IWL_DEBUG_TXPOWER("chain = %d\n", c); 1406 IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
1407 IWL_DEBUG_TXPOWER("fctry tmp %d, " 1407 IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
1408 "curr tmp %d, comp %d steps\n", 1408 "curr tmp %d, comp %d steps\n",
1409 factory_temp, current_temp, 1409 factory_temp, current_temp,
1410 temperature_comp[c]); 1410 temperature_comp[c]);
1411 1411
1412 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n", 1412 IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
1413 factory_gain_index[c], 1413 factory_gain_index[c],
1414 factory_actual_pwr[c]); 1414 factory_actual_pwr[c]);
1415 } 1415 }
@@ -1442,7 +1442,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1442 if (target_power > power_limit) 1442 if (target_power > power_limit)
1443 target_power = power_limit; 1443 target_power = power_limit;
1444 1444
1445 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", 1445 IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
1446 i, saturation_power - back_off_table[i], 1446 i, saturation_power - back_off_table[i],
1447 current_regulatory, user_target_power, 1447 current_regulatory, user_target_power,
1448 target_power); 1448 target_power);
@@ -1466,7 +1466,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1466 voltage_compensation + 1466 voltage_compensation +
1467 atten_value); 1467 atten_value);
1468 1468
1469/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n", 1469/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
1470 power_index); */ 1470 power_index); */
1471 1471
1472 if (power_index < get_min_power_index(i, band)) 1472 if (power_index < get_min_power_index(i, band))
@@ -1483,12 +1483,12 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1483 1483
1484 /* stay within the table! */ 1484 /* stay within the table! */
1485 if (power_index > 107) { 1485 if (power_index > 107) {
1486 IWL_WARNING("txpower index %d > 107\n", 1486 IWL_WARN(priv, "txpower index %d > 107\n",
1487 power_index); 1487 power_index);
1488 power_index = 107; 1488 power_index = 107;
1489 } 1489 }
1490 if (power_index < 0) { 1490 if (power_index < 0) {
1491 IWL_WARNING("txpower index %d < 0\n", 1491 IWL_WARN(priv, "txpower index %d < 0\n",
1492 power_index); 1492 power_index);
1493 power_index = 0; 1493 power_index = 0;
1494 } 1494 }
@@ -1499,7 +1499,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1499 tx_power.s.dsp_predis_atten[c] = 1499 tx_power.s.dsp_predis_atten[c] =
1500 gain_table[band][power_index].dsp; 1500 gain_table[band][power_index].dsp;
1501 1501
1502 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d " 1502 IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
1503 "gain 0x%02x dsp %d\n", 1503 "gain 0x%02x dsp %d\n",
1504 c, atten_value, power_index, 1504 c, atten_value, power_index,
1505 tx_power.s.radio_tx_gain[c], 1505 tx_power.s.radio_tx_gain[c],
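Before the gain table is indexed, power_index is clamped to the table's valid range: anything above 107 or below 0 only produces a warning and gets pinned to the boundary. The same guard as a stand-alone helper (logging reduced to stderr):

#include <stdio.h>

#define GAIN_TABLE_LAST_INDEX	107	/* highest valid entry, per the check above */

static int clamp_power_index(int power_index)
{
	if (power_index > GAIN_TABLE_LAST_INDEX) {
		fprintf(stderr, "txpower index %d > %d\n",
			power_index, GAIN_TABLE_LAST_INDEX);
		power_index = GAIN_TABLE_LAST_INDEX;
	}
	if (power_index < 0) {
		fprintf(stderr, "txpower index %d < 0\n", power_index);
		power_index = 0;
	}
	return power_index;
}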
@@ -1531,7 +1531,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1531 /* If this gets hit a lot, switch it to a BUG() and catch 1531 /* If this gets hit a lot, switch it to a BUG() and catch
1532 * the stack trace to find out who is calling this during 1532 * the stack trace to find out who is calling this during
1533 * a scan. */ 1533 * a scan. */
1534 IWL_WARNING("TX Power requested while scanning!\n"); 1534 IWL_WARN(priv, "TX Power requested while scanning!\n");
1535 return -EAGAIN; 1535 return -EAGAIN;
1536 } 1536 }
1537 1537
@@ -1574,7 +1574,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1574 rxon2->ofdm_ht_dual_stream_basic_rates) && 1574 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1575 (rxon1->rx_chain == rxon2->rx_chain) && 1575 (rxon1->rx_chain == rxon2->rx_chain) &&
1576 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { 1576 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1577 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n"); 1577 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1578 return 0; 1578 return 0;
1579 } 1579 }
1580 1580
@@ -1631,7 +1631,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1631 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat, 1631 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
1632 ctrl_chan_high, &cmd.tx_power); 1632 ctrl_chan_high, &cmd.tx_power);
1633 if (rc) { 1633 if (rc) {
1634 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc); 1634 IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
1635 return rc; 1635 return rc;
1636 } 1636 }
1637 1637
@@ -1696,13 +1696,13 @@ static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
1696 1696
1697 if (test_bit(STATUS_TEMPERATURE, &priv->status) && 1697 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1698 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) { 1698 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
1699 IWL_DEBUG_TEMP("Running FAT temperature calibration\n"); 1699 IWL_DEBUG_TEMP(priv, "Running FAT temperature calibration\n");
1700 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); 1700 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1701 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); 1701 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1702 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); 1702 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1703 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); 1703 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1704 } else { 1704 } else {
1705 IWL_DEBUG_TEMP("Running temperature calibration\n"); 1705 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
1706 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); 1706 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1707 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); 1707 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1708 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); 1708 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
@@ -1722,10 +1722,10 @@ static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
1722 vt = sign_extend( 1722 vt = sign_extend(
1723 le32_to_cpu(priv->statistics.general.temperature), 23); 1723 le32_to_cpu(priv->statistics.general.temperature), 23);
1724 1724
1725 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); 1725 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1726 1726
1727 if (R3 == R1) { 1727 if (R3 == R1) {
1728 IWL_ERROR("Calibration conflict R1 == R3\n"); 1728 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
1729 return -1; 1729 return -1;
1730 } 1730 }
1731 1731
@@ -1735,7 +1735,7 @@ static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
1735 temperature /= (R3 - R1); 1735 temperature /= (R3 - R1);
1736 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; 1736 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1737 1737
1738 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", 1738 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
1739 temperature, KELVIN_TO_CELSIUS(temperature)); 1739 temperature, KELVIN_TO_CELSIUS(temperature));
1740 1740
1741 return temperature; 1741 return temperature;
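The temperature hunk only switches the debug and error macros, but the arithmetic around it is worth restating: after rejecting R3 == R1 (which would divide by zero), the intermediate value is divided by (R3 - R1), scaled by 97/100, and offset into Kelvin. A small sketch of those visible steps (the numerator is computed just above from vt and the other calibration values and is simply passed in here; kelvin_offset stands for TEMPERATURE_CALIB_KELVIN_OFFSET):

/* Finish the calibration steps visible in the hunk above. */
static int calibrated_temp(int numer, int r1, int r3, int kelvin_offset)
{
	int temperature;

	if (r3 == r1)		/* calibration conflict, as checked above */
		return -1;

	temperature = numer / (r3 - r1);
	temperature = (temperature * 97) / 100 + kelvin_offset;
	return temperature;	/* Kelvin */
}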
@@ -1758,7 +1758,7 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1758 int temp_diff; 1758 int temp_diff;
1759 1759
1760 if (!test_bit(STATUS_STATISTICS, &priv->status)) { 1760 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1761 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n"); 1761 IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
1762 return 0; 1762 return 0;
1763 } 1763 }
1764 1764
@@ -1766,19 +1766,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1766 1766
1767 /* get absolute value */ 1767 /* get absolute value */
1768 if (temp_diff < 0) { 1768 if (temp_diff < 0) {
1769 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff); 1769 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff);
1770 temp_diff = -temp_diff; 1770 temp_diff = -temp_diff;
1771 } else if (temp_diff == 0) 1771 } else if (temp_diff == 0)
1772 IWL_DEBUG_POWER("Same temp, \n"); 1772 IWL_DEBUG_POWER(priv, "Same temp, \n");
1773 else 1773 else
1774 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff); 1774 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff);
1775 1775
1776 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { 1776 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1777 IWL_DEBUG_POWER("Thermal txpower calib not needed\n"); 1777 IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n");
1778 return 0; 1778 return 0;
1779 } 1779 }
1780 1780
1781 IWL_DEBUG_POWER("Thermal txpower calib needed\n"); 1781 IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n");
1782 1782
1783 return 1; 1783 return 1;
1784} 1784}
@@ -1793,12 +1793,12 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1793 1793
1794 if (priv->temperature != temp) { 1794 if (priv->temperature != temp) {
1795 if (priv->temperature) 1795 if (priv->temperature)
1796 IWL_DEBUG_TEMP("Temperature changed " 1796 IWL_DEBUG_TEMP(priv, "Temperature changed "
1797 "from %dC to %dC\n", 1797 "from %dC to %dC\n",
1798 KELVIN_TO_CELSIUS(priv->temperature), 1798 KELVIN_TO_CELSIUS(priv->temperature),
1799 KELVIN_TO_CELSIUS(temp)); 1799 KELVIN_TO_CELSIUS(temp));
1800 else 1800 else
1801 IWL_DEBUG_TEMP("Temperature " 1801 IWL_DEBUG_TEMP(priv, "Temperature "
1802 "initialized to %dC\n", 1802 "initialized to %dC\n",
1803 KELVIN_TO_CELSIUS(temp)); 1803 KELVIN_TO_CELSIUS(temp));
1804 } 1804 }
@@ -1837,7 +1837,8 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1837 1837
1838 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1838 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1839 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1839 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
1840 IWL_WARNING("queue number out of range: %d, must be %d to %d\n", 1840 IWL_WARN(priv,
1841 "queue number out of range: %d, must be %d to %d\n",
1841 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1842 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1842 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1843 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
1843 return -EINVAL; 1844 return -EINVAL;
@@ -1908,7 +1909,8 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1908 1909
1909 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1910 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1910 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1911 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
1911 IWL_WARNING("queue number out of range: %d, must be %d to %d\n", 1912 IWL_WARN(priv,
1913 "queue number out of range: %d, must be %d to %d\n",
1912 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1914 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1913 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1915 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
1914 return -EINVAL; 1916 return -EINVAL;
@@ -1986,8 +1988,8 @@ static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1986 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; 1988 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1987 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; 1989 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1988 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; 1990 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1989 addsta->reserved1 = __constant_cpu_to_le16(0); 1991 addsta->reserved1 = cpu_to_le16(0);
1990 addsta->reserved2 = __constant_cpu_to_le32(0); 1992 addsta->reserved2 = cpu_to_le32(0);
1991 1993
1992 return (u16)sizeof(struct iwl4965_addsta_cmd); 1994 return (u16)sizeof(struct iwl4965_addsta_cmd);
1993} 1995}
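The addsta helper above replaces __constant_cpu_to_le16()/__constant_cpu_to_le32() with the plain cpu_to_le16()/cpu_to_le32(); the plain forms handle constant arguments just as well, so the __constant_ spellings add nothing in driver code. A user-space analogue of the same conversion using glibc's <endian.h> helpers (the kernel macros themselves are not available outside the kernel):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t reserved1 = htole16(0);	/* works for constants and variables alike */
	uint32_t reserved2 = htole32(0);

	printf("%u %u\n", (unsigned)reserved1, (unsigned)reserved2);
	return 0;
}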
@@ -2013,7 +2015,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2013 int i, sh, idx; 2015 int i, sh, idx;
2014 u16 seq; 2016 u16 seq;
2015 if (agg->wait_for_ba) 2017 if (agg->wait_for_ba)
2016 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n"); 2018 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
2017 2019
2018 agg->frame_count = tx_resp->frame_count; 2020 agg->frame_count = tx_resp->frame_count;
2019 agg->start_idx = start_idx; 2021 agg->start_idx = start_idx;
@@ -2027,7 +2029,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2027 idx = start_idx; 2029 idx = start_idx;
2028 2030
2029 /* FIXME: code repetition */ 2031 /* FIXME: code repetition */
2030 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n", 2032 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
2031 agg->frame_count, agg->start_idx, idx); 2033 agg->frame_count, agg->start_idx, idx);
2032 2034
2033 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 2035 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
@@ -2038,9 +2040,9 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2038 iwl_hwrate_to_tx_control(priv, rate_n_flags, info); 2040 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
2039 /* FIXME: code repetition end */ 2041 /* FIXME: code repetition end */
2040 2042
2041 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n", 2043 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
2042 status & 0xff, tx_resp->failure_frame); 2044 status & 0xff, tx_resp->failure_frame);
2043 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags); 2045 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
2044 2046
2045 agg->wait_for_ba = 0; 2047 agg->wait_for_ba = 0;
2046 } else { 2048 } else {
@@ -2060,21 +2062,21 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2060 AGG_TX_STATE_ABORT_MSK)) 2062 AGG_TX_STATE_ABORT_MSK))
2061 continue; 2063 continue;
2062 2064
2063 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", 2065 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
2064 agg->frame_count, txq_id, idx); 2066 agg->frame_count, txq_id, idx);
2065 2067
2066 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 2068 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
2067 2069
2068 sc = le16_to_cpu(hdr->seq_ctrl); 2070 sc = le16_to_cpu(hdr->seq_ctrl);
2069 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 2071 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
2070 IWL_ERROR("BUG_ON idx doesn't match seq control" 2072 IWL_ERR(priv,
2071 " idx=%d, seq_idx=%d, seq=%d\n", 2073 "BUG_ON idx doesn't match seq control"
2072 idx, SEQ_TO_SN(sc), 2074 " idx=%d, seq_idx=%d, seq=%d\n",
2073 hdr->seq_ctrl); 2075 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
2074 return -1; 2076 return -1;
2075 } 2077 }
2076 2078
2077 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", 2079 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
2078 i, idx, SEQ_TO_SN(sc)); 2080 i, idx, SEQ_TO_SN(sc));
2079 2081
2080 sh = idx - start; 2082 sh = idx - start;
@@ -2092,13 +2094,13 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2092 sh = 0; 2094 sh = 0;
2093 } 2095 }
2094 bitmap |= 1ULL << sh; 2096 bitmap |= 1ULL << sh;
2095 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n", 2097 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
2096 start, (unsigned long long)bitmap); 2098 start, (unsigned long long)bitmap);
2097 } 2099 }
2098 2100
2099 agg->bitmap = bitmap; 2101 agg->bitmap = bitmap;
2100 agg->start_idx = start; 2102 agg->start_idx = start;
2101 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n", 2103 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
2102 agg->frame_count, agg->start_idx, 2104 agg->frame_count, agg->start_idx,
2103 (unsigned long long)agg->bitmap); 2105 (unsigned long long)agg->bitmap);
2104 2106
@@ -2129,7 +2131,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2129 u8 *qc = NULL; 2131 u8 *qc = NULL;
2130 2132
2131 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 2133 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
2132 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " 2134 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
2133 "is out of range [0-%d] %d %d\n", txq_id, 2135 "is out of range [0-%d] %d %d\n", txq_id,
2134 index, txq->q.n_bd, txq->q.write_ptr, 2136 index, txq->q.n_bd, txq->q.write_ptr,
2135 txq->q.read_ptr); 2137 txq->q.read_ptr);
@@ -2147,7 +2149,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2147 2149
2148 sta_id = iwl_get_ra_sta_id(priv, hdr); 2150 sta_id = iwl_get_ra_sta_id(priv, hdr);
2149 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { 2151 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2150 IWL_ERROR("Station not known\n"); 2152 IWL_ERR(priv, "Station not known\n");
2151 return; 2153 return;
2152 } 2154 }
2153 2155
@@ -2167,7 +2169,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2167 2169
2168 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 2170 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2169 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 2171 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2170 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " 2172 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2171 "%d index %d\n", scd_ssn , index); 2173 "%d index %d\n", scd_ssn , index);
2172 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2174 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2173 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 2175 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
@@ -2190,7 +2192,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2190 le32_to_cpu(tx_resp->rate_n_flags), 2192 le32_to_cpu(tx_resp->rate_n_flags),
2191 info); 2193 info);
2192 2194
2193 IWL_DEBUG_TX_REPLY("TXQ %d status %s (0x%08x) " 2195 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
2194 "rate_n_flags 0x%x retries %d\n", 2196 "rate_n_flags 0x%x retries %d\n",
2195 txq_id, 2197 txq_id,
2196 iwl_get_tx_fail_reason(status), status, 2198 iwl_get_tx_fail_reason(status), status,
@@ -2210,7 +2212,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2210 iwl_txq_check_empty(priv, sta_id, tid, txq_id); 2212 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2211 2213
2212 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2214 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
2213 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); 2215 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
2214} 2216}
2215 2217
2216static int iwl4965_calc_rssi(struct iwl_priv *priv, 2218static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2238,13 +2240,13 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
2238 if (valid_antennae & (1 << i)) 2240 if (valid_antennae & (1 << i))
2239 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); 2241 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2240 2242
2241 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n", 2243 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2242 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], 2244 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2243 max_rssi, agc); 2245 max_rssi, agc);
2244 2246
2245 /* dBm = max_rssi dB - agc dB - constant. 2247 /* dBm = max_rssi dB - agc dB - constant.
2246 * Higher AGC (higher radio gain) means lower signal. */ 2248 * Higher AGC (higher radio gain) means lower signal. */
2247 return max_rssi - agc - IWL_RSSI_OFFSET; 2249 return max_rssi - agc - IWL49_RSSI_OFFSET;
2248} 2250}
2249 2251
2250 2252
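The RSSI change above is the constant rename (IWL_RSSI_OFFSET to IWL49_RSSI_OFFSET); the conversion itself follows the comment: take the strongest per-antenna RSSI, subtract the AGC value (more gain means a weaker signal) and a hardware-specific offset to get dBm. A stand-alone version of that computation (rssi_offset stands in for the driver constant):

#include <stdint.h>

static int rssi_to_dbm(const uint8_t *rssi, int num_ant,
		       unsigned valid_antennae, int agc, int rssi_offset)
{
	int max_rssi = 0;
	int i;

	/* strongest reading among the antennas flagged valid */
	for (i = 0; i < num_ant; i++)
		if ((valid_antennae & (1u << i)) && rssi[i] > max_rssi)
			max_rssi = rssi[i];

	return max_rssi - agc - rssi_offset;	/* dBm */
}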
@@ -2287,6 +2289,9 @@ static struct iwl_lib_ops iwl4965_lib = {
2287 .txq_set_sched = iwl4965_txq_set_sched, 2289 .txq_set_sched = iwl4965_txq_set_sched,
2288 .txq_agg_enable = iwl4965_txq_agg_enable, 2290 .txq_agg_enable = iwl4965_txq_agg_enable,
2289 .txq_agg_disable = iwl4965_txq_agg_disable, 2291 .txq_agg_disable = iwl4965_txq_agg_disable,
2292 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
2293 .txq_free_tfd = iwl_hw_txq_free_tfd,
2294 .txq_init = iwl_hw_tx_queue_init,
2290 .rx_handler_setup = iwl4965_rx_handler_setup, 2295 .rx_handler_setup = iwl4965_rx_handler_setup,
2291 .setup_deferred_work = iwl4965_setup_deferred_work, 2296 .setup_deferred_work = iwl4965_setup_deferred_work,
2292 .cancel_deferred_work = iwl4965_cancel_deferred_work, 2297 .cancel_deferred_work = iwl4965_cancel_deferred_work,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 82c3859ce0f..15cac70e36e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -68,10 +68,16 @@
68#ifndef __iwl_5000_hw_h__ 68#ifndef __iwl_5000_hw_h__
69#define __iwl_5000_hw_h__ 69#define __iwl_5000_hw_h__
70 70
71#define IWL50_RTC_INST_LOWER_BOUND (0x000000)
71#define IWL50_RTC_INST_UPPER_BOUND (0x020000) 72#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
73
74#define IWL50_RTC_DATA_LOWER_BOUND (0x800000)
72#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000) 75#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
73#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 76
74#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) 77#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \
78 IWL50_RTC_INST_LOWER_BOUND)
79#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \
80 IWL50_RTC_DATA_LOWER_BOUND)
75 81
76/* EEPROM */ 82/* EEPROM */
77#define IWL_5000_EEPROM_IMG_SIZE 2048 83#define IWL_5000_EEPROM_IMG_SIZE 2048
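With the 5000-series lower bounds now defined in this header, the size macros work out to 0x020000 - 0x000000 = 128 KiB of instruction SRAM and 0x80C000 - 0x800000 = 48 KiB of data SRAM, instead of borrowing the 4965-era RTC_*_LOWER_BOUND macros. A quick stand-alone check of that arithmetic (local copies of the constants, since the header is kernel-internal):

#include <assert.h>

#define INST_LOWER	0x000000
#define INST_UPPER	0x020000
#define DATA_LOWER	0x800000
#define DATA_UPPER	0x80C000

int main(void)
{
	assert(INST_UPPER - INST_LOWER == 128 * 1024);	/* 0x20000 bytes */
	assert(DATA_UPPER - DATA_LOWER == 48 * 1024);	/* 0x0c000 bytes */
	return 0;
}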
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 66d053d28a7..ab39f4ae8e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -43,6 +43,7 @@
43#include "iwl-sta.h" 43#include "iwl-sta.h"
44#include "iwl-helpers.h" 44#include "iwl-helpers.h"
45#include "iwl-5000-hw.h" 45#include "iwl-5000-hw.h"
46#include "iwl-6000-hw.h"
46 47
47/* Highest firmware API version supported */ 48/* Highest firmware API version supported */
48#define IWL5000_UCODE_API_MAX 1 49#define IWL5000_UCODE_API_MAX 1
@@ -84,7 +85,7 @@ static int iwl5000_apm_stop_master(struct iwl_priv *priv)
84 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 85 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
85 86
86 spin_unlock_irqrestore(&priv->lock, flags); 87 spin_unlock_irqrestore(&priv->lock, flags);
87 IWL_DEBUG_INFO("stop master\n"); 88 IWL_DEBUG_INFO(priv, "stop master\n");
88 89
89 return 0; 90 return 0;
90} 91}
@@ -108,7 +109,8 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
108 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 109 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
109 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 110 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
110 111
111 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); 112 if (priv->cfg->need_pll_cfg)
113 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
112 114
113 /* set "initialization complete" bit to move adapter 115 /* set "initialization complete" bit to move adapter
114 * D0U* --> D0A* state */ 116 * D0U* --> D0A* state */
@@ -118,7 +120,7 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
118 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL, 120 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
119 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 121 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
120 if (ret < 0) { 122 if (ret < 0) {
121 IWL_DEBUG_INFO("Failed to init the card\n"); 123 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
122 return ret; 124 return ret;
123 } 125 }
124 126
@@ -176,7 +178,8 @@ static int iwl5000_apm_reset(struct iwl_priv *priv)
176 178
177 /* FIXME: put here L1A -L0S w/a */ 179 /* FIXME: put here L1A -L0S w/a */
178 180
179 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); 181 if (priv->cfg->need_pll_cfg)
182 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
180 183
181 /* set "initialization complete" bit to move adapter 184 /* set "initialization complete" bit to move adapter
182 * D0U* --> D0A* state */ 185 * D0U* --> D0A* state */
@@ -186,7 +189,7 @@ static int iwl5000_apm_reset(struct iwl_priv *priv)
186 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL, 189 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
187 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 190 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
188 if (ret < 0) { 191 if (ret < 0) {
189 IWL_DEBUG_INFO("Failed to init the card\n"); 192 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
190 goto out; 193 goto out;
191 } 194 }
192 195
@@ -216,18 +219,19 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
216{ 219{
217 unsigned long flags; 220 unsigned long flags;
218 u16 radio_cfg; 221 u16 radio_cfg;
219 u16 link; 222 u16 lctl;
220 223
221 spin_lock_irqsave(&priv->lock, flags); 224 spin_lock_irqsave(&priv->lock, flags);
222 225
223 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link); 226 lctl = iwl_pcie_link_ctl(priv);
224 227
225 /* L1 is enabled by BIOS */ 228 /* HW bug W/A */
226 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN) 229 /* L1-ASPM is enabled by BIOS */
227 /* disable L0S disabled L1A enabled */ 230 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
231 /* L1-APSM enabled: disable L0S */
228 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 232 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
229 else 233 else
230 /* L0S enabled L1A disabled */ 234 /* L1-ASPM disabled: enable L0S */
231 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 235 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
232 236
233 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 237 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
@@ -289,7 +293,7 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
289 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS); 293 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
290 break; 294 break;
291 default: 295 default:
292 IWL_ERROR("illegal indirect type: 0x%X\n", 296 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
293 address & INDIRECT_TYPE_MSK); 297 address & INDIRECT_TYPE_MSK);
294 break; 298 break;
295 } 299 }
@@ -338,7 +342,7 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
338 data->delta_gain_code[i] |= (1 << 2); 342 data->delta_gain_code[i] |= (1 << 2);
339 } 343 }
340 344
341 IWL_DEBUG_CALIB("Delta gains: ANT_B = %d ANT_C = %d\n", 345 IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
342 data->delta_gain_code[1], data->delta_gain_code[2]); 346 data->delta_gain_code[1], data->delta_gain_code[2]);
343 347
344 if (!data->radio_write) { 348 if (!data->radio_write) {
@@ -384,13 +388,14 @@ static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
384 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 388 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
385 sizeof(cmd), &cmd); 389 sizeof(cmd), &cmd);
386 if (ret) 390 if (ret)
387 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n"); 391 IWL_ERR(priv,
392 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
388 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 393 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
389 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 394 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
390 } 395 }
391} 396}
392 397
393static void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 398void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
394 __le32 *tx_flags) 399 __le32 *tx_flags)
395{ 400{
396 if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || 401 if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
@@ -507,7 +512,7 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
507 index = IWL_CALIB_BASE_BAND; 512 index = IWL_CALIB_BASE_BAND;
508 break; 513 break;
509 default: 514 default:
510 IWL_ERROR("Unknown calibration notification %d\n", 515 IWL_ERR(priv, "Unknown calibration notification %d\n",
511 hdr->op_code); 516 hdr->op_code);
512 return; 517 return;
513 } 518 }
@@ -517,7 +522,7 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
517static void iwl5000_rx_calib_complete(struct iwl_priv *priv, 522static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
518 struct iwl_rx_mem_buffer *rxb) 523 struct iwl_rx_mem_buffer *rxb)
519{ 524{
520 IWL_DEBUG_INFO("Init. calibration is completed, restarting fw.\n"); 525 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
521 queue_work(priv->workqueue, &priv->restart); 526 queue_work(priv->workqueue, &priv->restart);
522} 527}
523 528
@@ -580,40 +585,41 @@ static int iwl5000_load_given_ucode(struct iwl_priv *priv,
580{ 585{
581 int ret = 0; 586 int ret = 0;
582 587
583 ret = iwl5000_load_section(priv, inst_image, RTC_INST_LOWER_BOUND); 588 ret = iwl5000_load_section(priv, inst_image,
589 IWL50_RTC_INST_LOWER_BOUND);
584 if (ret) 590 if (ret)
585 return ret; 591 return ret;
586 592
587 IWL_DEBUG_INFO("INST uCode section being loaded...\n"); 593 IWL_DEBUG_INFO(priv, "INST uCode section being loaded...\n");
588 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 594 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
589 priv->ucode_write_complete, 5 * HZ); 595 priv->ucode_write_complete, 5 * HZ);
590 if (ret == -ERESTARTSYS) { 596 if (ret == -ERESTARTSYS) {
591 IWL_ERROR("Could not load the INST uCode section due " 597 IWL_ERR(priv, "Could not load the INST uCode section due "
592 "to interrupt\n"); 598 "to interrupt\n");
593 return ret; 599 return ret;
594 } 600 }
595 if (!ret) { 601 if (!ret) {
596 IWL_ERROR("Could not load the INST uCode section\n"); 602 IWL_ERR(priv, "Could not load the INST uCode section\n");
597 return -ETIMEDOUT; 603 return -ETIMEDOUT;
598 } 604 }
599 605
600 priv->ucode_write_complete = 0; 606 priv->ucode_write_complete = 0;
601 607
602 ret = iwl5000_load_section( 608 ret = iwl5000_load_section(
603 priv, data_image, RTC_DATA_LOWER_BOUND); 609 priv, data_image, IWL50_RTC_DATA_LOWER_BOUND);
604 if (ret) 610 if (ret)
605 return ret; 611 return ret;
606 612
607 IWL_DEBUG_INFO("DATA uCode section being loaded...\n"); 613 IWL_DEBUG_INFO(priv, "DATA uCode section being loaded...\n");
608 614
609 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 615 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
610 priv->ucode_write_complete, 5 * HZ); 616 priv->ucode_write_complete, 5 * HZ);
611 if (ret == -ERESTARTSYS) { 617 if (ret == -ERESTARTSYS) {
612 IWL_ERROR("Could not load the INST uCode section due " 618 IWL_ERR(priv, "Could not load the INST uCode section due "
613 "to interrupt\n"); 619 "to interrupt\n");
614 return ret; 620 return ret;
615 } else if (!ret) { 621 } else if (!ret) {
616 IWL_ERROR("Could not load the DATA uCode section\n"); 622 IWL_ERR(priv, "Could not load the DATA uCode section\n");
617 return -ETIMEDOUT; 623 return -ETIMEDOUT;
618 } else 624 } else
619 ret = 0; 625 ret = 0;
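The load_given_ucode path above relies on wait_event_interruptible_timeout()'s three-way return value: a negative value (-ERESTARTSYS) means the wait was interrupted by a signal, zero means the 5*HZ timeout expired, and a positive value means ucode_write_complete was observed in time. A stand-alone helper expressing the same interpretation used in both branches (kernel wait queues are not available here, so the raw return value is just passed in):

#include <errno.h>

static int interpret_wait(long ret)
{
	if (ret < 0)
		return (int)ret;	/* interrupted (e.g. -ERESTARTSYS): propagate */
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out waiting for the write to finish */
	return 0;			/* condition was met before the timeout */
}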
@@ -629,20 +635,20 @@ static int iwl5000_load_ucode(struct iwl_priv *priv)
629 635
630 /* check whether init ucode should be loaded, or rather runtime ucode */ 636 /* check whether init ucode should be loaded, or rather runtime ucode */
631 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) { 637 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
632 IWL_DEBUG_INFO("Init ucode found. Loading init ucode...\n"); 638 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
633 ret = iwl5000_load_given_ucode(priv, 639 ret = iwl5000_load_given_ucode(priv,
634 &priv->ucode_init, &priv->ucode_init_data); 640 &priv->ucode_init, &priv->ucode_init_data);
635 if (!ret) { 641 if (!ret) {
636 IWL_DEBUG_INFO("Init ucode load complete.\n"); 642 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
637 priv->ucode_type = UCODE_INIT; 643 priv->ucode_type = UCODE_INIT;
638 } 644 }
639 } else { 645 } else {
640 IWL_DEBUG_INFO("Init ucode not found, or already loaded. " 646 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
641 "Loading runtime ucode...\n"); 647 "Loading runtime ucode...\n");
642 ret = iwl5000_load_given_ucode(priv, 648 ret = iwl5000_load_given_ucode(priv,
643 &priv->ucode_code, &priv->ucode_data); 649 &priv->ucode_code, &priv->ucode_data);
644 if (!ret) { 650 if (!ret) {
645 IWL_DEBUG_INFO("Runtime ucode load complete.\n"); 651 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
646 priv->ucode_type = UCODE_RT; 652 priv->ucode_type = UCODE_RT;
647 } 653 }
648 } 654 }
@@ -658,7 +664,7 @@ static void iwl5000_init_alive_start(struct iwl_priv *priv)
658 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { 664 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
659 /* We had an error bringing up the hardware, so take it 665 /* We had an error bringing up the hardware, so take it
660 * all the way back down so we can try again */ 666 * all the way back down so we can try again */
661 IWL_DEBUG_INFO("Initialize Alive failed.\n"); 667 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
662 goto restart; 668 goto restart;
663 } 669 }
664 670
@@ -668,14 +674,15 @@ static void iwl5000_init_alive_start(struct iwl_priv *priv)
668 if (iwl_verify_ucode(priv)) { 674 if (iwl_verify_ucode(priv)) {
669 /* Runtime instruction load was bad; 675 /* Runtime instruction load was bad;
670 * take it all the way back down so we can try again */ 676 * take it all the way back down so we can try again */
671 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); 677 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
672 goto restart; 678 goto restart;
673 } 679 }
674 680
675 iwl_clear_stations_table(priv); 681 iwl_clear_stations_table(priv);
676 ret = priv->cfg->ops->lib->alive_notify(priv); 682 ret = priv->cfg->ops->lib->alive_notify(priv);
677 if (ret) { 683 if (ret) {
678 IWL_WARNING("Could not complete ALIVE transition: %d\n", ret); 684 IWL_WARN(priv,
685 "Could not complete ALIVE transition: %d\n", ret);
679 goto restart; 686 goto restart;
680 } 687 }
681 688
@@ -710,7 +717,7 @@ static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
710 717
711 txq->sched_retry = scd_retry; 718 txq->sched_retry = scd_retry;
712 719
713 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n", 720 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
714 active ? "Activate" : "Deactivate", 721 active ? "Activate" : "Deactivate",
715 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 722 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
716} 723}
@@ -824,8 +831,9 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
824{ 831{
825 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) || 832 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
826 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 833 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
827 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 834 IWL_ERR(priv,
828 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES); 835 "invalid queues_num, should be between %d and %d\n",
836 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
829 return -EINVAL; 837 return -EINVAL;
830 } 838 }
831 839
@@ -833,70 +841,62 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
833 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 841 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
834 priv->hw_params.scd_bc_tbls_size = 842 priv->hw_params.scd_bc_tbls_size =
835 IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl); 843 IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl);
844 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
836 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 845 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
837 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 846 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
838 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; 847
839 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; 848 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
849 case CSR_HW_REV_TYPE_6x00:
850 case CSR_HW_REV_TYPE_6x50:
851 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
852 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
853 break;
854 default:
855 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
856 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
857 }
858
840 priv->hw_params.max_bsm_size = 0; 859 priv->hw_params.max_bsm_size = 0;
841 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_2GHZ) | 860 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_2GHZ) |
842 BIT(IEEE80211_BAND_5GHZ); 861 BIT(IEEE80211_BAND_5GHZ);
862 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
863
843 priv->hw_params.sens = &iwl5000_sensitivity; 864 priv->hw_params.sens = &iwl5000_sensitivity;
844 865
845 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 866 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
846 case CSR_HW_REV_TYPE_5100: 867 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
847 priv->hw_params.tx_chains_num = 1; 868 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
848 priv->hw_params.rx_chains_num = 2; 869 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
849 priv->hw_params.valid_tx_ant = ANT_B;
850 priv->hw_params.valid_rx_ant = ANT_AB;
851 break;
852 case CSR_HW_REV_TYPE_5150:
853 priv->hw_params.tx_chains_num = 1;
854 priv->hw_params.rx_chains_num = 2;
855 priv->hw_params.valid_tx_ant = ANT_A;
856 priv->hw_params.valid_rx_ant = ANT_AB;
857 break;
858 case CSR_HW_REV_TYPE_5300:
859 case CSR_HW_REV_TYPE_5350:
860 priv->hw_params.tx_chains_num = 3;
861 priv->hw_params.rx_chains_num = 3;
862 priv->hw_params.valid_tx_ant = ANT_ABC;
863 priv->hw_params.valid_rx_ant = ANT_ABC;
864 break;
865 }
866 870
867 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 871 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
868 case CSR_HW_REV_TYPE_5100:
869 case CSR_HW_REV_TYPE_5300:
870 case CSR_HW_REV_TYPE_5350:
871 /* 5X00 and 5350 wants in Celsius */
872 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
873 break;
874 case CSR_HW_REV_TYPE_5150: 872 case CSR_HW_REV_TYPE_5150:
875 /* 5150 wants in Kelvin */ 873 /* 5150 wants in Kelvin */
876 priv->hw_params.ct_kill_threshold = 874 priv->hw_params.ct_kill_threshold =
877 iwl5150_get_ct_threshold(priv); 875 iwl5150_get_ct_threshold(priv);
878 break; 876 break;
877 default:
878 /* all others want Celsius */
879 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
880 break;
879 } 881 }
880 882
881 /* Set initial calibration set */ 883 /* Set initial calibration set */
882 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 884 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
883 case CSR_HW_REV_TYPE_5100: 885 case CSR_HW_REV_TYPE_5150:
884 case CSR_HW_REV_TYPE_5300:
885 case CSR_HW_REV_TYPE_5350:
886 priv->hw_params.calib_init_cfg = 886 priv->hw_params.calib_init_cfg =
887 BIT(IWL_CALIB_XTAL) | 887 BIT(IWL_CALIB_DC) |
888 BIT(IWL_CALIB_LO) | 888 BIT(IWL_CALIB_LO) |
889 BIT(IWL_CALIB_TX_IQ) | 889 BIT(IWL_CALIB_TX_IQ) |
890 BIT(IWL_CALIB_TX_IQ_PERD) |
891 BIT(IWL_CALIB_BASE_BAND); 890 BIT(IWL_CALIB_BASE_BAND);
891
892 break; 892 break;
893 case CSR_HW_REV_TYPE_5150: 893 default:
894 priv->hw_params.calib_init_cfg = 894 priv->hw_params.calib_init_cfg =
895 BIT(IWL_CALIB_DC) | 895 BIT(IWL_CALIB_XTAL) |
896 BIT(IWL_CALIB_LO) | 896 BIT(IWL_CALIB_LO) |
897 BIT(IWL_CALIB_TX_IQ) | 897 BIT(IWL_CALIB_TX_IQ) |
898 BIT(IWL_CALIB_TX_IQ_PERD) |
898 BIT(IWL_CALIB_BASE_BAND); 899 BIT(IWL_CALIB_BASE_BAND);
899
900 break; 900 break;
901 } 901 }
902 902
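The calibration-set rework above flips which hardware type is the special case: 5150 parts keep the DC calibration, every other revision now falls through to a default set with the crystal (XTAL) and periodic TX IQ calibrations, and the common LO / TX IQ / baseband bits are shared by both. A compact sketch of that selection with stand-in bit names (the real IWL_CALIB_* indices live in the driver headers):

#include <stdint.h>

enum { CAL_XTAL, CAL_DC, CAL_LO, CAL_TX_IQ, CAL_TX_IQ_PERD, CAL_BASE_BAND };
#define BIT(n)	(1u << (n))

/* is_5150: true for 5150-family hardware (CSR_HW_REV_TYPE_5150 above). */
static uint32_t calib_init_cfg(int is_5150)
{
	uint32_t common = BIT(CAL_LO) | BIT(CAL_TX_IQ) | BIT(CAL_BASE_BAND);

	if (is_5150)
		return common | BIT(CAL_DC);
	return common | BIT(CAL_XTAL) | BIT(CAL_TX_IQ_PERD);
}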
@@ -1011,7 +1011,8 @@ static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1011 1011
1012 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 1012 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1013 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 1013 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
1014 IWL_WARNING("queue number out of range: %d, must be %d to %d\n", 1014 IWL_WARN(priv,
1015 "queue number out of range: %d, must be %d to %d\n",
1015 txq_id, IWL50_FIRST_AMPDU_QUEUE, 1016 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1016 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 1017 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
1017 return -EINVAL; 1018 return -EINVAL;
@@ -1076,7 +1077,8 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1076 1077
1077 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 1078 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1078 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 1079 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
1079 IWL_WARNING("queue number out of range: %d, must be %d to %d\n", 1080 IWL_WARN(priv,
1081 "queue number out of range: %d, must be %d to %d\n",
1080 txq_id, IWL50_FIRST_AMPDU_QUEUE, 1082 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1081 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 1083 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
1082 return -EINVAL; 1084 return -EINVAL;
@@ -1104,7 +1106,7 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1104 return 0; 1106 return 0;
1105} 1107}
1106 1108
1107static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 1109u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1108{ 1110{
1109 u16 size = (u16)sizeof(struct iwl_addsta_cmd); 1111 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
1110 memcpy(data, cmd, size); 1112 memcpy(data, cmd, size);
@@ -1142,7 +1144,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1142 u16 seq; 1144 u16 seq;
1143 1145
1144 if (agg->wait_for_ba) 1146 if (agg->wait_for_ba)
1145 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n"); 1147 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
1146 1148
1147 agg->frame_count = tx_resp->frame_count; 1149 agg->frame_count = tx_resp->frame_count;
1148 agg->start_idx = start_idx; 1150 agg->start_idx = start_idx;
@@ -1156,7 +1158,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1156 idx = start_idx; 1158 idx = start_idx;
1157 1159
1158 /* FIXME: code repetition */ 1160 /* FIXME: code repetition */
1159 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n", 1161 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
1160 agg->frame_count, agg->start_idx, idx); 1162 agg->frame_count, agg->start_idx, idx);
1161 1163
1162 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 1164 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
@@ -1168,9 +1170,9 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1168 1170
1169 /* FIXME: code repetition end */ 1171 /* FIXME: code repetition end */
1170 1172
1171 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n", 1173 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1172 status & 0xff, tx_resp->failure_frame); 1174 status & 0xff, tx_resp->failure_frame);
1173 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags); 1175 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1174 1176
1175 agg->wait_for_ba = 0; 1177 agg->wait_for_ba = 0;
1176 } else { 1178 } else {
@@ -1190,21 +1192,22 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1190 AGG_TX_STATE_ABORT_MSK)) 1192 AGG_TX_STATE_ABORT_MSK))
1191 continue; 1193 continue;
1192 1194
1193 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", 1195 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
1194 agg->frame_count, txq_id, idx); 1196 agg->frame_count, txq_id, idx);
1195 1197
1196 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 1198 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
1197 1199
1198 sc = le16_to_cpu(hdr->seq_ctrl); 1200 sc = le16_to_cpu(hdr->seq_ctrl);
1199 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 1201 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1200 IWL_ERROR("BUG_ON idx doesn't match seq control" 1202 IWL_ERR(priv,
1201 " idx=%d, seq_idx=%d, seq=%d\n", 1203 "BUG_ON idx doesn't match seq control"
1204 " idx=%d, seq_idx=%d, seq=%d\n",
1202 idx, SEQ_TO_SN(sc), 1205 idx, SEQ_TO_SN(sc),
1203 hdr->seq_ctrl); 1206 hdr->seq_ctrl);
1204 return -1; 1207 return -1;
1205 } 1208 }
1206 1209
1207 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", 1210 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
1208 i, idx, SEQ_TO_SN(sc)); 1211 i, idx, SEQ_TO_SN(sc));
1209 1212
1210 sh = idx - start; 1213 sh = idx - start;
@@ -1222,13 +1225,13 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1222 sh = 0; 1225 sh = 0;
1223 } 1226 }
1224 bitmap |= 1ULL << sh; 1227 bitmap |= 1ULL << sh;
1225 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n", 1228 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1226 start, (unsigned long long)bitmap); 1229 start, (unsigned long long)bitmap);
1227 } 1230 }
1228 1231
1229 agg->bitmap = bitmap; 1232 agg->bitmap = bitmap;
1230 agg->start_idx = start; 1233 agg->start_idx = start;
1231 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n", 1234 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
1232 agg->frame_count, agg->start_idx, 1235 agg->frame_count, agg->start_idx,
1233 (unsigned long long)agg->bitmap); 1236 (unsigned long long)agg->bitmap);
1234 1237
@@ -1254,7 +1257,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1254 int freed; 1257 int freed;
1255 1258
1256 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 1259 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
1257 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " 1260 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1258 "is out of range [0-%d] %d %d\n", txq_id, 1261 "is out of range [0-%d] %d %d\n", txq_id,
1259 index, txq->q.n_bd, txq->q.write_ptr, 1262 index, txq->q.n_bd, txq->q.write_ptr,
1260 txq->q.read_ptr); 1263 txq->q.read_ptr);
@@ -1281,7 +1284,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1281 1284
1282 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 1285 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1283 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 1286 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
1284 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim " 1287 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
1285 "scd_ssn=%d idx=%d txq=%d swq=%d\n", 1288 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1286 scd_ssn , index, txq_id, txq->swq_id); 1289 scd_ssn , index, txq_id, txq->swq_id);
1287 1290
@@ -1308,7 +1311,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1308 le32_to_cpu(tx_resp->rate_n_flags), 1311 le32_to_cpu(tx_resp->rate_n_flags),
1309 info); 1312 info);
1310 1313
1311 IWL_DEBUG_TX_REPLY("TXQ %d status %s (0x%08x) rate_n_flags " 1314 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
1312 "0x%x retries %d\n", 1315 "0x%x retries %d\n",
1313 txq_id, 1316 txq_id,
1314 iwl_get_tx_fail_reason(status), status, 1317 iwl_get_tx_fail_reason(status), status,
@@ -1328,11 +1331,11 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1328 iwl_txq_check_empty(priv, sta_id, tid, txq_id); 1331 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1329 1332
1330 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 1333 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1331 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); 1334 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
1332} 1335}
1333 1336
1334/* Currently 5000 is the superset of everything */ 1337/* Currently 5000 is the superset of everything */
1335static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len) 1338u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
1336{ 1339{
1337 return len; 1340 return len;
1338} 1341}
@@ -1356,7 +1359,7 @@ static void iwl5000_rx_handler_setup(struct iwl_priv *priv)
1356 1359
1357static int iwl5000_hw_valid_rtc_data_addr(u32 addr) 1360static int iwl5000_hw_valid_rtc_data_addr(u32 addr)
1358{ 1361{
1359 return (addr >= RTC_DATA_LOWER_BOUND) && 1362 return (addr >= IWL50_RTC_DATA_LOWER_BOUND) &&
1360 (addr < IWL50_RTC_DATA_UPPER_BOUND); 1363 (addr < IWL50_RTC_DATA_UPPER_BOUND);
1361} 1364}
1362 1365
@@ -1379,7 +1382,7 @@ static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
1379 (rxon1->acquisition_data == rxon2->acquisition_data) && 1382 (rxon1->acquisition_data == rxon2->acquisition_data) &&
1380 (rxon1->rx_chain == rxon2->rx_chain) && 1383 (rxon1->rx_chain == rxon2->rx_chain) &&
1381 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { 1384 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1382 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n"); 1385 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1383 return 0; 1386 return 0;
1384 } 1387 }
1385 1388
@@ -1409,12 +1412,19 @@ static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
1409static int iwl5000_send_tx_power(struct iwl_priv *priv) 1412static int iwl5000_send_tx_power(struct iwl_priv *priv)
1410{ 1413{
1411 struct iwl5000_tx_power_dbm_cmd tx_power_cmd; 1414 struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
1415 u8 tx_ant_cfg_cmd;
1412 1416
1413 /* half dBm need to multiply */ 1417 /* half dBm need to multiply */
1414 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 1418 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
1415 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; 1419 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
1416 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; 1420 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
1417 return iwl_send_cmd_pdu_async(priv, REPLY_TX_POWER_DBM_CMD, 1421
1422 if (IWL_UCODE_API(priv->ucode_ver) == 1)
1423 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
1424 else
1425 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
1426
1427 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
1418 sizeof(tx_power_cmd), &tx_power_cmd, 1428 sizeof(tx_power_cmd), &tx_power_cmd,
1419 NULL); 1429 NULL);
1420} 1430}
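This hunk makes the host command ID depend on the loaded firmware's API version: API 1 images get REPLY_TX_POWER_DBM_CMD_V1, newer images the current command. A small sketch of that gate follows; the macro layout (API number in the low byte) and the command ID values are assumptions for illustration only, not the driver's actual encoding.

    #include <stdint.h>
    #include <stdio.h>

    #define UCODE_API(ver)          ((ver) & 0xff)   /* assumed layout */
    #define CMD_TX_POWER_DBM        0x98             /* placeholder IDs */
    #define CMD_TX_POWER_DBM_V1     0x97

    static uint8_t pick_tx_power_cmd(uint32_t ucode_ver)
    {
            /* older API 1 firmware still expects the v1 command */
            return UCODE_API(ucode_ver) == 1 ? CMD_TX_POWER_DBM_V1
                                             : CMD_TX_POWER_DBM;
    }

    int main(void)
    {
            printf("api1 -> 0x%x\n", pick_tx_power_cmd(0x01));
            printf("api2 -> 0x%x\n", pick_tx_power_cmd(0x02));
            return 0;
    }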
@@ -1426,7 +1436,7 @@ static void iwl5000_temperature(struct iwl_priv *priv)
1426} 1436}
1427 1437
1428/* Calc max signal level (dBm) among 3 possible receivers */ 1438/* Calc max signal level (dBm) among 3 possible receivers */
1429static int iwl5000_calc_rssi(struct iwl_priv *priv, 1439int iwl5000_calc_rssi(struct iwl_priv *priv,
1430 struct iwl_rx_phy_res *rx_resp) 1440 struct iwl_rx_phy_res *rx_resp)
1431{ 1441{
1432 /* data from PHY/DSP regarding signal strength, etc., 1442 /* data from PHY/DSP regarding signal strength, etc.,
@@ -1455,19 +1465,19 @@ static int iwl5000_calc_rssi(struct iwl_priv *priv,
1455 max_rssi = max_t(u32, rssi_a, rssi_b); 1465 max_rssi = max_t(u32, rssi_a, rssi_b);
1456 max_rssi = max_t(u32, max_rssi, rssi_c); 1466 max_rssi = max_t(u32, max_rssi, rssi_c);
1457 1467
1458 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n", 1468 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1459 rssi_a, rssi_b, rssi_c, max_rssi, agc); 1469 rssi_a, rssi_b, rssi_c, max_rssi, agc);
1460 1470
1461 /* dBm = max_rssi dB - agc dB - constant. 1471 /* dBm = max_rssi dB - agc dB - constant.
1462 * Higher AGC (higher radio gain) means lower signal. */ 1472 * Higher AGC (higher radio gain) means lower signal. */
1463 return max_rssi - agc - IWL_RSSI_OFFSET; 1473 return max_rssi - agc - IWL49_RSSI_OFFSET;
1464} 1474}
1465 1475
1466static struct iwl_hcmd_ops iwl5000_hcmd = { 1476struct iwl_hcmd_ops iwl5000_hcmd = {
1467 .rxon_assoc = iwl5000_send_rxon_assoc, 1477 .rxon_assoc = iwl5000_send_rxon_assoc,
1468}; 1478};
1469 1479
1470static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = { 1480struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1471 .get_hcmd_size = iwl5000_get_hcmd_size, 1481 .get_hcmd_size = iwl5000_get_hcmd_size,
1472 .build_addsta_hcmd = iwl5000_build_addsta_hcmd, 1482 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
1473 .gain_computation = iwl5000_gain_computation, 1483 .gain_computation = iwl5000_gain_computation,
@@ -1476,13 +1486,16 @@ static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1476 .calc_rssi = iwl5000_calc_rssi, 1486 .calc_rssi = iwl5000_calc_rssi,
1477}; 1487};
1478 1488
1479static struct iwl_lib_ops iwl5000_lib = { 1489struct iwl_lib_ops iwl5000_lib = {
1480 .set_hw_params = iwl5000_hw_set_hw_params, 1490 .set_hw_params = iwl5000_hw_set_hw_params,
1481 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 1491 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
1482 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 1492 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
1483 .txq_set_sched = iwl5000_txq_set_sched, 1493 .txq_set_sched = iwl5000_txq_set_sched,
1484 .txq_agg_enable = iwl5000_txq_agg_enable, 1494 .txq_agg_enable = iwl5000_txq_agg_enable,
1485 .txq_agg_disable = iwl5000_txq_agg_disable, 1495 .txq_agg_disable = iwl5000_txq_agg_disable,
1496 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
1497 .txq_free_tfd = iwl_hw_txq_free_tfd,
1498 .txq_init = iwl_hw_tx_queue_init,
1486 .rx_handler_setup = iwl5000_rx_handler_setup, 1499 .rx_handler_setup = iwl5000_rx_handler_setup,
1487 .setup_deferred_work = iwl5000_setup_deferred_work, 1500 .setup_deferred_work = iwl5000_setup_deferred_work,
1488 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1501 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
@@ -1517,13 +1530,13 @@ static struct iwl_lib_ops iwl5000_lib = {
1517 }, 1530 },
1518}; 1531};
1519 1532
1520static struct iwl_ops iwl5000_ops = { 1533struct iwl_ops iwl5000_ops = {
1521 .lib = &iwl5000_lib, 1534 .lib = &iwl5000_lib,
1522 .hcmd = &iwl5000_hcmd, 1535 .hcmd = &iwl5000_hcmd,
1523 .utils = &iwl5000_hcmd_utils, 1536 .utils = &iwl5000_hcmd_utils,
1524}; 1537};
1525 1538
1526static struct iwl_mod_params iwl50_mod_params = { 1539struct iwl_mod_params iwl50_mod_params = {
1527 .num_of_queues = IWL50_NUM_QUEUES, 1540 .num_of_queues = IWL50_NUM_QUEUES,
1528 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 1541 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1529 .amsdu_size_8K = 1, 1542 .amsdu_size_8K = 1,
@@ -1543,6 +1556,9 @@ struct iwl_cfg iwl5300_agn_cfg = {
1543 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1556 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1544 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1557 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1545 .mod_params = &iwl50_mod_params, 1558 .mod_params = &iwl50_mod_params,
1559 .valid_tx_ant = ANT_ABC,
1560 .valid_rx_ant = ANT_ABC,
1561 .need_pll_cfg = true,
1546}; 1562};
1547 1563
1548struct iwl_cfg iwl5100_bg_cfg = { 1564struct iwl_cfg iwl5100_bg_cfg = {
@@ -1556,6 +1572,9 @@ struct iwl_cfg iwl5100_bg_cfg = {
1556 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1572 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1557 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1573 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1558 .mod_params = &iwl50_mod_params, 1574 .mod_params = &iwl50_mod_params,
1575 .valid_tx_ant = ANT_B,
1576 .valid_rx_ant = ANT_AB,
1577 .need_pll_cfg = true,
1559}; 1578};
1560 1579
1561struct iwl_cfg iwl5100_abg_cfg = { 1580struct iwl_cfg iwl5100_abg_cfg = {
@@ -1569,6 +1588,9 @@ struct iwl_cfg iwl5100_abg_cfg = {
1569 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1588 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1570 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1589 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1571 .mod_params = &iwl50_mod_params, 1590 .mod_params = &iwl50_mod_params,
1591 .valid_tx_ant = ANT_B,
1592 .valid_rx_ant = ANT_AB,
1593 .need_pll_cfg = true,
1572}; 1594};
1573 1595
1574struct iwl_cfg iwl5100_agn_cfg = { 1596struct iwl_cfg iwl5100_agn_cfg = {
@@ -1582,6 +1604,9 @@ struct iwl_cfg iwl5100_agn_cfg = {
1582 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1604 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1583 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1605 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1584 .mod_params = &iwl50_mod_params, 1606 .mod_params = &iwl50_mod_params,
1607 .valid_tx_ant = ANT_B,
1608 .valid_rx_ant = ANT_AB,
1609 .need_pll_cfg = true,
1585}; 1610};
1586 1611
1587struct iwl_cfg iwl5350_agn_cfg = { 1612struct iwl_cfg iwl5350_agn_cfg = {
@@ -1595,6 +1620,9 @@ struct iwl_cfg iwl5350_agn_cfg = {
1595 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1620 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1596 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1621 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1597 .mod_params = &iwl50_mod_params, 1622 .mod_params = &iwl50_mod_params,
1623 .valid_tx_ant = ANT_ABC,
1624 .valid_rx_ant = ANT_ABC,
1625 .need_pll_cfg = true,
1598}; 1626};
1599 1627
1600struct iwl_cfg iwl5150_agn_cfg = { 1628struct iwl_cfg iwl5150_agn_cfg = {
@@ -1608,6 +1636,9 @@ struct iwl_cfg iwl5150_agn_cfg = {
1608 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1636 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1609 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1637 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1610 .mod_params = &iwl50_mod_params, 1638 .mod_params = &iwl50_mod_params,
1639 .valid_tx_ant = ANT_A,
1640 .valid_rx_ant = ANT_AB,
1641 .need_pll_cfg = true,
1611}; 1642};
1612 1643
1613MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 1644MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
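Each 5000-family config now carries its antenna population explicitly (valid_tx_ant/valid_rx_ant as A/B/C masks) plus a need_pll_cfg flag, rather than leaving the core to deduce them per device. As a rough illustration of how such a mask can be consumed, here is a standalone sketch that maps an antenna mask to a chain count; the single-bit-per-chain encoding of ANT_A/B/C is an assumption made for the example.

    #include <stdio.h>

    #define ANT_A   (1 << 0)
    #define ANT_B   (1 << 1)
    #define ANT_C   (1 << 2)
    #define ANT_AB  (ANT_A | ANT_B)
    #define ANT_ABC (ANT_A | ANT_B | ANT_C)

    static int chain_count(unsigned int ant_mask)
    {
            int n = 0;

            while (ant_mask) {              /* count set bits = populated chains */
                    n += ant_mask & 1;
                    ant_mask >>= 1;
            }
            return n;
    }

    int main(void)
    {
            printf("5100 tx chains: %d\n", chain_count(ANT_B));    /* 1x2 SKU */
            printf("5300 tx chains: %d\n", chain_count(ANT_ABC));  /* 3x3 SKU */
            return 0;
    }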
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-core.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index 6f463555402..90185777d98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -58,47 +58,24 @@
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
61 *****************************************************************************/ 62 *****************************************************************************/
63/*
64 * Please use this file (iwl-6000-hw.h) only for hardware-related definitions.
65 * Use iwl-5000-commands.h for uCode API definitions.
66 */
62 67
63#ifndef __iwl_3945_dev_h__ 68#ifndef __iwl_6000_hw_h__
64#define __iwl_3945_dev_h__ 69#define __iwl_6000_hw_h__
65
66#define IWL_PCI_DEVICE(dev, subdev, cfg) \
67 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
68 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
69 .driver_data = (kernel_ulong_t)&(cfg)
70 70
71#define IWL_SKU_G 0x1 71#define IWL60_RTC_INST_LOWER_BOUND (0x000000)
72#define IWL_SKU_A 0x2 72#define IWL60_RTC_INST_UPPER_BOUND (0x040000)
73#define IWL60_RTC_DATA_LOWER_BOUND (0x800000)
74#define IWL60_RTC_DATA_UPPER_BOUND (0x814000)
75#define IWL60_RTC_INST_SIZE \
76 (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
77#define IWL60_RTC_DATA_SIZE \
78 (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
73 79
74/** 80#endif /* __iwl_6000_hw_h__ */
75 * struct iwl_3945_cfg
76 * @fw_name_pre: Firmware filename prefix. The api version and extension
77 * (.ucode) will be added to filename before loading from disk. The
78 * filename is constructed as fw_name_pre<api>.ucode.
79 * @ucode_api_max: Highest version of uCode API supported by driver.
80 * @ucode_api_min: Lowest version of uCode API supported by driver.
81 *
82 * We enable the driver to be backward compatible wrt API version. The
83 * driver specifies which APIs it supports (with @ucode_api_max being the
84 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
85 * it has a supported API version. The firmware's API version will be
86 * stored in @iwl_priv, enabling the driver to make runtime changes based
87 * on firmware version used.
88 *
89 * For example,
90 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
91 * Driver interacts with Firmware API version >= 2.
92 * } else {
93 * Driver interacts with Firmware API version 1.
94 * }
95 */
96struct iwl_3945_cfg {
97 const char *name;
98 const char *fw_name_pre;
99 const unsigned int ucode_api_max;
100 const unsigned int ucode_api_min;
101 unsigned int sku;
102};
103 81
104#endif /* __iwl_dev_h__ */
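The replacement header only defines the 6000-series RTC instruction and data windows and derives their sizes from the bounds. A sketch of the kind of half-open range check those bounds feed (mirroring iwl5000_hw_valid_rtc_data_addr earlier in this diff) is shown below; the numeric bounds are taken from the header, the prefixes are shortened and the check itself is illustrative.

    #include <stdio.h>

    #define RTC_DATA_LOWER_BOUND 0x800000
    #define RTC_DATA_UPPER_BOUND 0x814000
    #define RTC_DATA_SIZE (RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)

    /* lower bound inclusive, upper bound exclusive */
    static int valid_rtc_data_addr(unsigned int addr)
    {
            return addr >= RTC_DATA_LOWER_BOUND && addr < RTC_DATA_UPPER_BOUND;
    }

    int main(void)
    {
            printf("window size: 0x%x bytes\n", RTC_DATA_SIZE);
            printf("0x800000 valid: %d\n", valid_rtc_data_addr(0x800000));
            printf("0x814000 valid: %d\n", valid_rtc_data_addr(0x814000));
            return 0;
    }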
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
new file mode 100644
index 00000000000..edfa5e149f7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -0,0 +1,158 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-sta.h"
45#include "iwl-helpers.h"
46#include "iwl-5000-hw.h"
47
48/* Highest firmware API version supported */
49#define IWL6000_UCODE_API_MAX 2
50#define IWL6050_UCODE_API_MAX 2
51
52/* Lowest firmware API version supported */
53#define IWL6000_UCODE_API_MIN 1
54#define IWL6050_UCODE_API_MIN 1
55
56#define IWL6000_FW_PRE "iwlwifi-6000-"
57#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
58#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api)
59
60#define IWL6050_FW_PRE "iwlwifi-6050-"
61#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
62#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
63
64static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
65 .get_hcmd_size = iwl5000_get_hcmd_size,
66 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
67 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
68 .calc_rssi = iwl5000_calc_rssi,
69};
70
71static struct iwl_ops iwl6000_ops = {
72 .lib = &iwl5000_lib,
73 .hcmd = &iwl5000_hcmd,
74 .utils = &iwl6000_hcmd_utils,
75};
76
77struct iwl_cfg iwl6000_2ag_cfg = {
78 .name = "6000 Series 2x2 AG",
79 .fw_name_pre = IWL6000_FW_PRE,
80 .ucode_api_max = IWL6000_UCODE_API_MAX,
81 .ucode_api_min = IWL6000_UCODE_API_MIN,
82 .sku = IWL_SKU_A|IWL_SKU_G,
83 .ops = &iwl6000_ops,
84 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
85 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
86 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
87 .mod_params = &iwl50_mod_params,
88 .valid_tx_ant = ANT_BC,
89 .valid_rx_ant = ANT_BC,
90 .need_pll_cfg = false,
91};
92
93struct iwl_cfg iwl6000_2agn_cfg = {
94 .name = "6000 Series 2x2 AGN",
95 .fw_name_pre = IWL6000_FW_PRE,
96 .ucode_api_max = IWL6000_UCODE_API_MAX,
97 .ucode_api_min = IWL6000_UCODE_API_MIN,
98 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
99 .ops = &iwl6000_ops,
100 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
101 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
102 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
103 .mod_params = &iwl50_mod_params,
104 .valid_tx_ant = ANT_BC,
105 .valid_rx_ant = ANT_BC,
106 .need_pll_cfg = false,
107};
108
109struct iwl_cfg iwl6050_2agn_cfg = {
110 .name = "6050 Series 2x2 AGN",
111 .fw_name_pre = IWL6050_FW_PRE,
112 .ucode_api_max = IWL6050_UCODE_API_MAX,
113 .ucode_api_min = IWL6050_UCODE_API_MIN,
114 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
115 .ops = &iwl6000_ops,
116 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
117 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
118 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
119 .mod_params = &iwl50_mod_params,
120 .valid_tx_ant = ANT_BC,
121 .valid_rx_ant = ANT_BC,
122 .need_pll_cfg = false,
123};
124
125struct iwl_cfg iwl6000_3agn_cfg = {
126 .name = "6000 Series 3x3 AGN",
127 .fw_name_pre = IWL6000_FW_PRE,
128 .ucode_api_max = IWL6000_UCODE_API_MAX,
129 .ucode_api_min = IWL6000_UCODE_API_MIN,
130 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
131 .ops = &iwl6000_ops,
132 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
133 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
134 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
135 .mod_params = &iwl50_mod_params,
136 .valid_tx_ant = ANT_ABC,
137 .valid_rx_ant = ANT_ABC,
138 .need_pll_cfg = false,
139};
140
141struct iwl_cfg iwl6050_3agn_cfg = {
142 .name = "6050 Series 3x3 AGN",
143 .fw_name_pre = IWL6050_FW_PRE,
144 .ucode_api_max = IWL6050_UCODE_API_MAX,
145 .ucode_api_min = IWL6050_UCODE_API_MIN,
146 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
147 .ops = &iwl6000_ops,
148 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
149 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
150 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
151 .mod_params = &iwl50_mod_params,
152 .valid_tx_ant = ANT_ABC,
153 .valid_rx_ant = ANT_ABC,
154 .need_pll_cfg = false,
155};
156
157MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
158MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
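The new file reuses the 5000-series ops wholesale and only supplies 6000/6050 configs and firmware names. Its firmware macros build the requested filename by stringizing the API number onto the prefix (IWL6000_FW_PRE #api ".ucode"); the standalone sketch below shows the same two-level stringification trick with placeholder names, so the argument is expanded before it is stringized.

    #include <stdio.h>

    #define FW_PRE "iwlwifi-6000-"
    /* inner macro stringizes; the outer wrapper forces argument expansion first */
    #define _FW_NAME(api)   FW_PRE #api ".ucode"
    #define FW_NAME(api)    _FW_NAME(api)

    #define UCODE_API_MAX   2

    int main(void)
    {
            /* expands to "iwlwifi-6000-2.ucode" */
            printf("%s\n", FW_NAME(UCODE_API_MAX));
            return 0;
    }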
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c
deleted file mode 100644
index b8137eeae1d..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <net/mac80211.h>
31#include "iwl-dev.h"
32#include "iwl-debug.h"
33#include "iwl-commands.h"
34
35
36/**
37 * iwl_check_rxon_cmd - validate RXON structure is valid
38 *
39 * NOTE: This is really only useful during development and can eventually
40 * be #ifdef'd out once the driver is stable and folks aren't actively
41 * making changes
42 */
43int iwl_agn_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
44{
45 int error = 0;
46 int counter = 1;
47
48 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
49 error |= le32_to_cpu(rxon->flags &
50 (RXON_FLG_TGJ_NARROW_BAND_MSK |
51 RXON_FLG_RADAR_DETECT_MSK));
52 if (error)
53 IWL_WARNING("check 24G fields %d | %d\n",
54 counter++, error);
55 } else {
56 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
57 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
58 if (error)
59 IWL_WARNING("check 52 fields %d | %d\n",
60 counter++, error);
61 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
62 if (error)
63 IWL_WARNING("check 52 CCK %d | %d\n",
64 counter++, error);
65 }
66 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
67 if (error)
68 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
69
70 /* make sure basic rates 6Mbps and 1Mbps are supported */
71 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
72 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
73 if (error)
74 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
75
76 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
77 if (error)
78 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
79
80 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
81 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
82 if (error)
83 IWL_WARNING("check CCK and short slot %d | %d\n",
84 counter++, error);
85
86 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
87 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
88 if (error)
89 IWL_WARNING("check CCK & auto detect %d | %d\n",
90 counter++, error);
91
92 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
93 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
94 if (error)
95 IWL_WARNING("check TGG and auto detect %d | %d\n",
96 counter++, error);
97
98 if (error)
99 IWL_WARNING("Tuning to channel %d\n",
100 le16_to_cpu(rxon->channel));
101
102 if (error) {
103 IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
104 return -1;
105 }
106 return 0;
107}
108
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 27f50471aed..04b42c8a770 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -49,6 +49,8 @@
49#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ 49#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
50#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ 50#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
51 51
52/* max allowed rate miss before sync LQ cmd */
53#define IWL_MISSED_RATE_MAX 15
52/* max time to accum history 2 seconds */ 54/* max time to accum history 2 seconds */
53#define IWL_RATE_SCALE_FLUSH_INTVL (2*HZ) 55#define IWL_RATE_SCALE_FLUSH_INTVL (2*HZ)
54 56
@@ -148,6 +150,8 @@ struct iwl_lq_sta {
148 u16 active_mimo2_rate; 150 u16 active_mimo2_rate;
149 u16 active_mimo3_rate; 151 u16 active_mimo3_rate;
150 u16 active_rate_basic; 152 u16 active_rate_basic;
153 s8 max_rate_idx; /* Max rate set by user */
154 u8 missed_rate_counter;
151 155
152 struct iwl_link_quality_cmd lq; 156 struct iwl_link_quality_cmd lq;
153 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ 157 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
@@ -356,7 +360,7 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
356 struct ieee80211_sta *sta) 360 struct ieee80211_sta *sta)
357{ 361{
358 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 362 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
359 IWL_DEBUG_HT("Starting Tx agg: STA: %pM tid: %d\n", 363 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
360 sta->addr, tid); 364 sta->addr, tid);
361 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid); 365 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
362 } 366 }
@@ -463,8 +467,9 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
463 * Fill uCode API rate_n_flags field, based on "search" or "active" table. 467 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
464 */ 468 */
465/* FIXME:RS:remove this function and put the flags statically in the table */ 469/* FIXME:RS:remove this function and put the flags statically in the table */
466static u32 rate_n_flags_from_tbl(struct iwl_scale_tbl_info *tbl, 470static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
467 int index, u8 use_green) 471 struct iwl_scale_tbl_info *tbl,
472 int index, u8 use_green)
468{ 473{
469 u32 rate_n_flags = 0; 474 u32 rate_n_flags = 0;
470 475
@@ -475,7 +480,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_scale_tbl_info *tbl,
475 480
476 } else if (is_Ht(tbl->lq_type)) { 481 } else if (is_Ht(tbl->lq_type)) {
477 if (index > IWL_LAST_OFDM_RATE) { 482 if (index > IWL_LAST_OFDM_RATE) {
478 IWL_ERROR("invalid HT rate index %d\n", index); 483 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
479 index = IWL_LAST_OFDM_RATE; 484 index = IWL_LAST_OFDM_RATE;
480 } 485 }
481 rate_n_flags = RATE_MCS_HT_MSK; 486 rate_n_flags = RATE_MCS_HT_MSK;
@@ -487,7 +492,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_scale_tbl_info *tbl,
487 else 492 else
488 rate_n_flags |= iwl_rates[index].plcp_mimo3; 493 rate_n_flags |= iwl_rates[index].plcp_mimo3;
489 } else { 494 } else {
490 IWL_ERROR("Invalid tbl->lq_type %d\n", tbl->lq_type); 495 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
491 } 496 }
492 497
493 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & 498 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
@@ -507,7 +512,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_scale_tbl_info *tbl,
507 rate_n_flags |= RATE_MCS_GF_MSK; 512 rate_n_flags |= RATE_MCS_GF_MSK;
508 if (is_siso(tbl->lq_type) && tbl->is_SGI) { 513 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
509 rate_n_flags &= ~RATE_MCS_SGI_MSK; 514 rate_n_flags &= ~RATE_MCS_SGI_MSK;
510 IWL_ERROR("GF was set with SGI:SISO\n"); 515 IWL_ERR(priv, "GF was set with SGI:SISO\n");
511 } 516 }
512 } 517 }
513 } 518 }
@@ -688,7 +693,7 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
688 break; 693 break;
689 if (rate_mask & (1 << low)) 694 if (rate_mask & (1 << low))
690 break; 695 break;
691 IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low); 696 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
692 } 697 }
693 698
694 high = index; 699 high = index;
@@ -698,7 +703,7 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
698 break; 703 break;
699 if (rate_mask & (1 << high)) 704 if (rate_mask & (1 << high))
700 break; 705 break;
701 IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high); 706 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
702 } 707 }
703 708
704 return (high << 8) | low; 709 return (high << 8) | low;
@@ -758,7 +763,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
758 low = scale_index; 763 low = scale_index;
759 764
760out: 765out:
761 return rate_n_flags_from_tbl(tbl, low, is_green); 766 return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
762} 767}
763 768
764/* 769/*
@@ -785,7 +790,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
785 u8 active_index = 0; 790 u8 active_index = 0;
786 s32 tpt = 0; 791 s32 tpt = 0;
787 792
788 IWL_DEBUG_RATE_LIMIT("get frame ack response, update rate scale window\n"); 793 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
789 794
790 if (!ieee80211_is_data(hdr->frame_control) || 795 if (!ieee80211_is_data(hdr->frame_control) ||
791 is_multicast_ether_addr(hdr->addr1)) 796 is_multicast_ether_addr(hdr->addr1))
@@ -835,14 +840,19 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
835 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)) || 840 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
836 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate != 841 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate !=
837 hw->wiphy->bands[info->band]->bitrates[info->status.rates[0].idx].bitrate)) { 842 hw->wiphy->bands[info->band]->bitrates[info->status.rates[0].idx].bitrate)) {
838 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", tx_rate); 843 IWL_DEBUG_RATE(priv, "initial rate does not match 0x%x\n", tx_rate);
839 /* the last LQ command could failed so the LQ in ucode not 844 /* the last LQ command could failed so the LQ in ucode not
840 * the same in driver sync up 845 * the same in driver sync up
841 */ 846 */
842 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 847 lq_sta->missed_rate_counter++;
848 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
849 lq_sta->missed_rate_counter = 0;
850 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
851 }
843 goto out; 852 goto out;
844 } 853 }
845 854
855 lq_sta->missed_rate_counter = 0;
846 /* Update frame history window with "failure" for each Tx retry. */ 856 /* Update frame history window with "failure" for each Tx retry. */
847 while (retries) { 857 while (retries) {
848 /* Look up the rate and other info used for each tx attempt. 858 /* Look up the rate and other info used for each tx attempt.
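Rather than re-sending the link-quality command on every reported-rate mismatch, this hunk counts mismatches and only resyncs once IWL_MISSED_RATE_MAX (15) is exceeded, resetting the counter whenever the rates agree again. A minimal sketch of that debounce, with a stand-in for the command send:

    #include <stdio.h>

    #define MISSED_RATE_MAX 15      /* value taken from the hunk above */

    struct lq_state {
            int missed_rate_counter;
    };

    /* placeholder for the asynchronous LQ command send */
    static void resync_lq(void)
    {
            printf("resync LQ command\n");
    }

    static void on_tx_status(struct lq_state *lq, int rate_matches)
    {
            if (rate_matches) {
                    lq->missed_rate_counter = 0;
                    return;
            }
            if (++lq->missed_rate_counter > MISSED_RATE_MAX) {
                    lq->missed_rate_counter = 0;
                    resync_lq();
            }
    }

    int main(void)
    {
            struct lq_state lq = { 0 };
            int i;

            for (i = 0; i < 40; i++)        /* resyncs only every 16th mismatch */
                    on_tx_status(&lq, 0);
            return 0;
    }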
@@ -961,7 +971,7 @@ out:
961static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, 971static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
962 struct iwl_lq_sta *lq_sta) 972 struct iwl_lq_sta *lq_sta)
963{ 973{
964 IWL_DEBUG_RATE("we are staying in the same table\n"); 974 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
965 lq_sta->stay_in_tbl = 1; /* only place this gets set */ 975 lq_sta->stay_in_tbl = 1; /* only place this gets set */
966 if (is_legacy) { 976 if (is_legacy) {
967 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; 977 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
@@ -1129,7 +1139,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1129 s32 rate; 1139 s32 rate;
1130 s8 is_green = lq_sta->is_green; 1140 s8 is_green = lq_sta->is_green;
1131 1141
1132 if (!conf->ht.enabled || !sta->ht_cap.ht_supported) 1142 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1133 return -1; 1143 return -1;
1134 1144
1135 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) 1145 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
@@ -1140,7 +1150,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1140 if (priv->hw_params.tx_chains_num < 2) 1150 if (priv->hw_params.tx_chains_num < 2)
1141 return -1; 1151 return -1;
1142 1152
1143 IWL_DEBUG_RATE("LQ: try to switch to MIMO2\n"); 1153 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1144 1154
1145 tbl->lq_type = LQ_MIMO2; 1155 tbl->lq_type = LQ_MIMO2;
1146 tbl->is_dup = lq_sta->is_dup; 1156 tbl->is_dup = lq_sta->is_dup;
@@ -1169,16 +1179,16 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1169 1179
1170 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); 1180 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1171 1181
1172 IWL_DEBUG_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask); 1182 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1173 1183
1174 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { 1184 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1175 IWL_DEBUG_RATE("Can't switch with index %d rate mask %x\n", 1185 IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
1176 rate, rate_mask); 1186 rate, rate_mask);
1177 return -1; 1187 return -1;
1178 } 1188 }
1179 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green); 1189 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1180 1190
1181 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n", 1191 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1182 tbl->current_rate, is_green); 1192 tbl->current_rate, is_green);
1183 return 0; 1193 return 0;
1184} 1194}
@@ -1196,10 +1206,10 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1196 u8 is_green = lq_sta->is_green; 1206 u8 is_green = lq_sta->is_green;
1197 s32 rate; 1207 s32 rate;
1198 1208
1199 if (!conf->ht.enabled || !sta->ht_cap.ht_supported) 1209 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1200 return -1; 1210 return -1;
1201 1211
1202 IWL_DEBUG_RATE("LQ: try to switch to SISO\n"); 1212 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1203 1213
1204 tbl->is_dup = lq_sta->is_dup; 1214 tbl->is_dup = lq_sta->is_dup;
1205 tbl->lq_type = LQ_SISO; 1215 tbl->lq_type = LQ_SISO;
@@ -1230,14 +1240,14 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1230 rs_set_expected_tpt_table(lq_sta, tbl); 1240 rs_set_expected_tpt_table(lq_sta, tbl);
1231 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); 1241 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1232 1242
1233 IWL_DEBUG_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask); 1243 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1234 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { 1244 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1235 IWL_DEBUG_RATE("can not switch with index %d rate mask %x\n", 1245 IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n",
1236 rate, rate_mask); 1246 rate, rate_mask);
1237 return -1; 1247 return -1;
1238 } 1248 }
1239 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green); 1249 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1240 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n", 1250 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1241 tbl->current_rate, is_green); 1251 tbl->current_rate, is_green);
1242 return 0; 1252 return 0;
1243} 1253}
@@ -1266,7 +1276,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1266 switch (tbl->action) { 1276 switch (tbl->action) {
1267 case IWL_LEGACY_SWITCH_ANTENNA1: 1277 case IWL_LEGACY_SWITCH_ANTENNA1:
1268 case IWL_LEGACY_SWITCH_ANTENNA2: 1278 case IWL_LEGACY_SWITCH_ANTENNA2:
1269 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n"); 1279 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1270 1280
1271 lq_sta->action_counter++; 1281 lq_sta->action_counter++;
1272 1282
@@ -1290,7 +1300,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1290 } 1300 }
1291 break; 1301 break;
1292 case IWL_LEGACY_SWITCH_SISO: 1302 case IWL_LEGACY_SWITCH_SISO:
1293 IWL_DEBUG_RATE("LQ: Legacy switch to SISO\n"); 1303 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1294 1304
1295 /* Set up search table to try SISO */ 1305 /* Set up search table to try SISO */
1296 memcpy(search_tbl, tbl, sz); 1306 memcpy(search_tbl, tbl, sz);
@@ -1306,7 +1316,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1306 case IWL_LEGACY_SWITCH_MIMO2_AB: 1316 case IWL_LEGACY_SWITCH_MIMO2_AB:
1307 case IWL_LEGACY_SWITCH_MIMO2_AC: 1317 case IWL_LEGACY_SWITCH_MIMO2_AC:
1308 case IWL_LEGACY_SWITCH_MIMO2_BC: 1318 case IWL_LEGACY_SWITCH_MIMO2_BC:
1309 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n"); 1319 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1310 1320
1311 /* Set up search table to try MIMO */ 1321 /* Set up search table to try MIMO */
1312 memcpy(search_tbl, tbl, sz); 1322 memcpy(search_tbl, tbl, sz);
@@ -1375,7 +1385,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1375 switch (tbl->action) { 1385 switch (tbl->action) {
1376 case IWL_SISO_SWITCH_ANTENNA1: 1386 case IWL_SISO_SWITCH_ANTENNA1:
1377 case IWL_SISO_SWITCH_ANTENNA2: 1387 case IWL_SISO_SWITCH_ANTENNA2:
1378 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n"); 1388 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1379 1389
1380 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && 1390 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1381 tx_chains_num <= 1) || 1391 tx_chains_num <= 1) ||
@@ -1394,7 +1404,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1394 case IWL_SISO_SWITCH_MIMO2_AB: 1404 case IWL_SISO_SWITCH_MIMO2_AB:
1395 case IWL_SISO_SWITCH_MIMO2_AC: 1405 case IWL_SISO_SWITCH_MIMO2_AC:
1396 case IWL_SISO_SWITCH_MIMO2_BC: 1406 case IWL_SISO_SWITCH_MIMO2_BC:
1397 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n"); 1407 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1398 memcpy(search_tbl, tbl, sz); 1408 memcpy(search_tbl, tbl, sz);
1399 search_tbl->is_SGI = 0; 1409 search_tbl->is_SGI = 0;
1400 1410
@@ -1423,14 +1433,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1423 HT_SHORT_GI_40MHZ)) 1433 HT_SHORT_GI_40MHZ))
1424 break; 1434 break;
1425 1435
1426 IWL_DEBUG_RATE("LQ: SISO toggle SGI/NGI\n"); 1436 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1427 1437
1428 memcpy(search_tbl, tbl, sz); 1438 memcpy(search_tbl, tbl, sz);
1429 if (is_green) { 1439 if (is_green) {
1430 if (!tbl->is_SGI) 1440 if (!tbl->is_SGI)
1431 break; 1441 break;
1432 else 1442 else
1433 IWL_ERROR("SGI was set in GF+SISO\n"); 1443 IWL_ERR(priv,
1444 "SGI was set in GF+SISO\n");
1434 } 1445 }
1435 search_tbl->is_SGI = !tbl->is_SGI; 1446 search_tbl->is_SGI = !tbl->is_SGI;
1436 rs_set_expected_tpt_table(lq_sta, search_tbl); 1447 rs_set_expected_tpt_table(lq_sta, search_tbl);
@@ -1439,8 +1450,9 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1439 if (tpt >= search_tbl->expected_tpt[index]) 1450 if (tpt >= search_tbl->expected_tpt[index])
1440 break; 1451 break;
1441 } 1452 }
1442 search_tbl->current_rate = rate_n_flags_from_tbl( 1453 search_tbl->current_rate =
1443 search_tbl, index, is_green); 1454 rate_n_flags_from_tbl(priv, search_tbl,
1455 index, is_green);
1444 goto out; 1456 goto out;
1445 } 1457 }
1446 tbl->action++; 1458 tbl->action++;
@@ -1486,7 +1498,7 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1486 switch (tbl->action) { 1498 switch (tbl->action) {
1487 case IWL_MIMO2_SWITCH_ANTENNA1: 1499 case IWL_MIMO2_SWITCH_ANTENNA1:
1488 case IWL_MIMO2_SWITCH_ANTENNA2: 1500 case IWL_MIMO2_SWITCH_ANTENNA2:
1489 IWL_DEBUG_RATE("LQ: MIMO toggle Antennas\n"); 1501 IWL_DEBUG_RATE(priv, "LQ: MIMO toggle Antennas\n");
1490 1502
1491 if (tx_chains_num <= 2) 1503 if (tx_chains_num <= 2)
1492 break; 1504 break;
@@ -1502,7 +1514,7 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1502 case IWL_MIMO2_SWITCH_SISO_A: 1514 case IWL_MIMO2_SWITCH_SISO_A:
1503 case IWL_MIMO2_SWITCH_SISO_B: 1515 case IWL_MIMO2_SWITCH_SISO_B:
1504 case IWL_MIMO2_SWITCH_SISO_C: 1516 case IWL_MIMO2_SWITCH_SISO_C:
1505 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n"); 1517 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1506 1518
1507 /* Set up new search table for SISO */ 1519 /* Set up new search table for SISO */
1508 memcpy(search_tbl, tbl, sz); 1520 memcpy(search_tbl, tbl, sz);
@@ -1534,7 +1546,7 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1534 HT_SHORT_GI_40MHZ)) 1546 HT_SHORT_GI_40MHZ))
1535 break; 1547 break;
1536 1548
1537 IWL_DEBUG_RATE("LQ: MIMO toggle SGI/NGI\n"); 1549 IWL_DEBUG_RATE(priv, "LQ: MIMO toggle SGI/NGI\n");
1538 1550
1539 /* Set up new search table for MIMO */ 1551 /* Set up new search table for MIMO */
1540 memcpy(search_tbl, tbl, sz); 1552 memcpy(search_tbl, tbl, sz);
@@ -1551,8 +1563,9 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1551 if (tpt >= search_tbl->expected_tpt[index]) 1563 if (tpt >= search_tbl->expected_tpt[index])
1552 break; 1564 break;
1553 } 1565 }
1554 search_tbl->current_rate = rate_n_flags_from_tbl( 1566 search_tbl->current_rate =
1555 search_tbl, index, is_green); 1567 rate_n_flags_from_tbl(priv, search_tbl,
1568 index, is_green);
1556 goto out; 1569 goto out;
1557 1570
1558 } 1571 }
@@ -1616,7 +1629,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1616 (lq_sta->total_success > lq_sta->max_success_limit) || 1629 (lq_sta->total_success > lq_sta->max_success_limit) ||
1617 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 1630 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1618 && (flush_interval_passed))) { 1631 && (flush_interval_passed))) {
1619 IWL_DEBUG_RATE("LQ: stay is expired %d %d %d\n:", 1632 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
1620 lq_sta->total_failed, 1633 lq_sta->total_failed,
1621 lq_sta->total_success, 1634 lq_sta->total_success,
1622 flush_interval_passed); 1635 flush_interval_passed);
@@ -1639,7 +1652,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1639 lq_sta->table_count_limit) { 1652 lq_sta->table_count_limit) {
1640 lq_sta->table_count = 0; 1653 lq_sta->table_count = 0;
1641 1654
1642 IWL_DEBUG_RATE("LQ: stay in table clear win\n"); 1655 IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n");
1643 for (i = 0; i < IWL_RATE_COUNT; i++) 1656 for (i = 0; i < IWL_RATE_COUNT; i++)
1644 rs_rate_scale_clear_window( 1657 rs_rate_scale_clear_window(
1645 &(tbl->win[i])); 1658 &(tbl->win[i]));
@@ -1688,7 +1701,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1688 s32 sr; 1701 s32 sr;
1689 u8 tid = MAX_TID_COUNT; 1702 u8 tid = MAX_TID_COUNT;
1690 1703
1691 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); 1704 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1692 1705
1693 /* Send management frames and broadcast/multicast data using 1706 /* Send management frames and broadcast/multicast data using
1694 * lowest rate. */ 1707 * lowest rate. */
@@ -1720,13 +1733,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1720 /* current tx rate */ 1733 /* current tx rate */
1721 index = lq_sta->last_txrate_idx; 1734 index = lq_sta->last_txrate_idx;
1722 1735
1723 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index, 1736 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1724 tbl->lq_type); 1737 tbl->lq_type);
1725 1738
1726 /* rates available for this association, and for modulation mode */ 1739 /* rates available for this association, and for modulation mode */
1727 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); 1740 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1728 1741
1729 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask); 1742 IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask);
1730 1743
1731 /* mask with station rate restriction */ 1744 /* mask with station rate restriction */
1732 if (is_legacy(tbl->lq_type)) { 1745 if (is_legacy(tbl->lq_type)) {
@@ -1745,16 +1758,25 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1745 rate_scale_index_msk = rate_mask; 1758 rate_scale_index_msk = rate_mask;
1746 1759
1747 if (!((1 << index) & rate_scale_index_msk)) { 1760 if (!((1 << index) & rate_scale_index_msk)) {
1748 IWL_ERROR("Current Rate is not valid\n"); 1761 IWL_ERR(priv, "Current Rate is not valid\n");
1749 return; 1762 return;
1750 } 1763 }
1751 1764
1752 /* Get expected throughput table and history window for current rate */ 1765 /* Get expected throughput table and history window for current rate */
1753 if (!tbl->expected_tpt) { 1766 if (!tbl->expected_tpt) {
1754 IWL_ERROR("tbl->expected_tpt is NULL\n"); 1767 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1755 return; 1768 return;
1756 } 1769 }
1757 1770
1771 /* force user max rate if set by user */
1772 if ((lq_sta->max_rate_idx != -1) &&
1773 (lq_sta->max_rate_idx < index)) {
1774 index = lq_sta->max_rate_idx;
1775 update_lq = 1;
1776 window = &(tbl->win[index]);
1777 goto lq_update;
1778 }
1779
1758 window = &(tbl->win[index]); 1780 window = &(tbl->win[index]);
1759 1781
1760 /* 1782 /*
@@ -1767,7 +1789,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1767 fail_count = window->counter - window->success_counter; 1789 fail_count = window->counter - window->success_counter;
1768 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && 1790 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1769 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { 1791 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1770 IWL_DEBUG_RATE("LQ: still below TH. succ=%d total=%d " 1792 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1771 "for index %d\n", 1793 "for index %d\n",
1772 window->success_counter, window->counter, index); 1794 window->success_counter, window->counter, index);
1773 1795
@@ -1795,7 +1817,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1795 * continuing to use the setup that we've been trying. */ 1817 * continuing to use the setup that we've been trying. */
1796 if (window->average_tpt > lq_sta->last_tpt) { 1818 if (window->average_tpt > lq_sta->last_tpt) {
1797 1819
1798 IWL_DEBUG_RATE("LQ: SWITCHING TO NEW TABLE " 1820 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1799 "suc=%d cur-tpt=%d old-tpt=%d\n", 1821 "suc=%d cur-tpt=%d old-tpt=%d\n",
1800 window->success_ratio, 1822 window->success_ratio,
1801 window->average_tpt, 1823 window->average_tpt,
@@ -1811,7 +1833,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1811 /* Else poor success; go back to mode in "active" table */ 1833 /* Else poor success; go back to mode in "active" table */
1812 } else { 1834 } else {
1813 1835
1814 IWL_DEBUG_RATE("LQ: GOING BACK TO THE OLD TABLE " 1836 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1815 "suc=%d cur-tpt=%d old-tpt=%d\n", 1837 "suc=%d cur-tpt=%d old-tpt=%d\n",
1816 window->success_ratio, 1838 window->success_ratio,
1817 window->average_tpt, 1839 window->average_tpt,
@@ -1846,6 +1868,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1846 low = high_low & 0xff; 1868 low = high_low & 0xff;
1847 high = (high_low >> 8) & 0xff; 1869 high = (high_low >> 8) & 0xff;
1848 1870
1871 /* If user set max rate, dont allow higher than user constrain */
1872 if ((lq_sta->max_rate_idx != -1) &&
1873 (lq_sta->max_rate_idx < high))
1874 high = IWL_RATE_INVALID;
1875
1849 sr = window->success_ratio; 1876 sr = window->success_ratio;
1850 1877
1851 /* Collect measured throughputs for current and adjacent rates */ 1878 /* Collect measured throughputs for current and adjacent rates */
@@ -1859,7 +1886,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1859 1886
1860 /* Too many failures, decrease rate */ 1887 /* Too many failures, decrease rate */
1861 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { 1888 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1862 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); 1889 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
1863 scale_action = -1; 1890 scale_action = -1;
1864 1891
1865 /* No throughput measured yet for adjacent rates; try increase. */ 1892 /* No throughput measured yet for adjacent rates; try increase. */
@@ -1890,8 +1917,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1890 sr >= IWL_RATE_INCREASE_TH) { 1917 sr >= IWL_RATE_INCREASE_TH) {
1891 scale_action = 1; 1918 scale_action = 1;
1892 } else { 1919 } else {
1893 IWL_DEBUG_RATE 1920 IWL_DEBUG_RATE(priv,
1894 ("decrease rate because of high tpt\n"); 1921 "decrease rate because of high tpt\n");
1895 scale_action = -1; 1922 scale_action = -1;
1896 } 1923 }
1897 1924
@@ -1899,8 +1926,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1899 } else if (low_tpt != IWL_INVALID_VALUE) { 1926 } else if (low_tpt != IWL_INVALID_VALUE) {
1900 /* Lower rate has better throughput */ 1927 /* Lower rate has better throughput */
1901 if (low_tpt > current_tpt) { 1928 if (low_tpt > current_tpt) {
1902 IWL_DEBUG_RATE 1929 IWL_DEBUG_RATE(priv,
1903 ("decrease rate because of low tpt\n"); 1930 "decrease rate because of low tpt\n");
1904 scale_action = -1; 1931 scale_action = -1;
1905 } else if (sr >= IWL_RATE_INCREASE_TH) { 1932 } else if (sr >= IWL_RATE_INCREASE_TH) {
1906 scale_action = 1; 1933 scale_action = 1;
@@ -1937,14 +1964,14 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1937 break; 1964 break;
1938 } 1965 }
1939 1966
1940 IWL_DEBUG_RATE("choose rate scale index %d action %d low %d " 1967 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
1941 "high %d type %d\n", 1968 "high %d type %d\n",
1942 index, scale_action, low, high, tbl->lq_type); 1969 index, scale_action, low, high, tbl->lq_type);
1943 1970
1944lq_update: 1971lq_update:
1945 /* Replace uCode's rate table for the destination station. */ 1972 /* Replace uCode's rate table for the destination station. */
1946 if (update_lq) { 1973 if (update_lq) {
1947 rate = rate_n_flags_from_tbl(tbl, index, is_green); 1974 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
1948 rs_fill_link_cmd(priv, lq_sta, rate); 1975 rs_fill_link_cmd(priv, lq_sta, rate);
1949 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1976 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
1950 } 1977 }
@@ -1981,7 +2008,7 @@ lq_update:
1981 /* Use new "search" start rate */ 2008 /* Use new "search" start rate */
1982 index = iwl_hwrate_to_plcp_idx(tbl->current_rate); 2009 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
1983 2010
1984 IWL_DEBUG_RATE("Switch current mcs: %X index: %d\n", 2011 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
1985 tbl->current_rate, index); 2012 tbl->current_rate, index);
1986 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); 2013 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
1987 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2014 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
@@ -1993,10 +2020,10 @@ lq_update:
1993 * stay with best antenna legacy modulation for a while 2020 * stay with best antenna legacy modulation for a while
1994 * before next round of mode comparisons. */ 2021 * before next round of mode comparisons. */
1995 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); 2022 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
1996 if (is_legacy(tbl1->lq_type) && !conf->ht.enabled && 2023 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
1997 lq_sta->action_counter >= 1) { 2024 lq_sta->action_counter >= 1) {
1998 lq_sta->action_counter = 0; 2025 lq_sta->action_counter = 0;
1999 IWL_DEBUG_RATE("LQ: STAY in legacy table\n"); 2026 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2000 rs_set_stay_in_table(priv, 1, lq_sta); 2027 rs_set_stay_in_table(priv, 1, lq_sta);
2001 } 2028 }
2002 2029
@@ -2008,7 +2035,7 @@ lq_update:
2008 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && 2035 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2009 (lq_sta->tx_agg_tid_en & (1 << tid)) && 2036 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2010 (tid != MAX_TID_COUNT)) { 2037 (tid != MAX_TID_COUNT)) {
2011 IWL_DEBUG_RATE("try to aggregate tid %d\n", tid); 2038 IWL_DEBUG_RATE(priv, "try to aggregate tid %d\n", tid);
2012 rs_tl_turn_on_agg(priv, tid, lq_sta, sta); 2039 rs_tl_turn_on_agg(priv, tid, lq_sta, sta);
2013 } 2040 }
2014 lq_sta->action_counter = 0; 2041 lq_sta->action_counter = 0;
@@ -2028,7 +2055,7 @@ lq_update:
2028 } 2055 }
2029 2056
2030out: 2057out:
2031 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green); 2058 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2032 i = index; 2059 i = index;
2033 lq_sta->last_txrate_idx = i; 2060 lq_sta->last_txrate_idx = i;
2034 2061
@@ -2081,7 +2108,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2081 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) 2108 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2082 rs_toggle_antenna(valid_tx_ant, &rate, tbl); 2109 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2083 2110
2084 rate = rate_n_flags_from_tbl(tbl, rate_idx, use_green); 2111 rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2085 tbl->current_rate = rate; 2112 tbl->current_rate = rate;
2086 rs_set_expected_tpt_table(lq_sta, tbl); 2113 rs_set_expected_tpt_table(lq_sta, tbl);
2087 rs_fill_link_cmd(NULL, lq_sta, rate); 2114 rs_fill_link_cmd(NULL, lq_sta, rate);
@@ -2104,7 +2131,18 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2104 int rate_idx; 2131 int rate_idx;
2105 u64 mask_bit = 0; 2132 u64 mask_bit = 0;
2106 2133
2107 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2134 IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
2135
2136 /* Get max rate if user set max rate */
2137 if (lq_sta) {
2138 lq_sta->max_rate_idx = txrc->max_rate_idx;
2139 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2140 (lq_sta->max_rate_idx != -1))
2141 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
2142 if ((lq_sta->max_rate_idx < 0) ||
2143 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2144 lq_sta->max_rate_idx = -1;
2145 }
2108 2146
2109 if (sta) 2147 if (sta)
2110 mask_bit = sta->supp_rates[sband->band]; 2148 mask_bit = sta->supp_rates[sband->band];
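[Editor's note] A standalone sketch of the max_rate_idx handling added in the hunk above: mac80211 supplies a per-band index, so on 5 GHz the driver offsets it past the CCK slots, and any out-of-range value is treated as "no limit" (-1). IWL_FIRST_OFDM_RATE and IWL_RATE_COUNT below are assumed stand-ins.

/* translate_max_rate_idx: per-band mac80211 index -> driver global rate index */
#include <stdio.h>

#define IWL_FIRST_OFDM_RATE 4   /* assumed: CCK rates occupy indexes 0..3 */
#define IWL_RATE_COUNT      13  /* assumed */

enum band { BAND_2GHZ, BAND_5GHZ };

static int translate_max_rate_idx(int max_rate_idx, enum band band)
{
        /* On 5 GHz mac80211 counts OFDM rates from 0; the driver does not. */
        if (band == BAND_5GHZ && max_rate_idx != -1)
                max_rate_idx += IWL_FIRST_OFDM_RATE;
        /* Out-of-range values mean "no user limit". */
        if (max_rate_idx < 0 || max_rate_idx >= IWL_RATE_COUNT)
                max_rate_idx = -1;
        return max_rate_idx;
}

int main(void)
{
        printf("%d\n", translate_max_rate_idx(2, BAND_5GHZ));   /* 6 */
        printf("%d\n", translate_max_rate_idx(-1, BAND_2GHZ));  /* -1 */
        printf("%d\n", translate_max_rate_idx(40, BAND_2GHZ));  /* -1 */
        return 0;
}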
@@ -2129,7 +2167,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2129 u8 sta_id = iwl_find_station(priv, hdr->addr1); 2167 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2130 2168
2131 if (sta_id == IWL_INVALID_STATION) { 2169 if (sta_id == IWL_INVALID_STATION) {
2132 IWL_DEBUG_RATE("LQ: ADD station %pM\n", 2170 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
2133 hdr->addr1); 2171 hdr->addr1);
2134 sta_id = iwl_add_station_flags(priv, hdr->addr1, 2172 sta_id = iwl_add_station_flags(priv, hdr->addr1,
2135 0, CMD_ASYNC, NULL); 2173 0, CMD_ASYNC, NULL);
@@ -2158,7 +2196,7 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2158 int i, j; 2196 int i, j;
2159 2197
2160 priv = (struct iwl_priv *)priv_rate; 2198 priv = (struct iwl_priv *)priv_rate;
2161 IWL_DEBUG_RATE("create station rate scale window\n"); 2199 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2162 2200
2163 lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp); 2201 lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp);
2164 2202
@@ -2182,6 +2220,8 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2182 struct ieee80211_conf *conf = &priv->hw->conf; 2220 struct ieee80211_conf *conf = &priv->hw->conf;
2183 struct iwl_lq_sta *lq_sta = priv_sta; 2221 struct iwl_lq_sta *lq_sta = priv_sta;
2184 u16 mask_bit = 0; 2222 u16 mask_bit = 0;
2223 int count;
2224 int start_rate = 0;
2185 2225
2186 lq_sta->flush_timer = 0; 2226 lq_sta->flush_timer = 0;
2187 lq_sta->supp_rates = sta->supp_rates[sband->band]; 2227 lq_sta->supp_rates = sta->supp_rates[sband->band];
@@ -2189,7 +2229,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2189 for (i = 0; i < IWL_RATE_COUNT; i++) 2229 for (i = 0; i < IWL_RATE_COUNT; i++)
2190 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); 2230 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2191 2231
2192 IWL_DEBUG_RATE("LQ: *** rate scale station global init ***\n"); 2232 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init ***\n");
2193 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2233 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2194 * the lowest or the highest rate.. Could consider using RSSI from 2234 * the lowest or the highest rate.. Could consider using RSSI from
2195 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2235 * previous packets? Need to have IEEE 802.1X auth succeed immediately
@@ -2200,10 +2240,10 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2200 u8 sta_id = iwl_find_station(priv, sta->addr); 2240 u8 sta_id = iwl_find_station(priv, sta->addr);
2201 2241
2202 /* for IBSS the call are from tasklet */ 2242 /* for IBSS the call are from tasklet */
2203 IWL_DEBUG_RATE("LQ: ADD station %pM\n", sta->addr); 2243 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
2204 2244
2205 if (sta_id == IWL_INVALID_STATION) { 2245 if (sta_id == IWL_INVALID_STATION) {
2206 IWL_DEBUG_RATE("LQ: ADD station %pM\n", sta->addr); 2246 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
2207 sta_id = iwl_add_station_flags(priv, sta->addr, 2247 sta_id = iwl_add_station_flags(priv, sta->addr,
2208 0, CMD_ASYNC, NULL); 2248 0, CMD_ASYNC, NULL);
2209 } 2249 }
@@ -2216,6 +2256,8 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2216 } 2256 }
2217 2257
2218 lq_sta->is_dup = 0; 2258 lq_sta->is_dup = 0;
2259 lq_sta->max_rate_idx = -1;
2260 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2219 lq_sta->is_green = rs_use_green(priv, conf); 2261 lq_sta->is_green = rs_use_green(priv, conf);
2220 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2262 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2221 lq_sta->active_rate_basic = priv->active_rate_basic; 2263 lq_sta->active_rate_basic = priv->active_rate_basic;
@@ -2240,7 +2282,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2240 lq_sta->active_mimo3_rate &= ~((u16)0x2); 2282 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2241 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE; 2283 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2242 2284
2243 IWL_DEBUG_RATE("SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n", 2285 IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
2244 lq_sta->active_siso_rate, 2286 lq_sta->active_siso_rate,
2245 lq_sta->active_mimo2_rate, 2287 lq_sta->active_mimo2_rate,
2246 lq_sta->active_mimo3_rate); 2288 lq_sta->active_mimo3_rate);
@@ -2254,16 +2296,20 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2254 lq_sta->drv = priv; 2296 lq_sta->drv = priv;
2255 2297
2256 /* Find highest tx rate supported by hardware and destination station */ 2298 /* Find highest tx rate supported by hardware and destination station */
2257 mask_bit = sta->supp_rates[sband->band] & lq_sta->active_legacy_rate; 2299 mask_bit = sta->supp_rates[sband->band];
2258 lq_sta->last_txrate_idx = 3; 2300 count = sband->n_bitrates;
2259 for (i = 0; i < sband->n_bitrates; i++) 2301 if (sband->band == IEEE80211_BAND_5GHZ) {
2302 count += IWL_FIRST_OFDM_RATE;
2303 start_rate = IWL_FIRST_OFDM_RATE;
2304 mask_bit <<= IWL_FIRST_OFDM_RATE;
2305 }
2306
2307 mask_bit = mask_bit & lq_sta->active_legacy_rate;
2308 lq_sta->last_txrate_idx = 4;
2309 for (i = start_rate; i < count; i++)
2260 if (mask_bit & BIT(i)) 2310 if (mask_bit & BIT(i))
2261 lq_sta->last_txrate_idx = i; 2311 lq_sta->last_txrate_idx = i;
2262 2312
2263 /* For MODE_IEEE80211A, skip over cck rates in global rate table */
2264 if (sband->band == IEEE80211_BAND_5GHZ)
2265 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2266
2267 rs_initialize_lq(priv, conf, sta, lq_sta); 2313 rs_initialize_lq(priv, conf, sta, lq_sta);
2268} 2314}
2269 2315
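[Editor's note] A standalone sketch of the "find highest supported legacy rate" loop rewritten in the hunk above: on 5 GHz the supported-rates bitmap is shifted up so the CCK slots are skipped, then the highest set bit that is also in the active legacy mask wins. The constants are assumed stand-ins.

/* highest_tx_rate: highest legacy rate index supported by both sides */
#include <stdio.h>
#include <stdint.h>

#define IWL_FIRST_OFDM_RATE 4   /* assumed */

enum band { BAND_2GHZ, BAND_5GHZ };

static int highest_tx_rate(uint16_t supp_rates, uint16_t active_legacy,
                           int n_bitrates, enum band band)
{
        int start = 0, count = n_bitrates, i;
        int idx = 4;                    /* default, as in the hunk above */
        uint16_t mask = supp_rates;

        if (band == BAND_5GHZ) {
                start = IWL_FIRST_OFDM_RATE;
                count += IWL_FIRST_OFDM_RATE;
                mask <<= IWL_FIRST_OFDM_RATE;   /* skip the CCK slots */
        }
        mask &= active_legacy;
        for (i = start; i < count; i++)
                if (mask & (1u << i))
                        idx = i;        /* last (highest) set bit wins */
        return idx;
}

int main(void)
{
        /* 8 OFDM rates supported on 5 GHz, all legacy rates active: index 11 */
        printf("%d\n", highest_tx_rate(0x00FF, 0x0FFF, 8, BAND_5GHZ));
        return 0;
}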
@@ -2402,9 +2448,9 @@ static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2402 struct iwl_lq_sta *lq_sta = priv_sta; 2448 struct iwl_lq_sta *lq_sta = priv_sta;
2403 struct iwl_priv *priv __maybe_unused = priv_r; 2449 struct iwl_priv *priv __maybe_unused = priv_r;
2404 2450
2405 IWL_DEBUG_RATE("enter\n"); 2451 IWL_DEBUG_RATE(priv, "enter\n");
2406 kfree(lq_sta); 2452 kfree(lq_sta);
2407 IWL_DEBUG_RATE("leave\n"); 2453 IWL_DEBUG_RATE(priv, "leave\n");
2408} 2454}
2409 2455
2410 2456
@@ -2429,9 +2475,9 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2429 else 2475 else
2430 *rate_n_flags = 0x820A; 2476 *rate_n_flags = 0x820A;
2431 } 2477 }
2432 IWL_DEBUG_RATE("Fixed rate ON\n"); 2478 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2433 } else { 2479 } else {
2434 IWL_DEBUG_RATE("Fixed rate OFF\n"); 2480 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2435 } 2481 }
2436} 2482}
2437 2483
@@ -2460,7 +2506,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2460 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2506 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2461 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2507 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2462 2508
2463 IWL_DEBUG_RATE("sta_id %d rate 0x%X\n", 2509 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
2464 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); 2510 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2465 2511
2466 if (lq_sta->dbg_fixed_rate) { 2512 if (lq_sta->dbg_fixed_rate) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 78ee83adf74..345806dd887 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -27,8 +27,6 @@
27#ifndef __iwl_agn_rs_h__ 27#ifndef __iwl_agn_rs_h__
28#define __iwl_agn_rs_h__ 28#define __iwl_agn_rs_h__
29 29
30#include "iwl-dev.h"
31
32struct iwl_rate_info { 30struct iwl_rate_info {
33 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
34 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ 32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
@@ -43,6 +41,19 @@ struct iwl_rate_info {
43 u8 next_rs_tgg; /* next rate used in TGG rs algo */ 41 u8 next_rs_tgg; /* next rate used in TGG rs algo */
44}; 42};
45 43
44struct iwl3945_rate_info {
45 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
46 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
47 u8 prev_ieee; /* previous rate in IEEE speeds */
48 u8 next_ieee; /* next rate in IEEE speeds */
49 u8 prev_rs; /* previous rate used in rs algo */
50 u8 next_rs; /* next rate used in rs algo */
51 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
52 u8 next_rs_tgg; /* next rate used in TGG rs algo */
53 u8 table_rs_index; /* index in rate scale table cmd */
54 u8 prev_table_rs; /* prev in rate table cmd */
55};
56
46/* 57/*
47 * These serve as indexes into 58 * These serve as indexes into
48 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; 59 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
@@ -62,12 +73,30 @@ enum {
62 IWL_RATE_54M_INDEX, 73 IWL_RATE_54M_INDEX,
63 IWL_RATE_60M_INDEX, 74 IWL_RATE_60M_INDEX,
64 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/ 75 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
76 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
65 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, 77 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
66 IWL_RATE_INVALID = IWL_RATE_COUNT, 78 IWL_RATE_INVALID = IWL_RATE_COUNT,
67}; 79};
68 80
69enum { 81enum {
82 IWL_RATE_6M_INDEX_TABLE = 0,
83 IWL_RATE_9M_INDEX_TABLE,
84 IWL_RATE_12M_INDEX_TABLE,
85 IWL_RATE_18M_INDEX_TABLE,
86 IWL_RATE_24M_INDEX_TABLE,
87 IWL_RATE_36M_INDEX_TABLE,
88 IWL_RATE_48M_INDEX_TABLE,
89 IWL_RATE_54M_INDEX_TABLE,
90 IWL_RATE_1M_INDEX_TABLE,
91 IWL_RATE_2M_INDEX_TABLE,
92 IWL_RATE_5M_INDEX_TABLE,
93 IWL_RATE_11M_INDEX_TABLE,
94 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
95};
96
97enum {
70 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, 98 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
99 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
71 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, 100 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
72 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, 101 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
73 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, 102 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
@@ -248,6 +277,7 @@ enum {
248#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) 277#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
249 278
250extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; 279extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
280extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
251 281
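[Editor's note] A tiny standalone example (editor's addition) of what the TIME_WRAP_AROUND() macro a few lines up computes: the elapsed time from x to y when the timestamp counter may have wrapped past zero.

#include <stdio.h>
#include <stdint.h>

#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0 - (x)) + (y))

int main(void)
{
        uint32_t before = 0xFFFFFFF0u, after = 0x10u;
        /* after - before would be "negative"; the macro yields 0x20 instead */
        printf("0x%x\n", (unsigned)TIME_WRAP_AROUND(before, after));
        return 0;
}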
252enum iwl_table_type { 282enum iwl_table_type {
253 LQ_NONE, 283 LQ_NONE,
@@ -303,6 +333,23 @@ static inline u8 iwl_get_prev_ieee_rate(u8 rate_index)
303 return rate; 333 return rate;
304} 334}
305 335
336static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
337{
338 u8 rate = iwl3945_rates[rate_index].prev_ieee;
339
340 if (rate == IWL_RATE_INVALID)
341 rate = rate_index;
342 return rate;
343}
344
345/**
346 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
347 *
348 * The specific throughput table used is based on the type of network
 349 * the station is associated with, including A, B, G, and G w/ TGG protection
350 */
351extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
352
306/** 353/**
307 * iwl_rate_control_register - Register the rate control algorithm callbacks 354 * iwl_rate_control_register - Register the rate control algorithm callbacks
308 * 355 *
@@ -314,6 +361,7 @@ static inline u8 iwl_get_prev_ieee_rate(u8 rate_index)
314 * 361 *
315 */ 362 */
316extern int iwlagn_rate_control_register(void); 363extern int iwlagn_rate_control_register(void);
364extern int iwl3945_rate_control_register(void);
317 365
318/** 366/**
319 * iwl_rate_control_unregister - Unregister the rate control callbacks 367 * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -322,5 +370,6 @@ extern int iwlagn_rate_control_register(void);
322 * the driver is unloaded. 370 * the driver is unloaded.
323 */ 371 */
324extern void iwlagn_rate_control_unregister(void); 372extern void iwlagn_rate_control_unregister(void);
373extern void iwl3945_rate_control_unregister(void);
325 374
326#endif /* __iwl_agn__rs__ */ 375#endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 36bafeb353c..397577c06c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -44,6 +44,8 @@
44 44
45#include <asm/div64.h> 45#include <asm/div64.h>
46 46
47#define DRV_NAME "iwlagn"
48
47#include "iwl-eeprom.h" 49#include "iwl-eeprom.h"
48#include "iwl-dev.h" 50#include "iwl-dev.h"
49#include "iwl-core.h" 51#include "iwl-core.h"
@@ -61,9 +63,7 @@
61 63
62/* 64/*
63 * module name, copyright, version, etc. 65 * module name, copyright, version, etc.
64 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
65 */ 66 */
66
67#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" 67#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
68 68
69#ifdef CONFIG_IWLWIFI_DEBUG 69#ifdef CONFIG_IWLWIFI_DEBUG
@@ -94,66 +94,6 @@ MODULE_ALIAS("iwl4965");
94 94
95/**************************************************************/ 95/**************************************************************/
96 96
97
98
99static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
100{
101 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
102
103 if (hw_decrypt)
104 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
105 else
106 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
107
108}
109
110/**
111 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
112 * @priv: staging_rxon is compared to active_rxon
113 *
114 * If the RXON structure is changing enough to require a new tune,
115 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
116 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
117 */
118static int iwl_full_rxon_required(struct iwl_priv *priv)
119{
120
121 /* These items are only settable from the full RXON command */
122 if (!(iwl_is_associated(priv)) ||
123 compare_ether_addr(priv->staging_rxon.bssid_addr,
124 priv->active_rxon.bssid_addr) ||
125 compare_ether_addr(priv->staging_rxon.node_addr,
126 priv->active_rxon.node_addr) ||
127 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
128 priv->active_rxon.wlap_bssid_addr) ||
129 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
130 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
131 (priv->staging_rxon.air_propagation !=
132 priv->active_rxon.air_propagation) ||
133 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
134 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
135 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
136 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
137 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
138 return 1;
139
140 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
141 * be updated with the RXON_ASSOC command -- however only some
142 * flag transitions are allowed using RXON_ASSOC */
143
144 /* Check if we are not switching bands */
145 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
146 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
147 return 1;
148
149 /* Check if we are switching association toggle */
150 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
151 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
152 return 1;
153
154 return 0;
155}
156
157/** 97/**
158 * iwl_commit_rxon - commit staging_rxon to hardware 98 * iwl_commit_rxon - commit staging_rxon to hardware
159 * 99 *
@@ -179,9 +119,9 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
179 * 5000, but will not damage 4965 */ 119 * 5000, but will not damage 4965 */
180 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; 120 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
181 121
182 ret = iwl_agn_check_rxon_cmd(&priv->staging_rxon); 122 ret = iwl_check_rxon_cmd(priv);
183 if (ret) { 123 if (ret) {
184 IWL_ERROR("Invalid RXON configuration. Not committing.\n"); 124 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
185 return -EINVAL; 125 return -EINVAL;
186 } 126 }
187 127
@@ -191,7 +131,7 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
191 if (!iwl_full_rxon_required(priv)) { 131 if (!iwl_full_rxon_required(priv)) {
192 ret = iwl_send_rxon_assoc(priv); 132 ret = iwl_send_rxon_assoc(priv);
193 if (ret) { 133 if (ret) {
194 IWL_ERROR("Error setting RXON_ASSOC (%d)\n", ret); 134 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
195 return ret; 135 return ret;
196 } 136 }
197 137
@@ -207,7 +147,7 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
 207 * we must clear the association from the active configuration 147 * we must clear the association from the active configuration
208 * before we apply the new config */ 148 * before we apply the new config */
209 if (iwl_is_associated(priv) && new_assoc) { 149 if (iwl_is_associated(priv) && new_assoc) {
210 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); 150 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
211 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 151 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
212 152
213 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 153 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -218,12 +158,12 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
218 * active_rxon back to what it was previously */ 158 * active_rxon back to what it was previously */
219 if (ret) { 159 if (ret) {
220 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; 160 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
221 IWL_ERROR("Error clearing ASSOC_MSK (%d)\n", ret); 161 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
222 return ret; 162 return ret;
223 } 163 }
224 } 164 }
225 165
226 IWL_DEBUG_INFO("Sending RXON\n" 166 IWL_DEBUG_INFO(priv, "Sending RXON\n"
227 "* with%s RXON_FILTER_ASSOC_MSK\n" 167 "* with%s RXON_FILTER_ASSOC_MSK\n"
228 "* channel = %d\n" 168 "* channel = %d\n"
229 "* bssid = %pM\n", 169 "* bssid = %pM\n",
@@ -242,7 +182,7 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
242 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 182 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
243 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); 183 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
244 if (ret) { 184 if (ret) {
245 IWL_ERROR("Error setting new RXON (%d)\n", ret); 185 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
246 return ret; 186 return ret;
247 } 187 }
248 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 188 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
@@ -256,7 +196,7 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
256 /* Add the broadcast address so we can send broadcast frames */ 196 /* Add the broadcast address so we can send broadcast frames */
257 if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) == 197 if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) ==
258 IWL_INVALID_STATION) { 198 IWL_INVALID_STATION) {
259 IWL_ERROR("Error adding BROADCAST address for transmit.\n"); 199 IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
260 return -EIO; 200 return -EIO;
261 } 201 }
262 202
@@ -267,13 +207,15 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
267 ret = iwl_rxon_add_station(priv, 207 ret = iwl_rxon_add_station(priv,
268 priv->active_rxon.bssid_addr, 1); 208 priv->active_rxon.bssid_addr, 1);
269 if (ret == IWL_INVALID_STATION) { 209 if (ret == IWL_INVALID_STATION) {
270 IWL_ERROR("Error adding AP address for TX.\n"); 210 IWL_ERR(priv,
211 "Error adding AP address for TX.\n");
271 return -EIO; 212 return -EIO;
272 } 213 }
273 priv->assoc_station_added = 1; 214 priv->assoc_station_added = 1;
274 if (priv->default_wep_key && 215 if (priv->default_wep_key &&
275 iwl_send_static_wepkey_cmd(priv, 0)) 216 iwl_send_static_wepkey_cmd(priv, 0))
276 IWL_ERROR("Could not send WEP static key.\n"); 217 IWL_ERR(priv,
218 "Could not send WEP static key.\n");
277 } 219 }
278 220
279 /* Apply the new configuration 221 /* Apply the new configuration
@@ -282,7 +224,7 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
282 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 224 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
283 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); 225 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
284 if (ret) { 226 if (ret) {
285 IWL_ERROR("Error setting new RXON (%d)\n", ret); 227 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
286 return ret; 228 return ret;
287 } 229 }
288 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 230 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
@@ -294,7 +236,7 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
294 * send a new TXPOWER command or we won't be able to Tx any frames */ 236 * send a new TXPOWER command or we won't be able to Tx any frames */
295 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 237 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
296 if (ret) { 238 if (ret) {
297 IWL_ERROR("Error sending TX power (%d)\n", ret); 239 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
298 return ret; 240 return ret;
299 } 241 }
300 242
@@ -308,25 +250,11 @@ void iwl_update_chain_flags(struct iwl_priv *priv)
308 iwl_commit_rxon(priv); 250 iwl_commit_rxon(priv);
309} 251}
310 252
311static int iwl_send_bt_config(struct iwl_priv *priv)
312{
313 struct iwl_bt_cmd bt_cmd = {
314 .flags = 3,
315 .lead_time = 0xAA,
316 .max_kill = 1,
317 .kill_ack_mask = 0,
318 .kill_cts_mask = 0,
319 };
320
321 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
322 sizeof(struct iwl_bt_cmd), &bt_cmd);
323}
324
325static void iwl_clear_free_frames(struct iwl_priv *priv) 253static void iwl_clear_free_frames(struct iwl_priv *priv)
326{ 254{
327 struct list_head *element; 255 struct list_head *element;
328 256
329 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", 257 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
330 priv->frames_count); 258 priv->frames_count);
331 259
332 while (!list_empty(&priv->free_frames)) { 260 while (!list_empty(&priv->free_frames)) {
@@ -337,7 +265,7 @@ static void iwl_clear_free_frames(struct iwl_priv *priv)
337 } 265 }
338 266
339 if (priv->frames_count) { 267 if (priv->frames_count) {
340 IWL_WARNING("%d frames still in use. Did we lose one?\n", 268 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
341 priv->frames_count); 269 priv->frames_count);
342 priv->frames_count = 0; 270 priv->frames_count = 0;
343 } 271 }
@@ -350,7 +278,7 @@ static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
350 if (list_empty(&priv->free_frames)) { 278 if (list_empty(&priv->free_frames)) {
351 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 279 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
352 if (!frame) { 280 if (!frame) {
353 IWL_ERROR("Could not allocate frame!\n"); 281 IWL_ERR(priv, "Could not allocate frame!\n");
354 return NULL; 282 return NULL;
355 } 283 }
356 284
@@ -386,31 +314,6 @@ static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
386 return priv->ibss_beacon->len; 314 return priv->ibss_beacon->len;
387} 315}
388 316
389static u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
390{
391 int i;
392 int rate_mask;
393
394 /* Set rate mask*/
395 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
396 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
397 else
398 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
399
400 /* Find lowest valid rate */
401 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
402 i = iwl_rates[i].next_ieee) {
403 if (rate_mask & (1 << i))
404 return iwl_rates[i].plcp;
405 }
406
407 /* No valid rate was found. Assign the lowest one */
408 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
409 return IWL_RATE_1M_PLCP;
410 else
411 return IWL_RATE_6M_PLCP;
412}
413
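[Editor's note] The removed iwl_rate_get_lowest_plcp() above walks the rates in IEEE speed order and returns the PLCP of the first one present in the basic-rate mask, falling back to the lowest rate of the band. A toy standalone sketch of the same idea (the rate table contents here are made-up placeholders, not the driver's):

/* lowest_plcp: PLCP of the slowest rate present in the basic-rate mask */
#include <stdio.h>
#include <stdint.h>

#define RATE_INVALID 0xFF

struct rate_info {
        uint8_t plcp;       /* value programmed into the beacon command */
        uint8_t next_ieee;  /* next rate in IEEE speed order, or RATE_INVALID */
};

/* Four-entry toy table: 1M -> 2M -> 5.5M -> 11M (CCK only). */
static const struct rate_info rates[] = {
        { 10, 1 }, { 20, 2 }, { 55, 3 }, { 110, RATE_INVALID },
};

static uint8_t lowest_plcp(unsigned int rate_mask, uint8_t fallback_plcp)
{
        uint8_t i;

        for (i = 0; i != RATE_INVALID; i = rates[i].next_ieee)
                if (rate_mask & (1u << i))
                        return rates[i].plcp;
        return fallback_plcp;   /* no valid rate found: use the band's lowest */
}

int main(void)
{
        printf("%u\n", (unsigned)lowest_plcp(0x0C, 10));  /* bits 2,3 set -> 55 */
        printf("%u\n", (unsigned)lowest_plcp(0x00, 10));  /* empty mask  -> 10 */
        return 0;
}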
414static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, 317static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
415 struct iwl_frame *frame, u8 rate) 318 struct iwl_frame *frame, u8 rate)
416{ 319{
@@ -452,7 +355,7 @@ static int iwl_send_beacon_cmd(struct iwl_priv *priv)
452 frame = iwl_get_free_frame(priv); 355 frame = iwl_get_free_frame(priv);
453 356
454 if (!frame) { 357 if (!frame) {
455 IWL_ERROR("Could not obtain free frame buffer for beacon " 358 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
456 "command.\n"); 359 "command.\n");
457 return -ENOMEM; 360 return -ENOMEM;
458 } 361 }
@@ -469,6 +372,159 @@ static int iwl_send_beacon_cmd(struct iwl_priv *priv)
469 return rc; 372 return rc;
470} 373}
471 374
375static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
376{
377 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
378
379 dma_addr_t addr = get_unaligned_le32(&tb->lo);
380 if (sizeof(dma_addr_t) > sizeof(u32))
381 addr |=
382 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
383
384 return addr;
385}
386
387static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
388{
389 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
390
391 return le16_to_cpu(tb->hi_n_len) >> 4;
392}
393
394static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
395 dma_addr_t addr, u16 len)
396{
397 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
398 u16 hi_n_len = len << 4;
399
400 put_unaligned_le32(addr, &tb->lo);
401 if (sizeof(dma_addr_t) > sizeof(u32))
402 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
403
404 tb->hi_n_len = cpu_to_le16(hi_n_len);
405
406 tfd->num_tbs = idx + 1;
407}
408
409static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
410{
411 return tfd->num_tbs & 0x1f;
412}
413
414/**
415 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
416 * @priv - driver private data
417 * @txq - tx queue
418 *
419 * Does NOT advance any TFD circular buffer read/write indexes
420 * Does NOT free the TFD itself (which is within circular buffer)
421 */
422void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
423{
424 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
425 struct iwl_tfd *tfd;
426 struct pci_dev *dev = priv->pci_dev;
427 int index = txq->q.read_ptr;
428 int i;
429 int num_tbs;
430
431 tfd = &tfd_tmp[index];
432
433 /* Sanity check on number of chunks */
434 num_tbs = iwl_tfd_get_num_tbs(tfd);
435
436 if (num_tbs >= IWL_NUM_OF_TBS) {
437 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
 438 /* @todo issue fatal error, it is quite a serious situation */
439 return;
440 }
441
442 /* Unmap tx_cmd */
443 if (num_tbs)
444 pci_unmap_single(dev,
445 pci_unmap_addr(&txq->cmd[index]->meta, mapping),
446 pci_unmap_len(&txq->cmd[index]->meta, len),
447 PCI_DMA_TODEVICE);
448
449 /* Unmap chunks, if any. */
450 for (i = 1; i < num_tbs; i++) {
451 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
452 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
453
454 if (txq->txb) {
455 dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
456 txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
457 }
458 }
459}
460
461int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
462 struct iwl_tx_queue *txq,
463 dma_addr_t addr, u16 len,
464 u8 reset, u8 pad)
465{
466 struct iwl_queue *q;
467 struct iwl_tfd *tfd, *tfd_tmp;
468 u32 num_tbs;
469
470 q = &txq->q;
471 tfd_tmp = (struct iwl_tfd *)txq->tfds;
472 tfd = &tfd_tmp[q->write_ptr];
473
474 if (reset)
475 memset(tfd, 0, sizeof(*tfd));
476
477 num_tbs = iwl_tfd_get_num_tbs(tfd);
478
 479 /* Each TFD can point to a maximum of 20 Tx buffers */
480 if (num_tbs >= IWL_NUM_OF_TBS) {
481 IWL_ERR(priv, "Error can not send more than %d chunks\n",
482 IWL_NUM_OF_TBS);
483 return -EINVAL;
484 }
485
486 BUG_ON(addr & ~DMA_BIT_MASK(36));
487 if (unlikely(addr & ~IWL_TX_DMA_MASK))
488 IWL_ERR(priv, "Unaligned address = %llx\n",
489 (unsigned long long)addr);
490
491 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
492
493 return 0;
494}
495
496/*
497 * Tell nic where to find circular buffer of Tx Frame Descriptors for
498 * given Tx queue, and enable the DMA channel used for that queue.
499 *
500 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
501 * channels supported in hardware.
502 */
503int iwl_hw_tx_queue_init(struct iwl_priv *priv,
504 struct iwl_tx_queue *txq)
505{
506 int ret;
507 unsigned long flags;
508 int txq_id = txq->q.id;
509
510 spin_lock_irqsave(&priv->lock, flags);
511 ret = iwl_grab_nic_access(priv);
512 if (ret) {
513 spin_unlock_irqrestore(&priv->lock, flags);
514 return ret;
515 }
516
517 /* Circular buffer (TFD queue in DRAM) physical base address */
518 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
519 txq->q.dma_addr >> 8);
520
521 iwl_release_nic_access(priv);
522 spin_unlock_irqrestore(&priv->lock, flags);
523
524 return 0;
525}
526
527
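[Editor's note] The TFD helpers added above pack a 36-bit DMA address and a 12-bit buffer length into a 32-bit 'lo' word plus a 16-bit 'hi_n_len' field (low nibble = address bits 35:32, upper 12 bits = length). A standalone round-trip sketch of that bit layout (editor's addition; the kernel's little-endian/unaligned accessors are deliberately omitted here):

#include <stdio.h>
#include <stdint.h>

struct tb {
        uint32_t lo;        /* low 32 bits of the DMA address */
        uint16_t hi_n_len;  /* [3:0] address bits 35:32, [15:4] length */
};

static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
{
        tb->lo = (uint32_t)addr;
        tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_get_addr(const struct tb *tb)
{
        return (uint64_t)tb->lo | (((uint64_t)tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_get_len(const struct tb *tb)
{
        return tb->hi_n_len >> 4;
}

int main(void)
{
        struct tb tb;

        tb_set(&tb, 0xABCDEF012ULL, 1500);   /* 36-bit address, 1500 bytes */
        printf("addr=%#llx len=%u\n",
               (unsigned long long)tb_get_addr(&tb),
               (unsigned)tb_get_len(&tb));   /* round-trips to the inputs */
        return 0;
}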
472/****************************************************************************** 528/******************************************************************************
473 * 529 *
474 * Misc. internal state and helper functions 530 * Misc. internal state and helper functions
@@ -482,7 +538,7 @@ static void iwl_ht_conf(struct iwl_priv *priv,
482 struct iwl_ht_info *iwl_conf = &priv->current_ht_config; 538 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
483 struct ieee80211_sta *sta; 539 struct ieee80211_sta *sta;
484 540
485 IWL_DEBUG_MAC80211("enter: \n"); 541 IWL_DEBUG_MAC80211(priv, "enter: \n");
486 542
487 if (!iwl_conf->is_ht) 543 if (!iwl_conf->is_ht)
488 return; 544 return;
@@ -520,9 +576,9 @@ static void iwl_ht_conf(struct iwl_priv *priv,
520 */ 576 */
521 577
522 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; 578 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
523 if (priv->hw->conf.ht.channel_type == NL80211_CHAN_HT40MINUS) 579 if (conf_is_ht40_minus(&priv->hw->conf))
524 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; 580 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
525 else if(priv->hw->conf.ht.channel_type == NL80211_CHAN_HT40PLUS) 581 else if (conf_is_ht40_plus(&priv->hw->conf))
526 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 582 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
527 583
528 /* If no above or below channel supplied disable FAT channel */ 584 /* If no above or below channel supplied disable FAT channel */
@@ -542,7 +598,7 @@ static void iwl_ht_conf(struct iwl_priv *priv,
542 598
543 rcu_read_unlock(); 599 rcu_read_unlock();
544 600
545 IWL_DEBUG_MAC80211("leave\n"); 601 IWL_DEBUG_MAC80211(priv, "leave\n");
546} 602}
547 603
548/* 604/*
@@ -567,7 +623,7 @@ static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
567 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; 623 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
568 624
569 if (force || iwl_is_associated(priv)) { 625 if (force || iwl_is_associated(priv)) {
570 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n", 626 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
571 priv->qos_data.qos_active, 627 priv->qos_data.qos_active,
572 priv->qos_data.def_qos_parm.qos_flags); 628 priv->qos_data.def_qos_parm.qos_flags);
573 629
@@ -624,117 +680,16 @@ static void iwl_setup_rxon_timing(struct iwl_priv *priv)
624 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem); 680 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
625 681
626 spin_unlock_irqrestore(&priv->lock, flags); 682 spin_unlock_irqrestore(&priv->lock, flags);
627 IWL_DEBUG_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n", 683 IWL_DEBUG_ASSOC(priv, "beacon interval %d beacon timer %d beacon tim %d\n",
628 le16_to_cpu(priv->rxon_timing.beacon_interval), 684 le16_to_cpu(priv->rxon_timing.beacon_interval),
629 le32_to_cpu(priv->rxon_timing.beacon_init_val), 685 le32_to_cpu(priv->rxon_timing.beacon_init_val),
630 le16_to_cpu(priv->rxon_timing.atim_window)); 686 le16_to_cpu(priv->rxon_timing.atim_window));
631} 687}
632 688
633static void iwl_set_flags_for_band(struct iwl_priv *priv,
634 enum ieee80211_band band)
635{
636 if (band == IEEE80211_BAND_5GHZ) {
637 priv->staging_rxon.flags &=
638 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
639 | RXON_FLG_CCK_MSK);
640 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
641 } else {
642 /* Copied from iwl_post_associate() */
643 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
644 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
645 else
646 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
647
648 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
649 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
650
651 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
652 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
653 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
654 }
655}
656
657/*
658 * initialize rxon structure with default values from eeprom
659 */
660static void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
661{
662 const struct iwl_channel_info *ch_info;
663
664 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
665
666 switch (mode) {
667 case NL80211_IFTYPE_AP:
668 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
669 break;
670
671 case NL80211_IFTYPE_STATION:
672 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
673 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
674 break;
675
676 case NL80211_IFTYPE_ADHOC:
677 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
678 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
679 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
680 RXON_FILTER_ACCEPT_GRP_MSK;
681 break;
682
683 case NL80211_IFTYPE_MONITOR:
684 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
685 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
686 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
687 break;
688 default:
689 IWL_ERROR("Unsupported interface type %d\n", mode);
690 break;
691 }
692
693#if 0
694 /* TODO: Figure out when short_preamble would be set and cache from
695 * that */
696 if (!hw_to_local(priv->hw)->short_preamble)
697 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
698 else
699 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
700#endif
701
702 ch_info = iwl_get_channel_info(priv, priv->band,
703 le16_to_cpu(priv->active_rxon.channel));
704
705 if (!ch_info)
706 ch_info = &priv->channel_info[0];
707
708 /*
709 * in some case A channels are all non IBSS
710 * in this case force B/G channel
711 */
712 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
713 !(is_channel_ibss(ch_info)))
714 ch_info = &priv->channel_info[0];
715
716 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
717 priv->band = ch_info->band;
718
719 iwl_set_flags_for_band(priv, priv->band);
720
721 priv->staging_rxon.ofdm_basic_rates =
722 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
723 priv->staging_rxon.cck_basic_rates =
724 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
725
726 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
727 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
728 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
729 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
730 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
731 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
732 iwl_set_rxon_chain(priv);
733}
734
735static int iwl_set_mode(struct iwl_priv *priv, int mode) 689static int iwl_set_mode(struct iwl_priv *priv, int mode)
736{ 690{
737 iwl_connection_init_rx_config(priv, mode); 691 iwl_connection_init_rx_config(priv, mode);
692 iwl_set_rxon_chain(priv);
738 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 693 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
739 694
740 iwl_clear_stations_table(priv); 695 iwl_clear_stations_table(priv);
@@ -745,8 +700,8 @@ static int iwl_set_mode(struct iwl_priv *priv, int mode)
745 700
746 cancel_delayed_work(&priv->scan_check); 701 cancel_delayed_work(&priv->scan_check);
747 if (iwl_scan_cancel_timeout(priv, 100)) { 702 if (iwl_scan_cancel_timeout(priv, 100)) {
748 IWL_WARNING("Aborted scan still in progress after 100ms\n"); 703 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
749 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); 704 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
750 return -EAGAIN; 705 return -EAGAIN;
751 } 706 }
752 707
@@ -755,54 +710,6 @@ static int iwl_set_mode(struct iwl_priv *priv, int mode)
755 return 0; 710 return 0;
756} 711}
757 712
758static void iwl_set_rate(struct iwl_priv *priv)
759{
760 const struct ieee80211_supported_band *hw = NULL;
761 struct ieee80211_rate *rate;
762 int i;
763
764 hw = iwl_get_hw_mode(priv, priv->band);
765 if (!hw) {
766 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
767 return;
768 }
769
770 priv->active_rate = 0;
771 priv->active_rate_basic = 0;
772
773 for (i = 0; i < hw->n_bitrates; i++) {
774 rate = &(hw->bitrates[i]);
775 if (rate->hw_value < IWL_RATE_COUNT)
776 priv->active_rate |= (1 << rate->hw_value);
777 }
778
779 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
780 priv->active_rate, priv->active_rate_basic);
781
782 /*
783 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
784 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
785 * OFDM
786 */
787 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
788 priv->staging_rxon.cck_basic_rates =
789 ((priv->active_rate_basic &
790 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
791 else
792 priv->staging_rxon.cck_basic_rates =
793 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
794
795 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
796 priv->staging_rxon.ofdm_basic_rates =
797 ((priv->active_rate_basic &
798 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
799 IWL_FIRST_OFDM_RATE) & 0xFF;
800 else
801 priv->staging_rxon.ofdm_basic_rates =
802 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
803}
804
805
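[Editor's note] The removed iwl_set_rate() above built priv->active_rate from the band's bitrate table by setting one bit per in-range hw_value. A standalone sketch of that bitmap construction (editor's addition; IWL_RATE_COUNT and the sample hw_value list are assumed placeholders):

/* build_active_rate: one bit per supported hardware rate index */
#include <stdio.h>
#include <stdint.h>

#define IWL_RATE_COUNT 13   /* assumed */

static uint32_t build_active_rate(const uint8_t *hw_values, int n_bitrates)
{
        uint32_t active = 0;
        int i;

        for (i = 0; i < n_bitrates; i++)
                if (hw_values[i] < IWL_RATE_COUNT)
                        active |= 1u << hw_values[i];
        return active;
}

int main(void)
{
        const uint8_t hw_values[] = { 0, 1, 2, 3, 4, 5 };  /* placeholder list */

        printf("active_rate = %#x\n",
               (unsigned)build_active_rate(hw_values,
                       (int)(sizeof(hw_values) / sizeof(hw_values[0]))));
        return 0;
}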
806/****************************************************************************** 713/******************************************************************************
807 * 714 *
808 * Generic RX handler implementations 715 * Generic RX handler implementations
@@ -817,19 +724,19 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
817 724
818 palive = &pkt->u.alive_frame; 725 palive = &pkt->u.alive_frame;
819 726
820 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " 727 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
821 "0x%01X 0x%01X\n", 728 "0x%01X 0x%01X\n",
822 palive->is_valid, palive->ver_type, 729 palive->is_valid, palive->ver_type,
823 palive->ver_subtype); 730 palive->ver_subtype);
824 731
825 if (palive->ver_subtype == INITIALIZE_SUBTYPE) { 732 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
826 IWL_DEBUG_INFO("Initialization Alive received.\n"); 733 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
827 memcpy(&priv->card_alive_init, 734 memcpy(&priv->card_alive_init,
828 &pkt->u.alive_frame, 735 &pkt->u.alive_frame,
829 sizeof(struct iwl_init_alive_resp)); 736 sizeof(struct iwl_init_alive_resp));
830 pwork = &priv->init_alive_start; 737 pwork = &priv->init_alive_start;
831 } else { 738 } else {
832 IWL_DEBUG_INFO("Runtime Alive received.\n"); 739 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
833 memcpy(&priv->card_alive, &pkt->u.alive_frame, 740 memcpy(&priv->card_alive, &pkt->u.alive_frame,
834 sizeof(struct iwl_alive_resp)); 741 sizeof(struct iwl_alive_resp));
835 pwork = &priv->alive_start; 742 pwork = &priv->alive_start;
@@ -841,7 +748,7 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
841 queue_delayed_work(priv->workqueue, pwork, 748 queue_delayed_work(priv->workqueue, pwork,
842 msecs_to_jiffies(5)); 749 msecs_to_jiffies(5));
843 else 750 else
844 IWL_WARNING("uCode did not respond OK.\n"); 751 IWL_WARN(priv, "uCode did not respond OK.\n");
845} 752}
846 753
847static void iwl_rx_reply_error(struct iwl_priv *priv, 754static void iwl_rx_reply_error(struct iwl_priv *priv,
@@ -849,7 +756,7 @@ static void iwl_rx_reply_error(struct iwl_priv *priv,
849{ 756{
850 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 757 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
851 758
852 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " 759 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
853 "seq 0x%04X ser 0x%08X\n", 760 "seq 0x%04X ser 0x%08X\n",
854 le32_to_cpu(pkt->u.err_resp.error_type), 761 le32_to_cpu(pkt->u.err_resp.error_type),
855 get_cmd_string(pkt->u.err_resp.cmd_id), 762 get_cmd_string(pkt->u.err_resp.cmd_id),
@@ -858,26 +765,13 @@ static void iwl_rx_reply_error(struct iwl_priv *priv,
858 le32_to_cpu(pkt->u.err_resp.error_info)); 765 le32_to_cpu(pkt->u.err_resp.error_info));
859} 766}
860 767
861#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
862
863static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
864{
865 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
866 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
867 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
868 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
869 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
870 rxon->channel = csa->channel;
871 priv->staging_rxon.channel = csa->channel;
872}
873
874static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, 768static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
875 struct iwl_rx_mem_buffer *rxb) 769 struct iwl_rx_mem_buffer *rxb)
876{ 770{
877#ifdef CONFIG_IWLWIFI_DEBUG 771#ifdef CONFIG_IWLWIFI_DEBUG
878 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 772 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
879 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); 773 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
880 IWL_DEBUG_RX("sleep mode: %d, src: %d\n", 774 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
881 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 775 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
882#endif 776#endif
883} 777}
@@ -886,7 +780,7 @@ static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
886 struct iwl_rx_mem_buffer *rxb) 780 struct iwl_rx_mem_buffer *rxb)
887{ 781{
888 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 782 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
889 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " 783 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
890 "notification for %s:\n", 784 "notification for %s:\n",
891 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); 785 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
892 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); 786 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
@@ -902,7 +796,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
902 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 796 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
903 797
904 if (!beacon) { 798 if (!beacon) {
905 IWL_ERROR("update beacon failed\n"); 799 IWL_ERR(priv, "update beacon failed\n");
906 return; 800 return;
907 } 801 }
908 802
@@ -950,7 +844,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
950 (struct iwl4965_beacon_notif *)pkt->u.raw; 844 (struct iwl4965_beacon_notif *)pkt->u.raw;
951 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 845 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
952 846
953 IWL_DEBUG_RX("beacon status %x retries %d iss %d " 847 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
954 "tsf %d %d rate %d\n", 848 "tsf %d %d rate %d\n",
955 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, 849 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
956 beacon->beacon_notify_hdr.failure_frame, 850 beacon->beacon_notify_hdr.failure_frame,
@@ -973,7 +867,7 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
973 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 867 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
974 unsigned long status = priv->status; 868 unsigned long status = priv->status;
975 869
976 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", 870 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
977 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 871 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
978 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 872 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
979 873
@@ -1046,11 +940,7 @@ int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
1046 goto err; 940 goto err;
1047 941
1048 if (src == IWL_PWR_SRC_VAUX) { 942 if (src == IWL_PWR_SRC_VAUX) {
1049 u32 val; 943 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
1050 ret = pci_read_config_dword(priv->pci_dev, PCI_CFG_POWER_SOURCE,
1051 &val);
1052
1053 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
1054 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 944 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
1055 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 945 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
1056 ~APMG_PS_CTRL_MSK_PWR_SRC); 946 ~APMG_PS_CTRL_MSK_PWR_SRC);
@@ -1135,7 +1025,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
1135 1025
1136 /* Rx interrupt, but nothing sent from uCode */ 1026 /* Rx interrupt, but nothing sent from uCode */
1137 if (i == r) 1027 if (i == r)
1138 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d\n", r, i); 1028 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1139 1029
1140 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2)) 1030 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
1141 fill_rx = 1; 1031 fill_rx = 1;
@@ -1175,12 +1065,12 @@ void iwl_rx_handle(struct iwl_priv *priv)
1175 * handle those that need handling via function in 1065 * handle those that need handling via function in
1176 * rx_handlers table. See iwl_setup_rx_handlers() */ 1066 * rx_handlers table. See iwl_setup_rx_handlers() */
1177 if (priv->rx_handlers[pkt->hdr.cmd]) { 1067 if (priv->rx_handlers[pkt->hdr.cmd]) {
1178 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d, %s, 0x%02x\n", r, 1068 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
1179 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1069 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1180 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 1070 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1181 } else { 1071 } else {
1182 /* No handling needed */ 1072 /* No handling needed */
1183 IWL_DEBUG(IWL_DL_RX, 1073 IWL_DEBUG_RX(priv,
1184 "r %d i %d No handler needed for %s, 0x%02x\n", 1074 "r %d i %d No handler needed for %s, 0x%02x\n",
1185 r, i, get_cmd_string(pkt->hdr.cmd), 1075 r, i, get_cmd_string(pkt->hdr.cmd),
1186 pkt->hdr.cmd); 1076 pkt->hdr.cmd);
@@ -1193,7 +1083,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
1193 if (rxb && rxb->skb) 1083 if (rxb && rxb->skb)
1194 iwl_tx_cmd_complete(priv, rxb); 1084 iwl_tx_cmd_complete(priv, rxb);
1195 else 1085 else
1196 IWL_WARNING("Claim null rxb?\n"); 1086 IWL_WARN(priv, "Claim null rxb?\n");
1197 } 1087 }
1198 1088
1199 /* For now we just don't re-use anything. We can tweak this 1089 /* For now we just don't re-use anything. We can tweak this
@@ -1229,27 +1119,6 @@ void iwl_rx_handle(struct iwl_priv *priv)
1229 iwl_rx_queue_restock(priv); 1119 iwl_rx_queue_restock(priv);
1230} 1120}
1231 1121
1232#ifdef CONFIG_IWLWIFI_DEBUG
1233static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1234{
1235 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1236
1237 IWL_DEBUG_RADIO("RX CONFIG:\n");
1238 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
1239 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
1240 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
1241 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
1242 le32_to_cpu(rxon->filter_flags));
1243 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
1244 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
1245 rxon->ofdm_basic_rates);
1246 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
1247 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
1248 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1249 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1250}
1251#endif
1252
1253/* call this function to flush any scheduled tasklet */ 1122/* call this function to flush any scheduled tasklet */
1254static inline void iwl_synchronize_irq(struct iwl_priv *priv) 1123static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1255{ 1124{
@@ -1258,45 +1127,6 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1258 tasklet_kill(&priv->irq_tasklet); 1127 tasklet_kill(&priv->irq_tasklet);
1259} 1128}
1260 1129
1261/**
1262 * iwl_irq_handle_error - called for HW or SW error interrupt from card
1263 */
1264static void iwl_irq_handle_error(struct iwl_priv *priv)
1265{
1266 /* Set the FW error flag -- cleared on iwl_down */
1267 set_bit(STATUS_FW_ERROR, &priv->status);
1268
1269 /* Cancel currently queued command. */
1270 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1271
1272#ifdef CONFIG_IWLWIFI_DEBUG
1273 if (priv->debug_level & IWL_DL_FW_ERRORS) {
1274 iwl_dump_nic_error_log(priv);
1275 iwl_dump_nic_event_log(priv);
1276 iwl_print_rx_config_cmd(priv);
1277 }
1278#endif
1279
1280 wake_up_interruptible(&priv->wait_command_queue);
1281
1282 /* Keep the restart process from trying to send host
1283 * commands by clearing the INIT status bit */
1284 clear_bit(STATUS_READY, &priv->status);
1285
1286 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1287 IWL_DEBUG(IWL_DL_FW_ERRORS,
1288 "Restarting adapter due to uCode error.\n");
1289
1290 if (iwl_is_associated(priv)) {
1291 memcpy(&priv->recovery_rxon, &priv->active_rxon,
1292 sizeof(priv->recovery_rxon));
1293 priv->error_recovering = 1;
1294 }
1295 if (priv->cfg->mod_params->restart_fw)
1296 queue_work(priv->workqueue, &priv->restart);
1297 }
1298}
1299
1300static void iwl_error_recovery(struct iwl_priv *priv) 1130static void iwl_error_recovery(struct iwl_priv *priv)
1301{ 1131{
1302 unsigned long flags; 1132 unsigned long flags;
@@ -1341,7 +1171,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1341 if (priv->debug_level & IWL_DL_ISR) { 1171 if (priv->debug_level & IWL_DL_ISR) {
1342 /* just for debug */ 1172 /* just for debug */
1343 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1173 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1344 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1174 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1345 inta, inta_mask, inta_fh); 1175 inta, inta_mask, inta_fh);
1346 } 1176 }
1347#endif 1177#endif
@@ -1357,7 +1187,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1357 1187
1358 /* Now service all interrupt bits discovered above. */ 1188 /* Now service all interrupt bits discovered above. */
1359 if (inta & CSR_INT_BIT_HW_ERR) { 1189 if (inta & CSR_INT_BIT_HW_ERR) {
1360 IWL_ERROR("Microcode HW error detected. Restarting.\n"); 1190 IWL_ERR(priv, "Microcode HW error detected. Restarting.\n");
1361 1191
1362 /* Tell the device to stop sending interrupts */ 1192 /* Tell the device to stop sending interrupts */
1363 iwl_disable_interrupts(priv); 1193 iwl_disable_interrupts(priv);
@@ -1375,12 +1205,12 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1375 if (priv->debug_level & (IWL_DL_ISR)) { 1205 if (priv->debug_level & (IWL_DL_ISR)) {
1376 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1206 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1377 if (inta & CSR_INT_BIT_SCD) 1207 if (inta & CSR_INT_BIT_SCD)
1378 IWL_DEBUG_ISR("Scheduler finished to transmit " 1208 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
1379 "the frame/frames.\n"); 1209 "the frame/frames.\n");
1380 1210
1381 /* Alive notification via Rx interrupt will do the real work */ 1211 /* Alive notification via Rx interrupt will do the real work */
1382 if (inta & CSR_INT_BIT_ALIVE) 1212 if (inta & CSR_INT_BIT_ALIVE)
1383 IWL_DEBUG_ISR("Alive interrupt\n"); 1213 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
1384 } 1214 }
1385#endif 1215#endif
1386 /* Safely ignore these bits for debug checks below */ 1216 /* Safely ignore these bits for debug checks below */
@@ -1393,17 +1223,20 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1393 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 1223 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
1394 hw_rf_kill = 1; 1224 hw_rf_kill = 1;
1395 1225
1396 IWL_DEBUG(IWL_DL_RF_KILL, "RF_KILL bit toggled to %s.\n", 1226 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
1397 hw_rf_kill ? "disable radio" : "enable radio"); 1227 hw_rf_kill ? "disable radio" : "enable radio");
1398 1228
 	/* driver only loads ucode once setting the interface up.
-	 * the driver as well won't allow loading if RFKILL is set
-	 * therefore no need to restart the driver from this handler
+	 * the driver allows loading the ucode even if the radio
+	 * is killed. Hence update the killswitch state here. The
+	 * rfkill handler will care about restarting if needed.
 	 */
-	if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-		if (priv->is_open && !iwl_is_rfkill(priv))
-			queue_work(priv->workqueue, &priv->up);
+	if (!test_bit(STATUS_ALIVE, &priv->status)) {
+		if (hw_rf_kill)
+			set_bit(STATUS_RF_KILL_HW, &priv->status);
+		else
+			clear_bit(STATUS_RF_KILL_HW, &priv->status);
+		queue_work(priv->workqueue, &priv->rf_kill);
 	}
1408 1241
1409 handled |= CSR_INT_BIT_RF_KILL; 1242 handled |= CSR_INT_BIT_RF_KILL;
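The RF-kill interrupt handling above now only records the switch position in priv->status and defers the policy decision to the rf_kill work item; the iwl_bg_rf_kill handler that used to back that work in this file is deleted further down, so its logic presumably moves into shared iwlcore code. The sketch below is modelled on that removed handler and is purely illustrative; the INIT_WORK pairing for priv->rf_kill is assumed to happen elsewhere in the driver.

/* Illustrative worker for the rf_kill work queued above; modelled on the
 * iwl_bg_rf_kill body removed later in this patch, not the driver's
 * actual replacement implementation. */
static void example_rf_kill_worker(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);

	wake_up_interruptible(&priv->wait_command_queue);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	if (!iwl_is_rfkill(priv))
		queue_work(priv->workqueue, &priv->restart);
	else if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}

Keeping the tasklet limited to set_bit/clear_bit plus queue_work avoids taking mutexes or restarting firmware from softirq context.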
@@ -1411,21 +1244,21 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1411 1244
1412 /* Chip got too hot and stopped itself */ 1245 /* Chip got too hot and stopped itself */
1413 if (inta & CSR_INT_BIT_CT_KILL) { 1246 if (inta & CSR_INT_BIT_CT_KILL) {
1414 IWL_ERROR("Microcode CT kill error detected.\n"); 1247 IWL_ERR(priv, "Microcode CT kill error detected.\n");
1415 handled |= CSR_INT_BIT_CT_KILL; 1248 handled |= CSR_INT_BIT_CT_KILL;
1416 } 1249 }
1417 1250
1418 /* Error detected by uCode */ 1251 /* Error detected by uCode */
1419 if (inta & CSR_INT_BIT_SW_ERR) { 1252 if (inta & CSR_INT_BIT_SW_ERR) {
1420 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", 1253 IWL_ERR(priv, "Microcode SW error detected. "
1421 inta); 1254 " Restarting 0x%X.\n", inta);
1422 iwl_irq_handle_error(priv); 1255 iwl_irq_handle_error(priv);
1423 handled |= CSR_INT_BIT_SW_ERR; 1256 handled |= CSR_INT_BIT_SW_ERR;
1424 } 1257 }
1425 1258
1426 /* uCode wakes up after power-down sleep */ 1259 /* uCode wakes up after power-down sleep */
1427 if (inta & CSR_INT_BIT_WAKEUP) { 1260 if (inta & CSR_INT_BIT_WAKEUP) {
1428 IWL_DEBUG_ISR("Wakeup interrupt\n"); 1261 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1429 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1262 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
1430 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1263 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
1431 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1264 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
@@ -1446,7 +1279,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1446 } 1279 }
1447 1280
1448 if (inta & CSR_INT_BIT_FH_TX) { 1281 if (inta & CSR_INT_BIT_FH_TX) {
1449 IWL_DEBUG_ISR("Tx interrupt\n"); 1282 IWL_DEBUG_ISR(priv, "Tx interrupt\n");
1450 handled |= CSR_INT_BIT_FH_TX; 1283 handled |= CSR_INT_BIT_FH_TX;
1451 /* FH finished to write, send event */ 1284 /* FH finished to write, send event */
1452 priv->ucode_write_complete = 1; 1285 priv->ucode_write_complete = 1;
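CSR_INT_BIT_FH_TX tells the driver that the flow handler finished DMA-ing a ucode image, and the tasklet records that in priv->ucode_write_complete. The waiting side of this handshake is outside these hunks; a minimal sketch of what it presumably looks like, assuming the interrupt path also wakes priv->wait_command_queue (the helper name and timeout below are illustrative):

/* Sketch of the waiting side of the FH_TX handshake; the real wait sits
 * in the ucode load path and is not part of this hunk. */
static int example_wait_for_ucode_dma(struct iwl_priv *priv)
{
	long left;

	priv->ucode_write_complete = 0;
	/* ... kick off the FH DMA of the ucode image here ... */

	left = wait_event_interruptible_timeout(priv->wait_command_queue,
						priv->ucode_write_complete,
						5 * HZ);
	if (left <= 0)
		return -ETIMEDOUT;	/* timed out or interrupted */

	return 0;
}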
@@ -1454,12 +1287,12 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1454 } 1287 }
1455 1288
1456 if (inta & ~handled) 1289 if (inta & ~handled)
1457 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 1290 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1458 1291
1459 if (inta & ~CSR_INI_SET_MASK) { 1292 if (inta & ~CSR_INI_SET_MASK) {
1460 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", 1293 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1461 inta & ~CSR_INI_SET_MASK); 1294 inta & ~CSR_INI_SET_MASK);
1462 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); 1295 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1463 } 1296 }
1464 1297
1465 /* Re-enable all interrupts */ 1298 /* Re-enable all interrupts */
@@ -1472,7 +1305,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1472 inta = iwl_read32(priv, CSR_INT); 1305 inta = iwl_read32(priv, CSR_INT);
1473 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1306 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1474 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1307 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1475 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " 1308 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1476 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1309 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1477 } 1310 }
1478#endif 1311#endif
@@ -1504,18 +1337,18 @@ static irqreturn_t iwl_isr(int irq, void *data)
1504 * This may be due to IRQ shared with another device, 1337 * This may be due to IRQ shared with another device,
1505 * or due to sporadic interrupts thrown from our NIC. */ 1338 * or due to sporadic interrupts thrown from our NIC. */
1506 if (!inta && !inta_fh) { 1339 if (!inta && !inta_fh) {
1507 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); 1340 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1508 goto none; 1341 goto none;
1509 } 1342 }
1510 1343
1511 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1344 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1512 /* Hardware disappeared. It might have already raised 1345 /* Hardware disappeared. It might have already raised
1513 * an interrupt */ 1346 * an interrupt */
1514 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta); 1347 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1515 goto unplugged; 1348 goto unplugged;
1516 } 1349 }
1517 1350
1518 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1351 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1519 inta, inta_mask, inta_fh); 1352 inta, inta_mask, inta_fh);
1520 1353
1521 inta &= ~CSR_INT_BIT_SCD; 1354 inta &= ~CSR_INT_BIT_SCD;
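iwl_isr() can run on a shared IRQ line, so before doing anything it has to decide whether the interrupt is ours at all and whether the device is even still present: a hot-unplugged PCI device reads back as all ones (the 0xa5a5a5a0 pattern is treated the same way). A stripped-down sketch of that pattern, leaving out the FH status register and the interrupt masking the real handler also does:

/* Minimal sketch of the "is this interrupt ours / is the device still
 * there" checks done by a shared PCI IRQ handler. */
static irqreturn_t example_shared_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta = iwl_read32(priv, CSR_INT);

	if (!inta)
		return IRQ_NONE;	/* another device sharing the line */

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)
		return IRQ_HANDLED;	/* reads as all-ones: device gone */

	tasklet_schedule(&priv->irq_tasklet);	/* defer the real work */
	return IRQ_HANDLED;
}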
@@ -1584,7 +1417,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1584 sprintf(buf, "%s%d%s", name_pre, index, ".ucode"); 1417 sprintf(buf, "%s%d%s", name_pre, index, ".ucode");
1585 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); 1418 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
1586 if (ret < 0) { 1419 if (ret < 0) {
1587 IWL_ERROR("%s firmware file req failed: Reason %d\n", 1420 IWL_ERR(priv, "%s firmware file req failed: %d\n",
1588 buf, ret); 1421 buf, ret);
1589 if (ret == -ENOENT) 1422 if (ret == -ENOENT)
1590 continue; 1423 continue;
@@ -1592,9 +1425,12 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1592 goto error; 1425 goto error;
1593 } else { 1426 } else {
1594 if (index < api_max) 1427 if (index < api_max)
-				IWL_ERROR("Loaded firmware %s, which is deprecated. Please use API v%u instead.\n",
+				IWL_ERR(priv, "Loaded firmware %s, "
+					"which is deprecated. "
+					"Please use API v%u instead.\n",
 					buf, api_max);
-			IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
+
+			IWL_DEBUG_INFO(priv, "Got firmware '%s' file (%zd bytes) from disk\n",
 				buf, ucode_raw->size);
1599 break; 1435 break;
1600 } 1436 }
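The request loop above builds candidate firmware names from a per-device prefix plus an API index and walks from the newest supported index down to the oldest, skipping versions that are simply not installed (-ENOENT) but aborting on any other error. A compact sketch of that loop shape; the prefix and API bounds are passed as parameters here, whereas the driver takes them from its per-device configuration:

/* Sketch of the firmware-name fallback loop: try the newest supported
 * API index first and walk down. */
static int example_request_newest_ucode(struct iwl_priv *priv,
					const char *name_pre,
					int api_max, int api_min,
					const struct firmware **fw)
{
	char buf[64];
	int index, ret = -ENOENT;

	for (index = api_max; index >= api_min; index--) {
		snprintf(buf, sizeof(buf), "%s%d%s", name_pre, index, ".ucode");
		ret = request_firmware(fw, buf, &priv->pci_dev->dev);
		if (ret == 0)
			return 0;		/* found this API version */
		if (ret != -ENOENT)
			break;			/* real error, stop probing */
	}
	return ret;
}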
@@ -1605,7 +1441,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1605 1441
1606 /* Make sure that we got at least our header! */ 1442 /* Make sure that we got at least our header! */
1607 if (ucode_raw->size < sizeof(*ucode)) { 1443 if (ucode_raw->size < sizeof(*ucode)) {
1608 IWL_ERROR("File size way too small!\n"); 1444 IWL_ERR(priv, "File size way too small!\n");
1609 ret = -EINVAL; 1445 ret = -EINVAL;
1610 goto err_release; 1446 goto err_release;
1611 } 1447 }
@@ -1626,7 +1462,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1626 * on the API version read from firware header from here on forward */ 1462 * on the API version read from firware header from here on forward */
1627 1463
1628 if (api_ver < api_min || api_ver > api_max) { 1464 if (api_ver < api_min || api_ver > api_max) {
1629 IWL_ERROR("Driver unable to support your firmware API. " 1465 IWL_ERR(priv, "Driver unable to support your firmware API. "
1630 "Driver supports v%u, firmware is v%u.\n", 1466 "Driver supports v%u, firmware is v%u.\n",
1631 api_max, api_ver); 1467 api_max, api_ver);
1632 priv->ucode_ver = 0; 1468 priv->ucode_ver = 0;
@@ -1634,28 +1470,28 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1634 goto err_release; 1470 goto err_release;
1635 } 1471 }
1636 if (api_ver != api_max) 1472 if (api_ver != api_max)
1637 IWL_ERROR("Firmware has old API version. Expected v%u, " 1473 IWL_ERR(priv, "Firmware has old API version. Expected v%u, "
1638 "got v%u. New firmware can be obtained " 1474 "got v%u. New firmware can be obtained "
1639 "from http://www.intellinuxwireless.org.\n", 1475 "from http://www.intellinuxwireless.org.\n",
1640 api_max, api_ver); 1476 api_max, api_ver);
1641 1477
1642 printk(KERN_INFO DRV_NAME " loaded firmware version %u.%u.%u.%u\n", 1478 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1643 IWL_UCODE_MAJOR(priv->ucode_ver), 1479 IWL_UCODE_MAJOR(priv->ucode_ver),
1644 IWL_UCODE_MINOR(priv->ucode_ver), 1480 IWL_UCODE_MINOR(priv->ucode_ver),
1645 IWL_UCODE_API(priv->ucode_ver), 1481 IWL_UCODE_API(priv->ucode_ver),
1646 IWL_UCODE_SERIAL(priv->ucode_ver)); 1482 IWL_UCODE_SERIAL(priv->ucode_ver));
1647 1483
1648 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n", 1484 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1649 priv->ucode_ver); 1485 priv->ucode_ver);
1650 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", 1486 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
1651 inst_size); 1487 inst_size);
1652 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", 1488 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
1653 data_size); 1489 data_size);
1654 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", 1490 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
1655 init_size); 1491 init_size);
1656 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", 1492 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
1657 init_data_size); 1493 init_data_size);
1658 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", 1494 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
1659 boot_size); 1495 boot_size);
1660 1496
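The sizes printed above come from a small header at the start of the .ucode file, which is followed by the five image blocks back to back; the next hunk checks that the file is at least large enough to hold all of them. An illustrative layout and size check, with field names that only mirror the debug output (the driver's real header struct may differ):

/* Illustrative on-disk layout implied by the checks and copies in this
 * function; all fields are little-endian. */
struct example_ucode_header {
	__le32 ver;		/* major/minor/API/serial packed */
	__le32 inst_size;	/* runtime instructions */
	__le32 data_size;	/* runtime data */
	__le32 init_size;	/* init instructions */
	__le32 init_data_size;	/* init data */
	__le32 boot_size;	/* bootstrap instructions */
	u8 data[0];		/* the five blocks, in the order above */
};

static bool example_ucode_size_ok(const struct firmware *fw,
				  const struct example_ucode_header *hdr)
{
	size_t want = sizeof(*hdr) +
		      le32_to_cpu(hdr->inst_size) +
		      le32_to_cpu(hdr->data_size) +
		      le32_to_cpu(hdr->init_size) +
		      le32_to_cpu(hdr->init_data_size) +
		      le32_to_cpu(hdr->boot_size);

	return fw->size >= want;
}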
1661 /* Verify size of file vs. image size info in file's header */ 1497 /* Verify size of file vs. image size info in file's header */
@@ -1663,7 +1499,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1663 inst_size + data_size + init_size + 1499 inst_size + data_size + init_size +
1664 init_data_size + boot_size) { 1500 init_data_size + boot_size) {
1665 1501
1666 IWL_DEBUG_INFO("uCode file size %d too small\n", 1502 IWL_DEBUG_INFO(priv, "uCode file size %d too small\n",
1667 (int)ucode_raw->size); 1503 (int)ucode_raw->size);
1668 ret = -EINVAL; 1504 ret = -EINVAL;
1669 goto err_release; 1505 goto err_release;
@@ -1671,36 +1507,33 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1671 1507
1672 /* Verify that uCode images will fit in card's SRAM */ 1508 /* Verify that uCode images will fit in card's SRAM */
1673 if (inst_size > priv->hw_params.max_inst_size) { 1509 if (inst_size > priv->hw_params.max_inst_size) {
1674 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n", 1510 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
1675 inst_size); 1511 inst_size);
1676 ret = -EINVAL; 1512 ret = -EINVAL;
1677 goto err_release; 1513 goto err_release;
1678 } 1514 }
1679 1515
1680 if (data_size > priv->hw_params.max_data_size) { 1516 if (data_size > priv->hw_params.max_data_size) {
1681 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n", 1517 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
1682 data_size); 1518 data_size);
1683 ret = -EINVAL; 1519 ret = -EINVAL;
1684 goto err_release; 1520 goto err_release;
1685 } 1521 }
 	if (init_size > priv->hw_params.max_inst_size) {
-		IWL_DEBUG_INFO
-		    ("uCode init instr len %d too large to fit in\n",
-		     init_size);
+		IWL_INFO(priv, "uCode init instr len %d too large to fit in\n",
+			init_size);
 		ret = -EINVAL;
 		goto err_release;
 	}
 	if (init_data_size > priv->hw_params.max_data_size) {
-		IWL_DEBUG_INFO
-		    ("uCode init data len %d too large to fit in\n",
-		     init_data_size);
+		IWL_INFO(priv, "uCode init data len %d too large to fit in\n",
+			init_data_size);
 		ret = -EINVAL;
 		goto err_release;
 	}
 	if (boot_size > priv->hw_params.max_bsm_size) {
-		IWL_DEBUG_INFO
-		    ("uCode boot instr len %d too large to fit in\n",
-		     boot_size);
+		IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n",
+			boot_size);
1704 ret = -EINVAL; 1537 ret = -EINVAL;
1705 goto err_release; 1538 goto err_release;
1706 } 1539 }
@@ -1749,16 +1582,16 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1749 /* Runtime instructions (first block of data in file) */ 1582 /* Runtime instructions (first block of data in file) */
1750 src = &ucode->data[0]; 1583 src = &ucode->data[0];
1751 len = priv->ucode_code.len; 1584 len = priv->ucode_code.len;
1752 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len); 1585 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len);
1753 memcpy(priv->ucode_code.v_addr, src, len); 1586 memcpy(priv->ucode_code.v_addr, src, len);
1754 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", 1587 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1755 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); 1588 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1756 1589
1757 /* Runtime data (2nd block) 1590 /* Runtime data (2nd block)
1758 * NOTE: Copy into backup buffer will be done in iwl_up() */ 1591 * NOTE: Copy into backup buffer will be done in iwl_up() */
1759 src = &ucode->data[inst_size]; 1592 src = &ucode->data[inst_size];
1760 len = priv->ucode_data.len; 1593 len = priv->ucode_data.len;
1761 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len); 1594 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len);
1762 memcpy(priv->ucode_data.v_addr, src, len); 1595 memcpy(priv->ucode_data.v_addr, src, len);
1763 memcpy(priv->ucode_data_backup.v_addr, src, len); 1596 memcpy(priv->ucode_data_backup.v_addr, src, len);
1764 1597
@@ -1766,7 +1599,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1766 if (init_size) { 1599 if (init_size) {
1767 src = &ucode->data[inst_size + data_size]; 1600 src = &ucode->data[inst_size + data_size];
1768 len = priv->ucode_init.len; 1601 len = priv->ucode_init.len;
1769 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n", 1602 IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n",
1770 len); 1603 len);
1771 memcpy(priv->ucode_init.v_addr, src, len); 1604 memcpy(priv->ucode_init.v_addr, src, len);
1772 } 1605 }
@@ -1775,7 +1608,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1775 if (init_data_size) { 1608 if (init_data_size) {
1776 src = &ucode->data[inst_size + data_size + init_size]; 1609 src = &ucode->data[inst_size + data_size + init_size];
1777 len = priv->ucode_init_data.len; 1610 len = priv->ucode_init_data.len;
1778 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n", 1611 IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n",
1779 len); 1612 len);
1780 memcpy(priv->ucode_init_data.v_addr, src, len); 1613 memcpy(priv->ucode_init_data.v_addr, src, len);
1781 } 1614 }
@@ -1783,7 +1616,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1783 /* Bootstrap instructions (5th block) */ 1616 /* Bootstrap instructions (5th block) */
1784 src = &ucode->data[inst_size + data_size + init_size + init_data_size]; 1617 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
1785 len = priv->ucode_boot.len; 1618 len = priv->ucode_boot.len;
1786 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len); 1619 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len);
1787 memcpy(priv->ucode_boot.v_addr, src, len); 1620 memcpy(priv->ucode_boot.v_addr, src, len);
1788 1621
1789 /* We have our copies now, allow OS release its copies */ 1622 /* We have our copies now, allow OS release its copies */
@@ -1791,7 +1624,7 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1791 return 0; 1624 return 0;
1792 1625
1793 err_pci_alloc: 1626 err_pci_alloc:
1794 IWL_ERROR("failed to allocate pci memory\n"); 1627 IWL_ERR(priv, "failed to allocate pci memory\n");
1795 ret = -ENOMEM; 1628 ret = -ENOMEM;
1796 iwl_dealloc_ucode_pci(priv); 1629 iwl_dealloc_ucode_pci(priv);
1797 1630
@@ -1815,12 +1648,12 @@ static void iwl_alive_start(struct iwl_priv *priv)
1815{ 1648{
1816 int ret = 0; 1649 int ret = 0;
1817 1650
1818 IWL_DEBUG_INFO("Runtime Alive received.\n"); 1651 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1819 1652
1820 if (priv->card_alive.is_valid != UCODE_VALID_OK) { 1653 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
1821 /* We had an error bringing up the hardware, so take it 1654 /* We had an error bringing up the hardware, so take it
1822 * all the way back down so we can try again */ 1655 * all the way back down so we can try again */
1823 IWL_DEBUG_INFO("Alive failed.\n"); 1656 IWL_DEBUG_INFO(priv, "Alive failed.\n");
1824 goto restart; 1657 goto restart;
1825 } 1658 }
1826 1659
@@ -1830,15 +1663,15 @@ static void iwl_alive_start(struct iwl_priv *priv)
1830 if (iwl_verify_ucode(priv)) { 1663 if (iwl_verify_ucode(priv)) {
1831 /* Runtime instruction load was bad; 1664 /* Runtime instruction load was bad;
1832 * take it all the way back down so we can try again */ 1665 * take it all the way back down so we can try again */
1833 IWL_DEBUG_INFO("Bad runtime uCode load.\n"); 1666 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
1834 goto restart; 1667 goto restart;
1835 } 1668 }
1836 1669
1837 iwl_clear_stations_table(priv); 1670 iwl_clear_stations_table(priv);
1838 ret = priv->cfg->ops->lib->alive_notify(priv); 1671 ret = priv->cfg->ops->lib->alive_notify(priv);
1839 if (ret) { 1672 if (ret) {
1840 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", 1673 IWL_WARN(priv,
1841 ret); 1674 "Could not complete ALIVE transition [ntf]: %d\n", ret);
1842 goto restart; 1675 goto restart;
1843 } 1676 }
1844 1677
@@ -1863,6 +1696,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
1863 } else { 1696 } else {
1864 /* Initialize our rx_config data */ 1697 /* Initialize our rx_config data */
1865 iwl_connection_init_rx_config(priv, priv->iw_mode); 1698 iwl_connection_init_rx_config(priv, priv->iw_mode);
1699 iwl_set_rxon_chain(priv);
1866 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 1700 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
1867 } 1701 }
1868 1702
@@ -1879,7 +1713,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
1879 1713
1880 iwl_leds_register(priv); 1714 iwl_leds_register(priv);
1881 1715
1882 IWL_DEBUG_INFO("ALIVE processing complete.\n"); 1716 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1883 set_bit(STATUS_READY, &priv->status); 1717 set_bit(STATUS_READY, &priv->status);
1884 wake_up_interruptible(&priv->wait_command_queue); 1718 wake_up_interruptible(&priv->wait_command_queue);
1885 1719
@@ -1913,7 +1747,7 @@ static void __iwl_down(struct iwl_priv *priv)
1913 unsigned long flags; 1747 unsigned long flags;
1914 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 1748 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
1915 1749
1916 IWL_DEBUG_INFO(DRV_NAME " is going down\n"); 1750 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
1917 1751
1918 if (!exit_pending) 1752 if (!exit_pending)
1919 set_bit(STATUS_EXIT_PENDING, &priv->status); 1753 set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -2024,12 +1858,12 @@ static int __iwl_up(struct iwl_priv *priv)
2024 int ret; 1858 int ret;
2025 1859
2026 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 1860 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2027 IWL_WARNING("Exit pending; will not bring the NIC up\n"); 1861 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2028 return -EIO; 1862 return -EIO;
2029 } 1863 }
2030 1864
2031 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 1865 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2032 IWL_ERROR("ucode not available for device bringup\n"); 1866 IWL_ERR(priv, "ucode not available for device bringup\n");
2033 return -EIO; 1867 return -EIO;
2034 } 1868 }
2035 1869
@@ -2041,7 +1875,7 @@ static int __iwl_up(struct iwl_priv *priv)
2041 1875
2042 if (iwl_is_rfkill(priv)) { 1876 if (iwl_is_rfkill(priv)) {
2043 iwl_enable_interrupts(priv); 1877 iwl_enable_interrupts(priv);
2044 IWL_WARNING("Radio disabled by %s RF Kill switch\n", 1878 IWL_WARN(priv, "Radio disabled by %s RF Kill switch\n",
2045 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW"); 1879 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
2046 return 0; 1880 return 0;
2047 } 1881 }
@@ -2050,7 +1884,7 @@ static int __iwl_up(struct iwl_priv *priv)
2050 1884
2051 ret = iwl_hw_nic_init(priv); 1885 ret = iwl_hw_nic_init(priv);
2052 if (ret) { 1886 if (ret) {
2053 IWL_ERROR("Unable to init nic\n"); 1887 IWL_ERR(priv, "Unable to init nic\n");
2054 return ret; 1888 return ret;
2055 } 1889 }
2056 1890
@@ -2083,7 +1917,8 @@ static int __iwl_up(struct iwl_priv *priv)
2083 ret = priv->cfg->ops->lib->load_ucode(priv); 1917 ret = priv->cfg->ops->lib->load_ucode(priv);
2084 1918
2085 if (ret) { 1919 if (ret) {
2086 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", ret); 1920 IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
1921 ret);
2087 continue; 1922 continue;
2088 } 1923 }
2089 1924
@@ -2093,7 +1928,7 @@ static int __iwl_up(struct iwl_priv *priv)
2093 /* start card; "initialize" will load runtime ucode */ 1928 /* start card; "initialize" will load runtime ucode */
2094 iwl_nic_start(priv); 1929 iwl_nic_start(priv);
2095 1930
2096 IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); 1931 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2097 1932
2098 return 0; 1933 return 0;
2099 } 1934 }
@@ -2104,7 +1939,7 @@ static int __iwl_up(struct iwl_priv *priv)
2104 1939
2105 /* tried to restart and config the device for as long as our 1940 /* tried to restart and config the device for as long as our
2106 * patience could withstand */ 1941 * patience could withstand */
2107 IWL_ERROR("Unable to initialize device after %d attempts.\n", i); 1942 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2108 return -EIO; 1943 return -EIO;
2109} 1944}
2110 1945
@@ -2141,40 +1976,6 @@ static void iwl_bg_alive_start(struct work_struct *data)
2141 mutex_unlock(&priv->mutex); 1976 mutex_unlock(&priv->mutex);
2142} 1977}
2143 1978
2144static void iwl_bg_rf_kill(struct work_struct *work)
2145{
2146 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
2147
2148 wake_up_interruptible(&priv->wait_command_queue);
2149
2150 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2151 return;
2152
2153 mutex_lock(&priv->mutex);
2154
2155 if (!iwl_is_rfkill(priv)) {
2156 IWL_DEBUG(IWL_DL_RF_KILL,
2157 "HW and/or SW RF Kill no longer active, restarting "
2158 "device\n");
2159 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
2160 queue_work(priv->workqueue, &priv->restart);
2161 } else {
2162 /* make sure mac80211 stop sending Tx frame */
2163 if (priv->mac80211_registered)
2164 ieee80211_stop_queues(priv->hw);
2165
2166 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
2167 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2168 "disabled by SW switch\n");
2169 else
2170 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
2171 "Kill switch must be turned off for "
2172 "wireless networking to work.\n");
2173 }
2174 mutex_unlock(&priv->mutex);
2175 iwl_rfkill_set_hw_state(priv);
2176}
2177
2178static void iwl_bg_run_time_calib_work(struct work_struct *work) 1979static void iwl_bg_run_time_calib_work(struct work_struct *work)
2179{ 1980{
2180 struct iwl_priv *priv = container_of(work, struct iwl_priv, 1981 struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -2244,11 +2045,11 @@ static void iwl_post_associate(struct iwl_priv *priv)
2244 unsigned long flags; 2045 unsigned long flags;
2245 2046
2246 if (priv->iw_mode == NL80211_IFTYPE_AP) { 2047 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2247 IWL_ERROR("%s Should not be called in AP mode\n", __func__); 2048 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2248 return; 2049 return;
2249 } 2050 }
2250 2051
2251 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n", 2052 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2252 priv->assoc_id, priv->active_rxon.bssid_addr); 2053 priv->assoc_id, priv->active_rxon.bssid_addr);
2253 2054
2254 2055
@@ -2271,7 +2072,7 @@ static void iwl_post_associate(struct iwl_priv *priv)
2271 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 2072 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
2272 sizeof(priv->rxon_timing), &priv->rxon_timing); 2073 sizeof(priv->rxon_timing), &priv->rxon_timing);
2273 if (ret) 2074 if (ret)
2274 IWL_WARNING("REPLY_RXON_TIMING failed - " 2075 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2275 "Attempting to continue.\n"); 2076 "Attempting to continue.\n");
2276 2077
2277 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2078 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
@@ -2281,7 +2082,7 @@ static void iwl_post_associate(struct iwl_priv *priv)
2281 iwl_set_rxon_chain(priv); 2082 iwl_set_rxon_chain(priv);
2282 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 2083 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
2283 2084
2284 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", 2085 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2285 priv->assoc_id, priv->beacon_int); 2086 priv->assoc_id, priv->beacon_int);
2286 2087
2287 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 2088 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
@@ -2317,7 +2118,7 @@ static void iwl_post_associate(struct iwl_priv *priv)
2317 break; 2118 break;
2318 2119
2319 default: 2120 default:
2320 IWL_ERROR("%s Should not be called in %d mode\n", 2121 IWL_ERR(priv, "%s Should not be called in %d mode\n",
2321 __func__, priv->iw_mode); 2122 __func__, priv->iw_mode);
2322 break; 2123 break;
2323 } 2124 }
@@ -2353,30 +2154,8 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
2353{ 2154{
2354 struct iwl_priv *priv = hw->priv; 2155 struct iwl_priv *priv = hw->priv;
2355 int ret; 2156 int ret;
2356 u16 pci_cmd;
2357 2157
2358 IWL_DEBUG_MAC80211("enter\n"); 2158 IWL_DEBUG_MAC80211(priv, "enter\n");
2359
2360 if (pci_enable_device(priv->pci_dev)) {
2361 IWL_ERROR("Fail to pci_enable_device\n");
2362 return -ENODEV;
2363 }
2364 pci_restore_state(priv->pci_dev);
2365 pci_enable_msi(priv->pci_dev);
2366
2367 /* enable interrupts if needed: hw bug w/a */
2368 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
2369 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
2370 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
2371 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
2372 }
2373
2374 ret = request_irq(priv->pci_dev->irq, iwl_isr, IRQF_SHARED,
2375 DRV_NAME, priv);
2376 if (ret) {
2377 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
2378 goto out_disable_msi;
2379 }
2380 2159
2381 /* we should be verifying the device is ready to be opened */ 2160 /* we should be verifying the device is ready to be opened */
2382 mutex_lock(&priv->mutex); 2161 mutex_lock(&priv->mutex);
@@ -2388,9 +2167,9 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
2388 if (!priv->ucode_code.len) { 2167 if (!priv->ucode_code.len) {
2389 ret = iwl_read_ucode(priv); 2168 ret = iwl_read_ucode(priv);
2390 if (ret) { 2169 if (ret) {
2391 IWL_ERROR("Could not read microcode: %d\n", ret); 2170 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
2392 mutex_unlock(&priv->mutex); 2171 mutex_unlock(&priv->mutex);
2393 goto out_release_irq; 2172 return ret;
2394 } 2173 }
2395 } 2174 }
2396 2175
@@ -2401,12 +2180,12 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
2401 iwl_rfkill_set_hw_state(priv); 2180 iwl_rfkill_set_hw_state(priv);
2402 2181
2403 if (ret) 2182 if (ret)
2404 goto out_release_irq; 2183 return ret;
2405 2184
2406 if (iwl_is_rfkill(priv)) 2185 if (iwl_is_rfkill(priv))
2407 goto out; 2186 goto out;
2408 2187
2409 IWL_DEBUG_INFO("Start UP work done.\n"); 2188 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2410 2189
2411 if (test_bit(STATUS_IN_SUSPEND, &priv->status)) 2190 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
2412 return 0; 2191 return 0;
@@ -2418,36 +2197,26 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
2418 UCODE_READY_TIMEOUT); 2197 UCODE_READY_TIMEOUT);
2419 if (!ret) { 2198 if (!ret) {
2420 if (!test_bit(STATUS_READY, &priv->status)) { 2199 if (!test_bit(STATUS_READY, &priv->status)) {
2421 IWL_ERROR("START_ALIVE timeout after %dms.\n", 2200 IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
2422 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 2201 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2423 ret = -ETIMEDOUT; 2202 return -ETIMEDOUT;
2424 goto out_release_irq;
2425 } 2203 }
2426 } 2204 }
2427 2205
2428out: 2206out:
2429 priv->is_open = 1; 2207 priv->is_open = 1;
2430 IWL_DEBUG_MAC80211("leave\n"); 2208 IWL_DEBUG_MAC80211(priv, "leave\n");
2431 return 0; 2209 return 0;
2432
2433out_release_irq:
2434 free_irq(priv->pci_dev->irq, priv);
2435out_disable_msi:
2436 pci_disable_msi(priv->pci_dev);
2437 pci_disable_device(priv->pci_dev);
2438 priv->is_open = 0;
2439 IWL_DEBUG_MAC80211("leave - failed\n");
2440 return ret;
2441} 2210}
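With the PCI enable/MSI/IRQ plumbing gone from iwl_mac_start(), the function reduces to kicking off the bring-up and waiting for the ALIVE path (iwl_alive_start() above sets STATUS_READY and wakes wait_command_queue) or timing out. Condensed into one illustrative helper, assuming UCODE_READY_TIMEOUT is the driver's existing jiffies timeout:

/* Illustrative only: the readiness wait iwl_mac_start() performs above. */
static int example_wait_for_alive(struct iwl_priv *priv)
{
	long left;

	left = wait_event_interruptible_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (left <= 0 && !test_bit(STATUS_READY, &priv->status))
		return -ETIMEDOUT;	/* timed out or interrupted */

	return 0;
}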
2442 2211
2443static void iwl_mac_stop(struct ieee80211_hw *hw) 2212static void iwl_mac_stop(struct ieee80211_hw *hw)
2444{ 2213{
2445 struct iwl_priv *priv = hw->priv; 2214 struct iwl_priv *priv = hw->priv;
2446 2215
2447 IWL_DEBUG_MAC80211("enter\n"); 2216 IWL_DEBUG_MAC80211(priv, "enter\n");
2448 2217
2449 if (!priv->is_open) { 2218 if (!priv->is_open) {
2450 IWL_DEBUG_MAC80211("leave - skip\n"); 2219 IWL_DEBUG_MAC80211(priv, "leave - skip\n");
2451 return; 2220 return;
2452 } 2221 }
2453 2222
@@ -2465,27 +2234,27 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
2465 iwl_down(priv); 2234 iwl_down(priv);
2466 2235
2467 flush_workqueue(priv->workqueue); 2236 flush_workqueue(priv->workqueue);
2468 free_irq(priv->pci_dev->irq, priv);
2469 pci_disable_msi(priv->pci_dev);
2470 pci_save_state(priv->pci_dev);
2471 pci_disable_device(priv->pci_dev);
2472 2237
-	IWL_DEBUG_MAC80211("leave\n");
+	/* enable interrupts again in order to receive rfkill changes */
+	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+	iwl_enable_interrupts(priv);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
2474} 2243}
2475 2244
2476static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2245static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2477{ 2246{
2478 struct iwl_priv *priv = hw->priv; 2247 struct iwl_priv *priv = hw->priv;
2479 2248
2480 IWL_DEBUG_MACDUMP("enter\n"); 2249 IWL_DEBUG_MACDUMP(priv, "enter\n");
2481 2250
2482 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2251 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2483 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2252 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2484 2253
2485 if (iwl_tx_skb(priv, skb)) 2254 if (iwl_tx_skb(priv, skb))
2486 dev_kfree_skb_any(skb); 2255 dev_kfree_skb_any(skb);
2487 2256
2488 IWL_DEBUG_MACDUMP("leave\n"); 2257 IWL_DEBUG_MACDUMP(priv, "leave\n");
2489 return NETDEV_TX_OK; 2258 return NETDEV_TX_OK;
2490} 2259}
2491 2260
@@ -2495,10 +2264,10 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw,
2495 struct iwl_priv *priv = hw->priv; 2264 struct iwl_priv *priv = hw->priv;
2496 unsigned long flags; 2265 unsigned long flags;
2497 2266
2498 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type); 2267 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
2499 2268
2500 if (priv->vif) { 2269 if (priv->vif) {
2501 IWL_DEBUG_MAC80211("leave - vif != NULL\n"); 2270 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
2502 return -EOPNOTSUPP; 2271 return -EOPNOTSUPP;
2503 } 2272 }
2504 2273
@@ -2511,7 +2280,7 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw,
2511 mutex_lock(&priv->mutex); 2280 mutex_lock(&priv->mutex);
2512 2281
2513 if (conf->mac_addr) { 2282 if (conf->mac_addr) {
2514 IWL_DEBUG_MAC80211("Set %pM\n", conf->mac_addr); 2283 IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
2515 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 2284 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
2516 } 2285 }
2517 2286
@@ -2521,7 +2290,7 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw,
2521 2290
2522 mutex_unlock(&priv->mutex); 2291 mutex_unlock(&priv->mutex);
2523 2292
2524 IWL_DEBUG_MAC80211("leave\n"); 2293 IWL_DEBUG_MAC80211(priv, "leave\n");
2525 return 0; 2294 return 0;
2526} 2295}
2527 2296
@@ -2542,12 +2311,12 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2542 u16 channel; 2311 u16 channel;
2543 2312
2544 mutex_lock(&priv->mutex); 2313 mutex_lock(&priv->mutex);
2545 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 2314 IWL_DEBUG_MAC80211(priv, "enter to channel %d\n", conf->channel->hw_value);
2546 2315
2547 priv->current_ht_config.is_ht = conf->ht.enabled; 2316 priv->current_ht_config.is_ht = conf_is_ht(conf);
2548 2317
2549 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 2318 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
2550 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n"); 2319 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n");
2551 goto out; 2320 goto out;
2552 } 2321 }
2553 2322
@@ -2555,14 +2324,14 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2555 iwl_radio_kill_sw_disable_radio(priv); 2324 iwl_radio_kill_sw_disable_radio(priv);
2556 2325
2557 if (!iwl_is_ready(priv)) { 2326 if (!iwl_is_ready(priv)) {
2558 IWL_DEBUG_MAC80211("leave - not ready\n"); 2327 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2559 ret = -EIO; 2328 ret = -EIO;
2560 goto out; 2329 goto out;
2561 } 2330 }
2562 2331
2563 if (unlikely(!priv->cfg->mod_params->disable_hw_scan && 2332 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2564 test_bit(STATUS_SCANNING, &priv->status))) { 2333 test_bit(STATUS_SCANNING, &priv->status))) {
2565 IWL_DEBUG_MAC80211("leave - scanning\n"); 2334 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2566 mutex_unlock(&priv->mutex); 2335 mutex_unlock(&priv->mutex);
2567 return 0; 2336 return 0;
2568 } 2337 }
@@ -2570,14 +2339,14 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2570 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 2339 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2571 ch_info = iwl_get_channel_info(priv, conf->channel->band, channel); 2340 ch_info = iwl_get_channel_info(priv, conf->channel->band, channel);
2572 if (!is_channel_valid(ch_info)) { 2341 if (!is_channel_valid(ch_info)) {
2573 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 2342 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2574 ret = -EINVAL; 2343 ret = -EINVAL;
2575 goto out; 2344 goto out;
2576 } 2345 }
2577 2346
2578 if (priv->iw_mode == NL80211_IFTYPE_ADHOC && 2347 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2579 !is_channel_ibss(ch_info)) { 2348 !is_channel_ibss(ch_info)) {
2580 IWL_ERROR("channel %d in band %d not IBSS channel\n", 2349 IWL_ERR(priv, "channel %d in band %d not IBSS channel\n",
2581 conf->channel->hw_value, conf->channel->band); 2350 conf->channel->hw_value, conf->channel->band);
2582 ret = -EINVAL; 2351 ret = -EINVAL;
2583 goto out; 2352 goto out;
@@ -2615,12 +2384,12 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2615#endif 2384#endif
2616 2385
2617 if (!conf->radio_enabled) { 2386 if (!conf->radio_enabled) {
2618 IWL_DEBUG_MAC80211("leave - radio disabled\n"); 2387 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
2619 goto out; 2388 goto out;
2620 } 2389 }
2621 2390
2622 if (iwl_is_rfkill(priv)) { 2391 if (iwl_is_rfkill(priv)) {
2623 IWL_DEBUG_MAC80211("leave - RF kill\n"); 2392 IWL_DEBUG_MAC80211(priv, "leave - RF kill\n");
2624 ret = -EIO; 2393 ret = -EIO;
2625 goto out; 2394 goto out;
2626 } 2395 }
@@ -2630,22 +2399,25 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2630 else 2399 else
2631 ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM); 2400 ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
2632 if (ret) 2401 if (ret)
2633 IWL_DEBUG_MAC80211("Error setting power level\n"); 2402 IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
2634 2403
2635 IWL_DEBUG_MAC80211("TX Power old=%d new=%d\n", 2404 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2636 priv->tx_power_user_lmt, conf->power_level); 2405 priv->tx_power_user_lmt, conf->power_level);
2637 2406
2638 iwl_set_tx_power(priv, conf->power_level, false); 2407 iwl_set_tx_power(priv, conf->power_level, false);
2639 2408
2640 iwl_set_rate(priv); 2409 iwl_set_rate(priv);
2641 2410
2411 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2412 iwl_set_rxon_chain(priv);
2413
2642 if (memcmp(&priv->active_rxon, 2414 if (memcmp(&priv->active_rxon,
2643 &priv->staging_rxon, sizeof(priv->staging_rxon))) 2415 &priv->staging_rxon, sizeof(priv->staging_rxon)))
2644 iwl_commit_rxon(priv); 2416 iwl_commit_rxon(priv);
2645 else 2417 else
2646 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n"); 2418 IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n");
2647 2419
2648 IWL_DEBUG_MAC80211("leave\n"); 2420 IWL_DEBUG_MAC80211(priv, "leave\n");
2649 2421
2650out: 2422out:
2651 mutex_unlock(&priv->mutex); 2423 mutex_unlock(&priv->mutex);
@@ -2672,7 +2444,7 @@ static void iwl_config_ap(struct iwl_priv *priv)
2672 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 2444 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
2673 sizeof(priv->rxon_timing), &priv->rxon_timing); 2445 sizeof(priv->rxon_timing), &priv->rxon_timing);
2674 if (ret) 2446 if (ret)
2675 IWL_WARNING("REPLY_RXON_TIMING failed - " 2447 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2676 "Attempting to continue.\n"); 2448 "Attempting to continue.\n");
2677 2449
2678 iwl_set_rxon_chain(priv); 2450 iwl_set_rxon_chain(priv);
@@ -2726,7 +2498,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw,
2726 return -EIO; 2498 return -EIO;
2727 2499
2728 if (priv->vif != vif) { 2500 if (priv->vif != vif) {
2729 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n"); 2501 IWL_DEBUG_MAC80211(priv, "leave - priv->vif != vif\n");
2730 return 0; 2502 return 0;
2731 } 2503 }
2732 2504
@@ -2748,7 +2520,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw,
2748 mutex_lock(&priv->mutex); 2520 mutex_lock(&priv->mutex);
2749 2521
2750 if (conf->bssid) 2522 if (conf->bssid)
2751 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid); 2523 IWL_DEBUG_MAC80211(priv, "bssid: %pM\n", conf->bssid);
2752 2524
2753/* 2525/*
2754 * very dubious code was here; the probe filtering flag is never set: 2526 * very dubious code was here; the probe filtering flag is never set:
@@ -2761,7 +2533,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw,
2761 if (!conf->bssid) { 2533 if (!conf->bssid) {
2762 conf->bssid = priv->mac_addr; 2534 conf->bssid = priv->mac_addr;
2763 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 2535 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
2764 IWL_DEBUG_MAC80211("bssid was set to: %pM\n", 2536 IWL_DEBUG_MAC80211(priv, "bssid was set to: %pM\n",
2765 conf->bssid); 2537 conf->bssid);
2766 } 2538 }
2767 if (priv->ibss_beacon) 2539 if (priv->ibss_beacon)
@@ -2778,9 +2550,9 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw,
2778 /* If there is currently a HW scan going on in the background 2550 /* If there is currently a HW scan going on in the background
2779 * then we need to cancel it else the RXON below will fail. */ 2551 * then we need to cancel it else the RXON below will fail. */
2780 if (iwl_scan_cancel_timeout(priv, 100)) { 2552 if (iwl_scan_cancel_timeout(priv, 100)) {
2781 IWL_WARNING("Aborted scan still in progress " 2553 IWL_WARN(priv, "Aborted scan still in progress "
2782 "after 100ms\n"); 2554 "after 100ms\n");
2783 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); 2555 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
2784 mutex_unlock(&priv->mutex); 2556 mutex_unlock(&priv->mutex);
2785 return -EAGAIN; 2557 return -EAGAIN;
2786 } 2558 }
@@ -2808,64 +2580,18 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw,
2808 } 2580 }
2809 2581
2810 done: 2582 done:
2811 IWL_DEBUG_MAC80211("leave\n"); 2583 IWL_DEBUG_MAC80211(priv, "leave\n");
2812 mutex_unlock(&priv->mutex); 2584 mutex_unlock(&priv->mutex);
2813 2585
2814 return 0; 2586 return 0;
2815} 2587}
2816 2588
2817static void iwl_configure_filter(struct ieee80211_hw *hw,
2818 unsigned int changed_flags,
2819 unsigned int *total_flags,
2820 int mc_count, struct dev_addr_list *mc_list)
2821{
2822 struct iwl_priv *priv = hw->priv;
2823 __le32 *filter_flags = &priv->staging_rxon.filter_flags;
2824
2825 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
2826 changed_flags, *total_flags);
2827
2828 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
2829 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
2830 *filter_flags |= RXON_FILTER_PROMISC_MSK;
2831 else
2832 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
2833 }
2834 if (changed_flags & FIF_ALLMULTI) {
2835 if (*total_flags & FIF_ALLMULTI)
2836 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
2837 else
2838 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
2839 }
2840 if (changed_flags & FIF_CONTROL) {
2841 if (*total_flags & FIF_CONTROL)
2842 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
2843 else
2844 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
2845 }
2846 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
2847 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
2848 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
2849 else
2850 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
2851 }
2852
2853 /* We avoid iwl_commit_rxon here to commit the new filter flags
2854 * since mac80211 will call ieee80211_hw_config immediately.
2855 * (mc_list is not supported at this time). Otherwise, we need to
2856 * queue a background iwl_commit_rxon work.
2857 */
2858
2859 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2860 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2861}
2862
2863static void iwl_mac_remove_interface(struct ieee80211_hw *hw, 2589static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2864 struct ieee80211_if_init_conf *conf) 2590 struct ieee80211_if_init_conf *conf)
2865{ 2591{
2866 struct iwl_priv *priv = hw->priv; 2592 struct iwl_priv *priv = hw->priv;
2867 2593
2868 IWL_DEBUG_MAC80211("enter\n"); 2594 IWL_DEBUG_MAC80211(priv, "enter\n");
2869 2595
2870 mutex_lock(&priv->mutex); 2596 mutex_lock(&priv->mutex);
2871 2597
@@ -2880,7 +2606,7 @@ static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2880 } 2606 }
2881 mutex_unlock(&priv->mutex); 2607 mutex_unlock(&priv->mutex);
2882 2608
2883 IWL_DEBUG_MAC80211("leave\n"); 2609 IWL_DEBUG_MAC80211(priv, "leave\n");
2884 2610
2885} 2611}
2886 2612
@@ -2892,10 +2618,10 @@ static void iwl_bss_info_changed(struct ieee80211_hw *hw,
2892{ 2618{
2893 struct iwl_priv *priv = hw->priv; 2619 struct iwl_priv *priv = hw->priv;
2894 2620
2895 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes); 2621 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2896 2622
2897 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 2623 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2898 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n", 2624 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2899 bss_conf->use_short_preamble); 2625 bss_conf->use_short_preamble);
2900 if (bss_conf->use_short_preamble) 2626 if (bss_conf->use_short_preamble)
2901 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 2627 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
@@ -2904,7 +2630,7 @@ static void iwl_bss_info_changed(struct ieee80211_hw *hw,
2904 } 2630 }
2905 2631
2906 if (changes & BSS_CHANGED_ERP_CTS_PROT) { 2632 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2907 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); 2633 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
2908 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) 2634 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
2909 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK; 2635 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
2910 else 2636 else
@@ -2917,7 +2643,7 @@ static void iwl_bss_info_changed(struct ieee80211_hw *hw,
2917 } 2643 }
2918 2644
2919 if (changes & BSS_CHANGED_ASSOC) { 2645 if (changes & BSS_CHANGED_ASSOC) {
2920 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc); 2646 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2921 /* This should never happen as this function should 2647 /* This should never happen as this function should
2922 * never be called from interrupt context. */ 2648 * never be called from interrupt context. */
2923 if (WARN_ON_ONCE(in_interrupt())) 2649 if (WARN_ON_ONCE(in_interrupt()))
@@ -2939,29 +2665,37 @@ static void iwl_bss_info_changed(struct ieee80211_hw *hw,
2939 mutex_unlock(&priv->mutex); 2665 mutex_unlock(&priv->mutex);
2940 } else { 2666 } else {
2941 priv->assoc_id = 0; 2667 priv->assoc_id = 0;
2942 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc); 2668 IWL_DEBUG_MAC80211(priv, "DISASSOC %d\n", bss_conf->assoc);
2943 } 2669 }
2944 } else if (changes && iwl_is_associated(priv) && priv->assoc_id) { 2670 } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
2945 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes); 2671 IWL_DEBUG_MAC80211(priv, "Associated Changes %d\n", changes);
2946 iwl_send_rxon_assoc(priv); 2672 iwl_send_rxon_assoc(priv);
2947 } 2673 }
2948 2674
2949} 2675}
2950 2676
-static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
+static int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+			   struct cfg80211_scan_request *req)
 {
 	unsigned long flags;
 	struct iwl_priv *priv = hw->priv;
 	int ret;
+	u8 *ssid = NULL;
+	size_t ssid_len = 0;
+
+	if (req->n_ssids) {
+		ssid = req->ssids[0].ssid;
+		ssid_len = req->ssids[0].ssid_len;
+	}
2956 2690
2957 IWL_DEBUG_MAC80211("enter\n"); 2691 IWL_DEBUG_MAC80211(priv, "enter\n");
2958 2692
2959 mutex_lock(&priv->mutex); 2693 mutex_lock(&priv->mutex);
2960 spin_lock_irqsave(&priv->lock, flags); 2694 spin_lock_irqsave(&priv->lock, flags);
2961 2695
2962 if (!iwl_is_ready_rf(priv)) { 2696 if (!iwl_is_ready_rf(priv)) {
2963 ret = -EIO; 2697 ret = -EIO;
2964 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); 2698 IWL_DEBUG_MAC80211(priv, "leave - not ready or exit pending\n");
2965 goto out_unlock; 2699 goto out_unlock;
2966 } 2700 }
2967 2701
@@ -2971,7 +2705,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
2971 */ 2705 */
2972 if (priv->next_scan_jiffies && 2706 if (priv->next_scan_jiffies &&
2973 time_after(priv->next_scan_jiffies, jiffies)) { 2707 time_after(priv->next_scan_jiffies, jiffies)) {
2974 IWL_DEBUG_SCAN("scan rejected: within next scan period\n"); 2708 IWL_DEBUG_SCAN(priv, "scan rejected: within next scan period\n");
2975 queue_work(priv->workqueue, &priv->scan_completed); 2709 queue_work(priv->workqueue, &priv->scan_completed);
2976 ret = 0; 2710 ret = 0;
2977 goto out_unlock; 2711 goto out_unlock;
@@ -2980,7 +2714,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
2980 /* if we just finished scan ask for delay */ 2714 /* if we just finished scan ask for delay */
2981 if (iwl_is_associated(priv) && priv->last_scan_jiffies && 2715 if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
2982 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) { 2716 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
2983 IWL_DEBUG_SCAN("scan rejected: within previous scan period\n"); 2717 IWL_DEBUG_SCAN(priv, "scan rejected: within previous scan period\n");
2984 queue_work(priv->workqueue, &priv->scan_completed); 2718 queue_work(priv->workqueue, &priv->scan_completed);
2985 ret = 0; 2719 ret = 0;
2986 goto out_unlock; 2720 goto out_unlock;
@@ -2988,7 +2722,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
2988 2722
2989 if (ssid_len) { 2723 if (ssid_len) {
2990 priv->one_direct_scan = 1; 2724 priv->one_direct_scan = 1;
2991 priv->direct_ssid_len = min_t(u8, ssid_len, IW_ESSID_MAX_SIZE); 2725 priv->direct_ssid_len = ssid_len;
2992 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); 2726 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
2993 } else { 2727 } else {
2994 priv->one_direct_scan = 0; 2728 priv->one_direct_scan = 0;
@@ -2996,7 +2730,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
2996 2730
2997 ret = iwl_scan_initiate(priv); 2731 ret = iwl_scan_initiate(priv);
2998 2732
2999 IWL_DEBUG_MAC80211("leave\n"); 2733 IWL_DEBUG_MAC80211(priv, "leave\n");
3000 2734
3001out_unlock: 2735out_unlock:
3002 spin_unlock_irqrestore(&priv->lock, flags); 2736 spin_unlock_irqrestore(&priv->lock, flags);
@@ -3011,36 +2745,34 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3011{ 2745{
3012 2746
3013 struct iwl_priv *priv = hw->priv; 2747 struct iwl_priv *priv = hw->priv;
3014 IWL_DEBUG_MAC80211("enter\n"); 2748 IWL_DEBUG_MAC80211(priv, "enter\n");
3015 2749
3016 iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key); 2750 iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key);
3017 2751
3018 IWL_DEBUG_MAC80211("leave\n"); 2752 IWL_DEBUG_MAC80211(priv, "leave\n");
3019} 2753}
3020 2754
3021static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2755static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-			   const u8 *local_addr, const u8 *addr,
+			   struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
 			   struct ieee80211_key_conf *key)
 {
 	struct iwl_priv *priv = hw->priv;
-	int ret = 0;
-	u8 sta_id = IWL_INVALID_STATION;
-	u8 is_default_wep_key = 0;
+	const u8 *addr;
+	int ret;
+	u8 sta_id;
+	bool is_default_wep_key = false;
 
-	IWL_DEBUG_MAC80211("enter\n");
+	IWL_DEBUG_MAC80211(priv, "enter\n");
 
 	if (priv->hw_params.sw_crypto) {
-		IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
+		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
 		return -EOPNOTSUPP;
 	}
-
-	if (is_zero_ether_addr(addr))
-		/* only support pairwise keys */
-		return -EOPNOTSUPP;
-
+	addr = sta ? sta->addr : iwl_bcast_addr;
3041 sta_id = iwl_find_station(priv, addr); 2773 sta_id = iwl_find_station(priv, addr);
3042 if (sta_id == IWL_INVALID_STATION) { 2774 if (sta_id == IWL_INVALID_STATION) {
3043 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n", 2775 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
3044 addr); 2776 addr);
3045 return -EINVAL; 2777 return -EINVAL;
3046 2778
@@ -3070,7 +2802,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3070 else 2802 else
3071 ret = iwl_set_dynamic_key(priv, key, sta_id); 2803 ret = iwl_set_dynamic_key(priv, key, sta_id);
3072 2804
3073 IWL_DEBUG_MAC80211("enable hwcrypto key\n"); 2805 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3074 break; 2806 break;
3075 case DISABLE_KEY: 2807 case DISABLE_KEY:
3076 if (is_default_wep_key) 2808 if (is_default_wep_key)
@@ -3078,13 +2810,13 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3078 else 2810 else
3079 ret = iwl_remove_dynamic_key(priv, key, sta_id); 2811 ret = iwl_remove_dynamic_key(priv, key, sta_id);
3080 2812
3081 IWL_DEBUG_MAC80211("disable hwcrypto key\n"); 2813 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3082 break; 2814 break;
3083 default: 2815 default:
3084 ret = -EINVAL; 2816 ret = -EINVAL;
3085 } 2817 }
3086 2818
3087 IWL_DEBUG_MAC80211("leave\n"); 2819 IWL_DEBUG_MAC80211(priv, "leave\n");
3088 2820
3089 return ret; 2821 return ret;
3090} 2822}
@@ -3096,15 +2828,15 @@ static int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3096 unsigned long flags; 2828 unsigned long flags;
3097 int q; 2829 int q;
3098 2830
3099 IWL_DEBUG_MAC80211("enter\n"); 2831 IWL_DEBUG_MAC80211(priv, "enter\n");
3100 2832
3101 if (!iwl_is_ready_rf(priv)) { 2833 if (!iwl_is_ready_rf(priv)) {
3102 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 2834 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
3103 return -EIO; 2835 return -EIO;
3104 } 2836 }
3105 2837
3106 if (queue >= AC_NUM) { 2838 if (queue >= AC_NUM) {
3107 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); 2839 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
3108 return 0; 2840 return 0;
3109 } 2841 }
3110 2842
@@ -3128,7 +2860,7 @@ static int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3128 2860
3129 spin_unlock_irqrestore(&priv->lock, flags); 2861 spin_unlock_irqrestore(&priv->lock, flags);
3130 2862
3131 IWL_DEBUG_MAC80211("leave\n"); 2863 IWL_DEBUG_MAC80211(priv, "leave\n");
3132 return 0; 2864 return 0;
3133} 2865}
3134 2866
@@ -3138,7 +2870,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3138{ 2870{
3139 struct iwl_priv *priv = hw->priv; 2871 struct iwl_priv *priv = hw->priv;
3140 2872
3141 IWL_DEBUG_HT("A-MPDU action on addr %pM tid %d\n", 2873 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
3142 sta->addr, tid); 2874 sta->addr, tid);
3143 2875
3144 if (!(priv->cfg->sku & IWL_SKU_N)) 2876 if (!(priv->cfg->sku & IWL_SKU_N))
@@ -3146,19 +2878,19 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3146 2878
3147 switch (action) { 2879 switch (action) {
3148 case IEEE80211_AMPDU_RX_START: 2880 case IEEE80211_AMPDU_RX_START:
3149 IWL_DEBUG_HT("start Rx\n"); 2881 IWL_DEBUG_HT(priv, "start Rx\n");
3150 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); 2882 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
3151 case IEEE80211_AMPDU_RX_STOP: 2883 case IEEE80211_AMPDU_RX_STOP:
3152 IWL_DEBUG_HT("stop Rx\n"); 2884 IWL_DEBUG_HT(priv, "stop Rx\n");
3153 return iwl_sta_rx_agg_stop(priv, sta->addr, tid); 2885 return iwl_sta_rx_agg_stop(priv, sta->addr, tid);
3154 case IEEE80211_AMPDU_TX_START: 2886 case IEEE80211_AMPDU_TX_START:
3155 IWL_DEBUG_HT("start Tx\n"); 2887 IWL_DEBUG_HT(priv, "start Tx\n");
3156 return iwl_tx_agg_start(priv, sta->addr, tid, ssn); 2888 return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
3157 case IEEE80211_AMPDU_TX_STOP: 2889 case IEEE80211_AMPDU_TX_STOP:
3158 IWL_DEBUG_HT("stop Tx\n"); 2890 IWL_DEBUG_HT(priv, "stop Tx\n");
3159 return iwl_tx_agg_stop(priv, sta->addr, tid); 2891 return iwl_tx_agg_stop(priv, sta->addr, tid);
3160 default: 2892 default:
3161 IWL_DEBUG_HT("unknown\n"); 2893 IWL_DEBUG_HT(priv, "unknown\n");
3162 return -EINVAL; 2894 return -EINVAL;
3163 break; 2895 break;
3164 } 2896 }
@@ -3174,10 +2906,10 @@ static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
3174 struct iwl_queue *q; 2906 struct iwl_queue *q;
3175 unsigned long flags; 2907 unsigned long flags;
3176 2908
3177 IWL_DEBUG_MAC80211("enter\n"); 2909 IWL_DEBUG_MAC80211(priv, "enter\n");
3178 2910
3179 if (!iwl_is_ready_rf(priv)) { 2911 if (!iwl_is_ready_rf(priv)) {
3180 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 2912 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
3181 return -EIO; 2913 return -EIO;
3182 } 2914 }
3183 2915
@@ -3195,7 +2927,7 @@ static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
3195 } 2927 }
3196 spin_unlock_irqrestore(&priv->lock, flags); 2928 spin_unlock_irqrestore(&priv->lock, flags);
3197 2929
3198 IWL_DEBUG_MAC80211("leave\n"); 2930 IWL_DEBUG_MAC80211(priv, "leave\n");
3199 2931
3200 return 0; 2932 return 0;
3201} 2933}
@@ -3206,8 +2938,8 @@ static int iwl_mac_get_stats(struct ieee80211_hw *hw,
3206 struct iwl_priv *priv = hw->priv; 2938 struct iwl_priv *priv = hw->priv;
3207 2939
3208 priv = hw->priv; 2940 priv = hw->priv;
3209 IWL_DEBUG_MAC80211("enter\n"); 2941 IWL_DEBUG_MAC80211(priv, "enter\n");
3210 IWL_DEBUG_MAC80211("leave\n"); 2942 IWL_DEBUG_MAC80211(priv, "leave\n");
3211 2943
3212 return 0; 2944 return 0;
3213} 2945}
@@ -3218,7 +2950,7 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
3218 unsigned long flags; 2950 unsigned long flags;
3219 2951
3220 mutex_lock(&priv->mutex); 2952 mutex_lock(&priv->mutex);
3221 IWL_DEBUG_MAC80211("enter\n"); 2953 IWL_DEBUG_MAC80211(priv, "enter\n");
3222 2954
3223 spin_lock_irqsave(&priv->lock, flags); 2955 spin_lock_irqsave(&priv->lock, flags);
3224 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info)); 2956 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
@@ -3245,7 +2977,7 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
3245 spin_unlock_irqrestore(&priv->lock, flags); 2977 spin_unlock_irqrestore(&priv->lock, flags);
3246 2978
3247 if (!iwl_is_ready_rf(priv)) { 2979 if (!iwl_is_ready_rf(priv)) {
3248 IWL_DEBUG_MAC80211("leave - not ready\n"); 2980 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
3249 mutex_unlock(&priv->mutex); 2981 mutex_unlock(&priv->mutex);
3250 return; 2982 return;
3251 } 2983 }
@@ -3274,7 +3006,7 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
3274 IEEE80211_CHAN_RADAR)) 3006 IEEE80211_CHAN_RADAR))
3275 iwl_power_disable_management(priv, 3000); 3007 iwl_power_disable_management(priv, 3000);
3276 3008
3277 IWL_DEBUG_MAC80211("leave - not in IBSS\n"); 3009 IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
3278 mutex_unlock(&priv->mutex); 3010 mutex_unlock(&priv->mutex);
3279 return; 3011 return;
3280 } 3012 }
@@ -3283,7 +3015,7 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
3283 3015
3284 mutex_unlock(&priv->mutex); 3016 mutex_unlock(&priv->mutex);
3285 3017
3286 IWL_DEBUG_MAC80211("leave\n"); 3018 IWL_DEBUG_MAC80211(priv, "leave\n");
3287} 3019}
3288 3020
3289static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 3021static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -3292,15 +3024,15 @@ static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3292 unsigned long flags; 3024 unsigned long flags;
3293 __le64 timestamp; 3025 __le64 timestamp;
3294 3026
3295 IWL_DEBUG_MAC80211("enter\n"); 3027 IWL_DEBUG_MAC80211(priv, "enter\n");
3296 3028
3297 if (!iwl_is_ready_rf(priv)) { 3029 if (!iwl_is_ready_rf(priv)) {
3298 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 3030 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
3299 return -EIO; 3031 return -EIO;
3300 } 3032 }
3301 3033
3302 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { 3034 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
3303 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 3035 IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
3304 return -EIO; 3036 return -EIO;
3305 } 3037 }
3306 3038
@@ -3315,7 +3047,7 @@ static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3315 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 3047 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
3316 priv->timestamp = le64_to_cpu(timestamp); 3048 priv->timestamp = le64_to_cpu(timestamp);
3317 3049
3318 IWL_DEBUG_MAC80211("leave\n"); 3050 IWL_DEBUG_MAC80211(priv, "leave\n");
3319 spin_unlock_irqrestore(&priv->lock, flags); 3051 spin_unlock_irqrestore(&priv->lock, flags);
3320 3052
3321 iwl_reset_qos(priv); 3053 iwl_reset_qos(priv);
@@ -3359,8 +3091,7 @@ static ssize_t store_debug_level(struct device *d,
3359 3091
3360 ret = strict_strtoul(buf, 0, &val); 3092 ret = strict_strtoul(buf, 0, &val);
3361 if (ret) 3093 if (ret)
3362 printk(KERN_INFO DRV_NAME 3094 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
3363 ": %s is not in hex or decimal form.\n", buf);
3364 else 3095 else
3365 priv->debug_level = val; 3096 priv->debug_level = val;
3366 3097
@@ -3439,8 +3170,7 @@ static ssize_t store_tx_power(struct device *d,
3439 3170
3440 ret = strict_strtoul(buf, 10, &val); 3171 ret = strict_strtoul(buf, 10, &val);
3441 if (ret) 3172 if (ret)
3442 printk(KERN_INFO DRV_NAME 3173 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
3443 ": %s is not in decimal form.\n", buf);
3444 else 3174 else
3445 iwl_set_tx_power(priv, val, false); 3175 iwl_set_tx_power(priv, val, false);
3446 3176
@@ -3473,9 +3203,9 @@ static ssize_t store_flags(struct device *d,
3473 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 3203 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
3474 /* Cancel any currently running scans... */ 3204 /* Cancel any currently running scans... */
3475 if (iwl_scan_cancel_timeout(priv, 100)) 3205 if (iwl_scan_cancel_timeout(priv, 100))
3476 IWL_WARNING("Could not cancel scan.\n"); 3206 IWL_WARN(priv, "Could not cancel scan.\n");
3477 else { 3207 else {
3478 IWL_DEBUG_INFO("Commit rxon.flags = 0x%04X\n", flags); 3208 IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
3479 priv->staging_rxon.flags = cpu_to_le32(flags); 3209 priv->staging_rxon.flags = cpu_to_le32(flags);
3480 iwl_commit_rxon(priv); 3210 iwl_commit_rxon(priv);
3481 } 3211 }
@@ -3512,9 +3242,9 @@ static ssize_t store_filter_flags(struct device *d,
3512 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 3242 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
3513 /* Cancel any currently running scans... */ 3243 /* Cancel any currently running scans... */
3514 if (iwl_scan_cancel_timeout(priv, 100)) 3244 if (iwl_scan_cancel_timeout(priv, 100))
3515 IWL_WARNING("Could not cancel scan.\n"); 3245 IWL_WARN(priv, "Could not cancel scan.\n");
3516 else { 3246 else {
3517 IWL_DEBUG_INFO("Committing rxon.filter_flags = " 3247 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3518 "0x%04X\n", filter_flags); 3248 "0x%04X\n", filter_flags);
3519 priv->staging_rxon.filter_flags = 3249 priv->staging_rxon.filter_flags =
3520 cpu_to_le32(filter_flags); 3250 cpu_to_le32(filter_flags);
@@ -3529,31 +3259,6 @@ static ssize_t store_filter_flags(struct device *d,
3529static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3259static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3530 store_filter_flags); 3260 store_filter_flags);
3531 3261
3532static ssize_t store_retry_rate(struct device *d,
3533 struct device_attribute *attr,
3534 const char *buf, size_t count)
3535{
3536 struct iwl_priv *priv = dev_get_drvdata(d);
3537 long val;
3538 int ret = strict_strtol(buf, 10, &val);
3539 if (!ret)
3540 return ret;
3541
3542 priv->retry_rate = (val > 0) ? val : 1;
3543
3544 return count;
3545}
3546
3547static ssize_t show_retry_rate(struct device *d,
3548 struct device_attribute *attr, char *buf)
3549{
3550 struct iwl_priv *priv = dev_get_drvdata(d);
3551 return sprintf(buf, "%d", priv->retry_rate);
3552}
3553
3554static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
3555 store_retry_rate);
3556
3557static ssize_t store_power_level(struct device *d, 3262static ssize_t store_power_level(struct device *d,
3558 struct device_attribute *attr, 3263 struct device_attribute *attr,
3559 const char *buf, size_t count) 3264 const char *buf, size_t count)
@@ -3576,7 +3281,7 @@ static ssize_t store_power_level(struct device *d,
3576 3281
3577 ret = iwl_power_set_user_mode(priv, mode); 3282 ret = iwl_power_set_user_mode(priv, mode);
3578 if (ret) { 3283 if (ret) {
3579 IWL_DEBUG_MAC80211("failed setting power mode.\n"); 3284 IWL_DEBUG_MAC80211(priv, "failed setting power mode.\n");
3580 goto out; 3285 goto out;
3581 } 3286 }
3582 ret = count; 3287 ret = count;
@@ -3656,16 +3361,6 @@ static ssize_t show_statistics(struct device *d,
3656 3361
3657static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); 3362static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
3658 3363
3659static ssize_t show_status(struct device *d,
3660 struct device_attribute *attr, char *buf)
3661{
3662 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3663 if (!iwl_is_alive(priv))
3664 return -EAGAIN;
3665 return sprintf(buf, "0x%08x\n", (int)priv->status);
3666}
3667
3668static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
3669 3364
3670/***************************************************************************** 3365/*****************************************************************************
3671 * 3366 *
@@ -3719,9 +3414,7 @@ static struct attribute *iwl_sysfs_entries[] = {
3719 &dev_attr_flags.attr, 3414 &dev_attr_flags.attr,
3720 &dev_attr_filter_flags.attr, 3415 &dev_attr_filter_flags.attr,
3721 &dev_attr_power_level.attr, 3416 &dev_attr_power_level.attr,
3722 &dev_attr_retry_rate.attr,
3723 &dev_attr_statistics.attr, 3417 &dev_attr_statistics.attr,
3724 &dev_attr_status.attr,
3725 &dev_attr_temperature.attr, 3418 &dev_attr_temperature.attr,
3726 &dev_attr_tx_power.attr, 3419 &dev_attr_tx_power.attr,
3727#ifdef CONFIG_IWLWIFI_DEBUG 3420#ifdef CONFIG_IWLWIFI_DEBUG
@@ -3764,6 +3457,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3764 struct ieee80211_hw *hw; 3457 struct ieee80211_hw *hw;
3765 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 3458 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3766 unsigned long flags; 3459 unsigned long flags;
3460 u16 pci_cmd;
3767 3461
3768 /************************ 3462 /************************
3769 * 1. Allocating HW data 3463 * 1. Allocating HW data
@@ -3788,7 +3482,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3788 3482
3789 SET_IEEE80211_DEV(hw, &pdev->dev); 3483 SET_IEEE80211_DEV(hw, &pdev->dev);
3790 3484
3791 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); 3485 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3792 priv->cfg = cfg; 3486 priv->cfg = cfg;
3793 priv->pci_dev = pdev; 3487 priv->pci_dev = pdev;
3794 3488
@@ -3816,8 +3510,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3816 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3510 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3817 /* both attempts failed: */ 3511 /* both attempts failed: */
3818 if (err) { 3512 if (err) {
3819 printk(KERN_WARNING "%s: No suitable DMA available.\n", 3513 IWL_WARN(priv, "No suitable DMA available.\n");
3820 DRV_NAME);
3821 goto out_pci_disable_device; 3514 goto out_pci_disable_device;
3822 } 3515 }
3823 } 3516 }
@@ -3838,13 +3531,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3838 goto out_pci_release_regions; 3531 goto out_pci_release_regions;
3839 } 3532 }
3840 3533
3841 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", 3534 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3842 (unsigned long long) pci_resource_len(pdev, 0)); 3535 (unsigned long long) pci_resource_len(pdev, 0));
3843 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); 3536 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3844 3537
3845 iwl_hw_detect(priv); 3538 iwl_hw_detect(priv);
3846 printk(KERN_INFO DRV_NAME 3539 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
3847 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n",
3848 priv->cfg->name, priv->hw_rev); 3540 priv->cfg->name, priv->hw_rev);
3849 3541
3850 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3542 /* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -3854,7 +3546,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3854 /* amp init */ 3546 /* amp init */
3855 err = priv->cfg->ops->lib->apm_ops.init(priv); 3547 err = priv->cfg->ops->lib->apm_ops.init(priv);
3856 if (err < 0) { 3548 if (err < 0) {
3857 IWL_DEBUG_INFO("Failed to init APMG\n"); 3549 IWL_DEBUG_INFO(priv, "Failed to init APMG\n");
3858 goto out_iounmap; 3550 goto out_iounmap;
3859 } 3551 }
3860 /***************** 3552 /*****************
@@ -3863,7 +3555,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3863 /* Read the EEPROM */ 3555 /* Read the EEPROM */
3864 err = iwl_eeprom_init(priv); 3556 err = iwl_eeprom_init(priv);
3865 if (err) { 3557 if (err) {
3866 IWL_ERROR("Unable to init EEPROM\n"); 3558 IWL_ERR(priv, "Unable to init EEPROM\n");
3867 goto out_iounmap; 3559 goto out_iounmap;
3868 } 3560 }
3869 err = iwl_eeprom_check_version(priv); 3561 err = iwl_eeprom_check_version(priv);
@@ -3872,14 +3564,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3872 3564
3873 /* extract MAC Address */ 3565 /* extract MAC Address */
3874 iwl_eeprom_get_mac(priv, priv->mac_addr); 3566 iwl_eeprom_get_mac(priv, priv->mac_addr);
3875 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr); 3567 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr);
3876 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 3568 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
3877 3569
3878 /************************ 3570 /************************
3879 * 5. Setup HW constants 3571 * 5. Setup HW constants
3880 ************************/ 3572 ************************/
3881 if (iwl_set_hw_params(priv)) { 3573 if (iwl_set_hw_params(priv)) {
3882 IWL_ERROR("failed to set hw parameters\n"); 3574 IWL_ERR(priv, "failed to set hw parameters\n");
3883 goto out_free_eeprom; 3575 goto out_free_eeprom;
3884 } 3576 }
3885 3577
@@ -3899,7 +3591,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3899 /* Disable radio (SW RF KILL) via parameter when loading driver */ 3591 /* Disable radio (SW RF KILL) via parameter when loading driver */
3900 if (priv->cfg->mod_params->disable) { 3592 if (priv->cfg->mod_params->disable) {
3901 set_bit(STATUS_RF_KILL_SW, &priv->status); 3593 set_bit(STATUS_RF_KILL_SW, &priv->status);
3902 IWL_DEBUG_INFO("Radio disabled.\n"); 3594 IWL_DEBUG_INFO(priv, "Radio disabled.\n");
3903 } 3595 }
3904 3596
3905 /******************** 3597 /********************
@@ -3909,43 +3601,65 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3909 iwl_disable_interrupts(priv); 3601 iwl_disable_interrupts(priv);
3910 spin_unlock_irqrestore(&priv->lock, flags); 3602 spin_unlock_irqrestore(&priv->lock, flags);
3911 3603
3604 pci_enable_msi(priv->pci_dev);
3605
3606 err = request_irq(priv->pci_dev->irq, iwl_isr, IRQF_SHARED,
3607 DRV_NAME, priv);
3608 if (err) {
3609 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3610 goto out_disable_msi;
3611 }
3912 err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); 3612 err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group);
3913 if (err) { 3613 if (err) {
3914 IWL_ERROR("failed to create sysfs device attributes\n"); 3614 IWL_ERR(priv, "failed to create sysfs device attributes\n");
3915 goto out_uninit_drv; 3615 goto out_uninit_drv;
3916 } 3616 }
3917 3617
3918
3919 iwl_setup_deferred_work(priv); 3618 iwl_setup_deferred_work(priv);
3920 iwl_setup_rx_handlers(priv); 3619 iwl_setup_rx_handlers(priv);
3921 3620
3922 /********************
3923 * 9. Conclude
3924 ********************/
3925 pci_save_state(pdev);
3926 pci_disable_device(pdev);
3927
3928 /********************************** 3621 /**********************************
3929 * 10. Setup and register mac80211 3622 * 9. Setup and register mac80211
3930 **********************************/ 3623 **********************************/
3931 3624
3625 /* enable interrupts if needed: hw bug w/a */
3626 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3627 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3628 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3629 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3630 }
3631
3632 iwl_enable_interrupts(priv);
3633
3932 err = iwl_setup_mac(priv); 3634 err = iwl_setup_mac(priv);
3933 if (err) 3635 if (err)
3934 goto out_remove_sysfs; 3636 goto out_remove_sysfs;
3935 3637
3936 err = iwl_dbgfs_register(priv, DRV_NAME); 3638 err = iwl_dbgfs_register(priv, DRV_NAME);
3937 if (err) 3639 if (err)
3938 IWL_ERROR("failed to create debugfs files\n"); 3640 IWL_ERR(priv, "failed to create debugfs files\n");
3641
3642 /* If platform's RF_KILL switch is NOT set to KILL */
3643 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3644 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3645 else
3646 set_bit(STATUS_RF_KILL_HW, &priv->status);
3939 3647
3940 err = iwl_rfkill_init(priv); 3648 err = iwl_rfkill_init(priv);
3941 if (err) 3649 if (err)
3942 IWL_ERROR("Unable to initialize RFKILL system. " 3650 IWL_ERR(priv, "Unable to initialize RFKILL system. "
3943 "Ignoring error: %d\n", err); 3651 "Ignoring error: %d\n", err);
3652 else
3653 iwl_rfkill_set_hw_state(priv);
3654
3944 iwl_power_initialize(priv); 3655 iwl_power_initialize(priv);
3945 return 0; 3656 return 0;
3946 3657
3947 out_remove_sysfs: 3658 out_remove_sysfs:
3948 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); 3659 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3660 out_disable_msi:
3661 pci_disable_msi(priv->pci_dev);
3662 pci_disable_device(priv->pci_dev);
3949 out_uninit_drv: 3663 out_uninit_drv:
3950 iwl_uninit_drv(priv); 3664 iwl_uninit_drv(priv);
3951 out_free_eeprom: 3665 out_free_eeprom:
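The probe path now brings up its own interrupt plumbing in step 8: pci_enable_msi() plus request_irq() of iwl_isr, an INTA re-enable workaround for adapters that come out of reset with PCI_COMMAND_INTX_DISABLE set, an early read of the hardware RF-kill line, and a matching out_disable_msi unwind label. The patch ignores the pci_enable_msi() return value; a common variant is to fall back to legacy INTx explicitly, as in this sketch (iwl_isr, DRV_NAME and struct iwl_priv are taken from the patch, the helper itself is hypothetical):

#include <linux/pci.h>
#include <linux/interrupt.h>

/* MSI-with-fallback IRQ setup; request_irq() works the same either way. */
static int iwl_setup_irq(struct iwl_priv *priv)
{
	int err;

	if (pci_enable_msi(priv->pci_dev))
		dev_info(&priv->pci_dev->dev, "MSI unavailable, using INTx\n");

	err = request_irq(priv->pci_dev->irq, iwl_isr, IRQF_SHARED,
			  DRV_NAME, priv);
	if (err) {
		pci_disable_msi(priv->pci_dev); /* harmless if MSI never came up */
		return err;
	}
	return 0;
}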
@@ -3971,7 +3685,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3971 if (!priv) 3685 if (!priv)
3972 return; 3686 return;
3973 3687
3974 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); 3688 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3975 3689
3976 iwl_dbgfs_unregister(priv); 3690 iwl_dbgfs_unregister(priv);
3977 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); 3691 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
@@ -4017,6 +3731,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4017 destroy_workqueue(priv->workqueue); 3731 destroy_workqueue(priv->workqueue);
4018 priv->workqueue = NULL; 3732 priv->workqueue = NULL;
4019 3733
3734 free_irq(priv->pci_dev->irq, priv);
3735 pci_disable_msi(priv->pci_dev);
4020 pci_iounmap(pdev, priv->hw_base); 3736 pci_iounmap(pdev, priv->hw_base);
4021 pci_release_regions(pdev); 3737 pci_release_regions(pdev);
4022 pci_disable_device(pdev); 3738 pci_disable_device(pdev);
@@ -4042,19 +3758,8 @@ static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4042 priv->is_open = 1; 3758 priv->is_open = 1;
4043 } 3759 }
4044 3760
4045 /* pci driver assumes state will be saved in this function. 3761 pci_save_state(pdev);
4046 * pci state is saved and device disabled when interface is 3762 pci_disable_device(pdev);
4047 * stopped, so at this time pci device will always be disabled -
4048 * whether interface was started or not. saving pci state now will
4049 * cause saved state be that of a disabled device, which will cause
4050 * problems during resume in that we will end up with a disabled device.
4051 *
4052 * indicate that the current saved state (from when interface was
4053 * stopped) is valid. if interface was never up at time of suspend
4054 * then the saved state will still be valid as it was saved during
4055 * .probe. */
4056 pdev->state_saved = true;
4057
4058 pci_set_power_state(pdev, PCI_D3hot); 3763 pci_set_power_state(pdev, PCI_D3hot);
4059 3764
4060 return 0; 3765 return 0;
@@ -4063,8 +3768,14 @@ static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4063static int iwl_pci_resume(struct pci_dev *pdev) 3768static int iwl_pci_resume(struct pci_dev *pdev)
4064{ 3769{
4065 struct iwl_priv *priv = pci_get_drvdata(pdev); 3770 struct iwl_priv *priv = pci_get_drvdata(pdev);
3771 int ret;
4066 3772
4067 pci_set_power_state(pdev, PCI_D0); 3773 pci_set_power_state(pdev, PCI_D0);
3774 ret = pci_enable_device(pdev);
3775 if (ret)
3776 return ret;
3777 pci_restore_state(pdev);
3778 iwl_enable_interrupts(priv);
4068 3779
4069 if (priv->is_open) 3780 if (priv->is_open)
4070 iwl_mac_start(priv->hw); 3781 iwl_mac_start(priv->hw);
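The suspend/resume pair becomes symmetric: the long comment about relying on state saved when the interface was stopped (and the pdev->state_saved override) is gone, suspend saves config space and disables the device itself before entering D3hot, and resume powers the device back up, restores config space and re-enables interrupts before restarting mac80211. Reduced to the PCI calls alone, the pattern is:

#include <linux/pci.h>

static int example_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);           /* snapshot config space while powered */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int example_pci_resume(struct pci_dev *pdev)
{
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	ret = pci_enable_device(pdev);  /* must succeed before touching the BARs */
	if (ret)
		return ret;
	pci_restore_state(pdev);        /* replay the snapshot taken at suspend */
	return 0;
}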
@@ -4105,6 +3816,21 @@ static struct pci_device_id iwl_hw_card_ids[] = {
4105/* 5150 Wifi/WiMax */ 3816/* 5150 Wifi/WiMax */
4106 {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)}, 3817 {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)},
4107 {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)}, 3818 {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)},
3819/* 6000/6050 Series */
3820 {IWL_PCI_DEVICE(0x0082, 0x1102, iwl6000_2ag_cfg)},
3821 {IWL_PCI_DEVICE(0x0085, 0x1112, iwl6000_2ag_cfg)},
3822 {IWL_PCI_DEVICE(0x0082, 0x1122, iwl6000_2ag_cfg)},
3823 {IWL_PCI_DEVICE(0x422B, PCI_ANY_ID, iwl6000_3agn_cfg)},
3824 {IWL_PCI_DEVICE(0x4238, PCI_ANY_ID, iwl6000_3agn_cfg)},
3825 {IWL_PCI_DEVICE(0x0082, PCI_ANY_ID, iwl6000_2agn_cfg)},
3826 {IWL_PCI_DEVICE(0x0085, PCI_ANY_ID, iwl6000_3agn_cfg)},
3827 {IWL_PCI_DEVICE(0x0086, PCI_ANY_ID, iwl6050_3agn_cfg)},
3828 {IWL_PCI_DEVICE(0x0087, PCI_ANY_ID, iwl6050_2agn_cfg)},
3829 {IWL_PCI_DEVICE(0x0088, PCI_ANY_ID, iwl6050_3agn_cfg)},
3830 {IWL_PCI_DEVICE(0x0089, PCI_ANY_ID, iwl6050_2agn_cfg)},
3831/* 100 Series WiFi */
3832 {IWL_PCI_DEVICE(0x0083, PCI_ANY_ID, iwl100_bgn_cfg)},
3833 {IWL_PCI_DEVICE(0x0084, PCI_ANY_ID, iwl100_bgn_cfg)},
4108#endif /* CONFIG_IWL5000 */ 3834#endif /* CONFIG_IWL5000 */
4109 3835
4110 {0} 3836 {0}
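The ID table picks up the 6000/6050-series and 100-series parts under CONFIG_IWL5000. IWL_PCI_DEVICE is a driver-local shorthand defined in iwl-dev.h (outside this diff); it presumably fixes the Intel vendor ID, wildcards the subvendor, and stashes the per-device iwl_cfg in driver_data so probe() can cast it back out of ent->driver_data, roughly like this (field list assumed):

#include <linux/pci.h>

#define IWL_PCI_DEVICE(dev, subdev, cfg) \
	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
	.driver_data = (kernel_ulong_t)&(cfg)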
@@ -4131,13 +3857,14 @@ static int __init iwl_init(void)
4131 3857
4132 ret = iwlagn_rate_control_register(); 3858 ret = iwlagn_rate_control_register();
4133 if (ret) { 3859 if (ret) {
4134 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret); 3860 printk(KERN_ERR DRV_NAME
3861 "Unable to register rate control algorithm: %d\n", ret);
4135 return ret; 3862 return ret;
4136 } 3863 }
4137 3864
4138 ret = pci_register_driver(&iwl_driver); 3865 ret = pci_register_driver(&iwl_driver);
4139 if (ret) { 3866 if (ret) {
4140 IWL_ERROR("Unable to initialize PCI module\n"); 3867 printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n");
4141 goto error_register; 3868 goto error_register;
4142 } 3869 }
4143 3870
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index f836ecc5575..735f3f19928 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -102,7 +102,7 @@ int iwl_send_calib_results(struct iwl_priv *priv)
102 102
103 return 0; 103 return 0;
104err: 104err:
105 IWL_ERROR("Error %d iteration %d\n", ret, i); 105 IWL_ERR(priv, "Error %d iteration %d\n", ret, i);
106 return ret; 106 return ret;
107} 107}
108EXPORT_SYMBOL(iwl_send_calib_results); 108EXPORT_SYMBOL(iwl_send_calib_results);
@@ -202,7 +202,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
202 val = data->nrg_silence_rssi[i]; 202 val = data->nrg_silence_rssi[i];
203 silence_ref = max(silence_ref, val); 203 silence_ref = max(silence_ref, val);
204 } 204 }
205 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", 205 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
206 silence_rssi_a, silence_rssi_b, silence_rssi_c, 206 silence_rssi_a, silence_rssi_b, silence_rssi_c,
207 silence_ref); 207 silence_ref);
208 208
@@ -226,7 +226,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
226 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); 226 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
227 max_nrg_cck += 6; 227 max_nrg_cck += 6;
228 228
229 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", 229 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
230 rx_info->beacon_energy_a, rx_info->beacon_energy_b, 230 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
231 rx_info->beacon_energy_c, max_nrg_cck - 6); 231 rx_info->beacon_energy_c, max_nrg_cck - 6);
232 232
@@ -236,15 +236,15 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
236 data->num_in_cck_no_fa++; 236 data->num_in_cck_no_fa++;
237 else 237 else
238 data->num_in_cck_no_fa = 0; 238 data->num_in_cck_no_fa = 0;
239 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n", 239 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
240 data->num_in_cck_no_fa); 240 data->num_in_cck_no_fa);
241 241
242 /* If we got too many false alarms this time, reduce sensitivity */ 242 /* If we got too many false alarms this time, reduce sensitivity */
243 if ((false_alarms > max_false_alarms) && 243 if ((false_alarms > max_false_alarms) &&
244 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { 244 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
245 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n", 245 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
246 false_alarms, max_false_alarms); 246 false_alarms, max_false_alarms);
247 IWL_DEBUG_CALIB("... reducing sensitivity\n"); 247 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
248 data->nrg_curr_state = IWL_FA_TOO_MANY; 248 data->nrg_curr_state = IWL_FA_TOO_MANY;
249 /* Store for "fewer than desired" on later beacon */ 249 /* Store for "fewer than desired" on later beacon */
250 data->nrg_silence_ref = silence_ref; 250 data->nrg_silence_ref = silence_ref;
@@ -266,7 +266,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
266 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - 266 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
267 (s32)silence_ref; 267 (s32)silence_ref;
268 268
269 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n", 269 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n",
270 false_alarms, min_false_alarms, 270 false_alarms, min_false_alarms,
271 data->nrg_auto_corr_silence_diff); 271 data->nrg_auto_corr_silence_diff);
272 272
@@ -280,17 +280,17 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
280 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || 280 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
281 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { 281 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
282 282
283 IWL_DEBUG_CALIB("... increasing sensitivity\n"); 283 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
284 /* Increase nrg value to increase sensitivity */ 284 /* Increase nrg value to increase sensitivity */
285 val = data->nrg_th_cck + NRG_STEP_CCK; 285 val = data->nrg_th_cck + NRG_STEP_CCK;
286 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); 286 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
287 } else { 287 } else {
288 IWL_DEBUG_CALIB("... but not changing sensitivity\n"); 288 IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n");
289 } 289 }
290 290
291 /* Else we got a healthy number of false alarms, keep status quo */ 291 /* Else we got a healthy number of false alarms, keep status quo */
292 } else { 292 } else {
293 IWL_DEBUG_CALIB(" FA in safe zone\n"); 293 IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
294 data->nrg_curr_state = IWL_FA_GOOD_RANGE; 294 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
295 295
296 /* Store for use in "fewer than desired" with later beacon */ 296 /* Store for use in "fewer than desired" with later beacon */
@@ -300,7 +300,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
300 * give it some extra margin by reducing sensitivity again 300 * give it some extra margin by reducing sensitivity again
301 * (but don't go below measured energy of desired Rx) */ 301 * (but don't go below measured energy of desired Rx) */
302 if (IWL_FA_TOO_MANY == data->nrg_prev_state) { 302 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
303 IWL_DEBUG_CALIB("... increasing margin\n"); 303 IWL_DEBUG_CALIB(priv, "... increasing margin\n");
304 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) 304 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
305 data->nrg_th_cck -= NRG_MARGIN; 305 data->nrg_th_cck -= NRG_MARGIN;
306 else 306 else
@@ -314,7 +314,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
314 * Lower value is higher energy, so we use max()! 314 * Lower value is higher energy, so we use max()!
315 */ 315 */
316 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); 316 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
317 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck); 317 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
318 318
319 data->nrg_prev_state = data->nrg_curr_state; 319 data->nrg_prev_state = data->nrg_curr_state;
320 320
@@ -367,7 +367,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
367 /* If we got too many false alarms this time, reduce sensitivity */ 367 /* If we got too many false alarms this time, reduce sensitivity */
368 if (false_alarms > max_false_alarms) { 368 if (false_alarms > max_false_alarms) {
369 369
370 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n", 370 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
371 false_alarms, max_false_alarms); 371 false_alarms, max_false_alarms);
372 372
373 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; 373 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
@@ -390,7 +390,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
390 /* Else if we got fewer than desired, increase sensitivity */ 390 /* Else if we got fewer than desired, increase sensitivity */
391 else if (false_alarms < min_false_alarms) { 391 else if (false_alarms < min_false_alarms) {
392 392
393 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n", 393 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
394 false_alarms, min_false_alarms); 394 false_alarms, min_false_alarms);
395 395
396 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; 396 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
@@ -409,7 +409,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
409 data->auto_corr_ofdm_mrc_x1 = 409 data->auto_corr_ofdm_mrc_x1 =
410 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); 410 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
411 } else { 411 } else {
412 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n", 412 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
413 min_false_alarms, false_alarms, max_false_alarms); 413 min_false_alarms, false_alarms, max_false_alarms);
414 } 414 }
415 return 0; 415 return 0;
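The OFDM branch mirrors the CCK logic above: a false-alarm count above the per-period maximum steps the auto-correlation thresholds up (less sensitive), a count below the minimum steps them down (more sensitive), and anything in between leaves the thresholds alone, with every step clamped to the per-NIC ranges. Stripped of the iwlwifi field names, the servo is just this (constants are placeholders, not the driver's values):

/* Generic form of the false-alarm servo used by the calibration code. */
static unsigned int adjust_threshold(unsigned int threshold,
				     unsigned int false_alarms,
				     unsigned int fa_min, unsigned int fa_max,
				     unsigned int step,
				     unsigned int th_min, unsigned int th_max)
{
	if (false_alarms > fa_max && threshold + step <= th_max)
		threshold += step;      /* too noisy: reduce sensitivity */
	else if (false_alarms < fa_min && threshold >= th_min + step)
		threshold -= step;      /* too quiet: increase sensitivity */
	return threshold;               /* otherwise keep the status quo */
}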
@@ -452,18 +452,18 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
452 cpu_to_le16((u16)data->nrg_th_ofdm); 452 cpu_to_le16((u16)data->nrg_th_ofdm);
453 453
454 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 454 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
455 __constant_cpu_to_le16(190); 455 cpu_to_le16(190);
456 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 456 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
457 __constant_cpu_to_le16(390); 457 cpu_to_le16(390);
458 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = 458 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
459 __constant_cpu_to_le16(62); 459 cpu_to_le16(62);
460 460
461 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 461 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
462 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, 462 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
463 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, 463 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
464 data->nrg_th_ofdm); 464 data->nrg_th_ofdm);
465 465
466 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n", 466 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
467 data->auto_corr_cck, data->auto_corr_cck_mrc, 467 data->auto_corr_cck, data->auto_corr_cck_mrc,
468 data->nrg_th_cck); 468 data->nrg_th_cck);
469 469
@@ -473,7 +473,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
473 /* Don't send command to uCode if nothing has changed */ 473 /* Don't send command to uCode if nothing has changed */
474 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), 474 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
475 sizeof(u16)*HD_TABLE_SIZE)) { 475 sizeof(u16)*HD_TABLE_SIZE)) {
476 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n"); 476 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
477 return 0; 477 return 0;
478 } 478 }
479 479
@@ -483,7 +483,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
483 483
484 ret = iwl_send_cmd(priv, &cmd_out); 484 ret = iwl_send_cmd(priv, &cmd_out);
485 if (ret) 485 if (ret)
486 IWL_ERROR("SENSITIVITY_CMD failed\n"); 486 IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
487 487
488 return ret; 488 return ret;
489} 489}
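Besides the debug-macro and IWL_ERR conversions, this function drops the __constant_cpu_to_le16() spellings: the plain cpu_to_le16() already selects a compile-time path for constant arguments via __builtin_constant_p(), so the __constant_ form buys nothing here and the three table entries are byte-for-byte unchanged. Minimal before/after:

#include <linux/types.h>
#include <asm/byteorder.h>

__le16 old_style = __constant_cpu_to_le16(190);
__le16 new_style = cpu_to_le16(190);    /* folds at compile time as well */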
@@ -498,7 +498,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
498 if (priv->disable_sens_cal) 498 if (priv->disable_sens_cal)
499 return; 499 return;
500 500
501 IWL_DEBUG_CALIB("Start iwl_init_sensitivity\n"); 501 IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");
502 502
503 /* Clear driver's sensitivity algo data */ 503 /* Clear driver's sensitivity algo data */
504 data = &(priv->sensitivity_data); 504 data = &(priv->sensitivity_data);
@@ -536,7 +536,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
536 data->last_fa_cnt_cck = 0; 536 data->last_fa_cnt_cck = 0;
537 537
538 ret |= iwl_sensitivity_write(priv); 538 ret |= iwl_sensitivity_write(priv);
539 IWL_DEBUG_CALIB("<<return 0x%X\n", ret); 539 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
540} 540}
541EXPORT_SYMBOL(iwl_init_sensitivity); 541EXPORT_SYMBOL(iwl_init_sensitivity);
542 542
@@ -562,13 +562,13 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
562 data = &(priv->sensitivity_data); 562 data = &(priv->sensitivity_data);
563 563
564 if (!iwl_is_associated(priv)) { 564 if (!iwl_is_associated(priv)) {
565 IWL_DEBUG_CALIB("<< - not associated\n"); 565 IWL_DEBUG_CALIB(priv, "<< - not associated\n");
566 return; 566 return;
567 } 567 }
568 568
569 spin_lock_irqsave(&priv->lock, flags); 569 spin_lock_irqsave(&priv->lock, flags);
570 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 570 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
571 IWL_DEBUG_CALIB("<< invalid data.\n"); 571 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
572 spin_unlock_irqrestore(&priv->lock, flags); 572 spin_unlock_irqrestore(&priv->lock, flags);
573 return; 573 return;
574 } 574 }
@@ -595,10 +595,10 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
595 595
596 spin_unlock_irqrestore(&priv->lock, flags); 596 spin_unlock_irqrestore(&priv->lock, flags);
597 597
598 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time); 598 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
599 599
600 if (!rx_enable_time) { 600 if (!rx_enable_time) {
601 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n"); 601 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n");
602 return; 602 return;
603 } 603 }
604 604
@@ -637,7 +637,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
637 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; 637 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
638 norm_fa_cck = fa_cck + bad_plcp_cck; 638 norm_fa_cck = fa_cck + bad_plcp_cck;
639 639
640 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, 640 IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
641 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); 641 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
642 642
643 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); 643 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
@@ -690,13 +690,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
690 * then we're done forever. */ 690 * then we're done forever. */
691 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { 691 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
692 if (data->state == IWL_CHAIN_NOISE_ALIVE) 692 if (data->state == IWL_CHAIN_NOISE_ALIVE)
693 IWL_DEBUG_CALIB("Wait for noise calib reset\n"); 693 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
694 return; 694 return;
695 } 695 }
696 696
697 spin_lock_irqsave(&priv->lock, flags); 697 spin_lock_irqsave(&priv->lock, flags);
698 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 698 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
699 IWL_DEBUG_CALIB(" << Interference data unavailable\n"); 699 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
700 spin_unlock_irqrestore(&priv->lock, flags); 700 spin_unlock_irqrestore(&priv->lock, flags);
701 return; 701 return;
702 } 702 }
@@ -709,7 +709,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
709 /* Make sure we accumulate data for just the associated channel 709 /* Make sure we accumulate data for just the associated channel
710 * (even if scanning). */ 710 * (even if scanning). */
711 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { 711 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
712 IWL_DEBUG_CALIB("Stats not from chan=%d, band24=%d\n", 712 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
713 rxon_chnum, rxon_band24); 713 rxon_chnum, rxon_band24);
714 spin_unlock_irqrestore(&priv->lock, flags); 714 spin_unlock_irqrestore(&priv->lock, flags);
715 return; 715 return;
@@ -739,11 +739,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
739 data->chain_signal_b = (chain_sig_b + data->chain_signal_b); 739 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
740 data->chain_signal_c = (chain_sig_c + data->chain_signal_c); 740 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
741 741
742 IWL_DEBUG_CALIB("chan=%d, band24=%d, beacon=%d\n", 742 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
743 rxon_chnum, rxon_band24, data->beacon_count); 743 rxon_chnum, rxon_band24, data->beacon_count);
744 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n", 744 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
745 chain_sig_a, chain_sig_b, chain_sig_c); 745 chain_sig_a, chain_sig_b, chain_sig_c);
746 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n", 746 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
747 chain_noise_a, chain_noise_b, chain_noise_c); 747 chain_noise_a, chain_noise_b, chain_noise_c);
748 748
749 /* If this is the 20th beacon, determine: 749 /* If this is the 20th beacon, determine:
@@ -773,9 +773,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
773 active_chains = (1 << max_average_sig_antenna_i); 773 active_chains = (1 << max_average_sig_antenna_i);
774 } 774 }
775 775
776 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n", 776 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
777 average_sig[0], average_sig[1], average_sig[2]); 777 average_sig[0], average_sig[1], average_sig[2]);
778 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n", 778 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
779 max_average_sig, max_average_sig_antenna_i); 779 max_average_sig, max_average_sig_antenna_i);
780 780
781 /* Compare signal strengths for all 3 receivers. */ 781 /* Compare signal strengths for all 3 receivers. */
@@ -789,7 +789,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
789 data->disconn_array[i] = 1; 789 data->disconn_array[i] = 1;
790 else 790 else
791 active_chains |= (1 << i); 791 active_chains |= (1 << i);
792 IWL_DEBUG_CALIB("i = %d rssiDelta = %d " 792 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
793 "disconn_array[i] = %d\n", 793 "disconn_array[i] = %d\n",
794 i, rssi_delta, data->disconn_array[i]); 794 i, rssi_delta, data->disconn_array[i]);
795 } 795 }
@@ -813,7 +813,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
813 * disconnected connect it anyway */ 813 * disconnected connect it anyway */
814 data->disconn_array[i] = 0; 814 data->disconn_array[i] = 0;
815 active_chains |= ant_msk; 815 active_chains |= ant_msk;
816 IWL_DEBUG_CALIB("All Tx chains are disconnected W/A - " 816 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - "
817 "declare %d as connected\n", i); 817 "declare %d as connected\n", i);
818 break; 818 break;
819 } 819 }
@@ -821,7 +821,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
821 821
822 /* Save for use within RXON, TX, SCAN commands, etc. */ 822 /* Save for use within RXON, TX, SCAN commands, etc. */
823 priv->chain_noise_data.active_chains = active_chains; 823 priv->chain_noise_data.active_chains = active_chains;
824 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", 824 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
825 active_chains); 825 active_chains);
826 826
827 /* Analyze noise for rx balance */ 827 /* Analyze noise for rx balance */
@@ -839,15 +839,16 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
839 } 839 }
840 } 840 }
841 841
842 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n", 842 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
843 average_noise[0], average_noise[1], 843 average_noise[0], average_noise[1],
844 average_noise[2]); 844 average_noise[2]);
845 845
846 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n", 846 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
847 min_average_noise, min_average_noise_antenna_i); 847 min_average_noise, min_average_noise_antenna_i);
848 848
849 priv->cfg->ops->utils->gain_computation(priv, average_noise, 849 if (priv->cfg->ops->utils->gain_computation)
850 min_average_noise_antenna_i, min_average_noise); 850 priv->cfg->ops->utils->gain_computation(priv, average_noise,
851 min_average_noise_antenna_i, min_average_noise);
851 852
852 /* Some power changes may have been made during the calibration. 853 /* Some power changes may have been made during the calibration.
853 * Update and commit the RXON 854 * Update and commit the RXON
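The chain-noise pass accumulates per-chain signal and noise over the first beacons after association, then (at the 20th beacon) averages them, marks any chain whose signal falls too far below the strongest chain as disconnected -- with a workaround that forces at least one chain to stay connected -- and finally hands the averaged noise to the per-device gain_computation hook, which this patch now calls only when the ops table actually provides one. In isolation, and with a placeholder delta instead of the driver's path-loss limit, the antenna-selection step looks roughly like:

#define NUM_RX_CHAINS	3
#define MAX_SIG_DELTA	15	/* placeholder threshold */

/* Bitmask of chains considered connected, from averaged signal strengths.
 * The strongest chain always passes its own comparison, so the mask is
 * never empty. */
static unsigned int pick_active_chains(const int average_sig[NUM_RX_CHAINS])
{
	unsigned int active = 0;
	int i, best = 0;

	for (i = 1; i < NUM_RX_CHAINS; i++)
		if (average_sig[i] > average_sig[best])
			best = i;

	for (i = 0; i < NUM_RX_CHAINS; i++)
		if (average_sig[best] - average_sig[i] <= MAX_SIG_DELTA)
			active |= 1u << i;

	return active;
}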
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index 1abe84bb74a..b6cef989a79 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index ba997204c8d..29d40746da6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -69,12 +69,20 @@
69#ifndef __iwl_commands_h__ 69#ifndef __iwl_commands_h__
70#define __iwl_commands_h__ 70#define __iwl_commands_h__
71 71
72struct iwl_priv;
73
72/* uCode version contains 4 values: Major/Minor/API/Serial */ 74/* uCode version contains 4 values: Major/Minor/API/Serial */
73#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) 75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
74#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) 76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
75#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) 77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
76#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) 78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
77 79
80
81/* Tx rates */
82#define IWL_CCK_RATES 4
83#define IWL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
85
78enum { 86enum {
79 REPLY_ALIVE = 0x1, 87 REPLY_ALIVE = 0x1,
80 REPLY_ERROR = 0x2, 88 REPLY_ERROR = 0x2,
@@ -136,9 +144,11 @@ enum {
136 WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */ 144 WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */
137 145
138 /* Miscellaneous commands */ 146 /* Miscellaneous commands */
147 REPLY_TX_POWER_DBM_CMD = 0x95,
139 QUIET_NOTIFICATION = 0x96, /* not used */ 148 QUIET_NOTIFICATION = 0x96, /* not used */
140 REPLY_TX_PWR_TABLE_CMD = 0x97, 149 REPLY_TX_PWR_TABLE_CMD = 0x97,
141 REPLY_TX_POWER_DBM_CMD = 0x98, 150 REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */
151 TX_ANT_CONFIGURATION_CMD = 0x98, /* not used */
142 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ 152 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
143 153
144 /* Bluetooth device coexistence config command */ 154 /* Bluetooth device coexistence config command */
@@ -219,6 +229,37 @@ struct iwl_cmd_header {
219 u8 data[0]; 229 u8 data[0];
220} __attribute__ ((packed)); 230} __attribute__ ((packed));
221 231
232
233/**
234 * struct iwl3945_tx_power
235 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
237 *
238 * Each entry contains two values:
239 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
240 * linear value that multiplies the output of the digital signal processor,
241 * before being sent to the analog radio.
242 * 2) Radio gain. This sets the analog gain of the radio Tx path.
243 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
244 *
245 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
246 */
247struct iwl3945_tx_power {
248 u8 tx_gain; /* gain for analog radio */
249 u8 dsp_atten; /* gain for DSP */
250} __attribute__ ((packed));
251
252/**
253 * struct iwl3945_power_per_rate
254 *
255 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
256 */
257struct iwl3945_power_per_rate {
258 u8 rate; /* plcp */
259 struct iwl3945_tx_power tpc;
260 u8 reserved;
261} __attribute__ ((packed));
262
222/** 263/**
223 * iwlagn rate_n_flags bit fields 264 * iwlagn rate_n_flags bit fields
224 * 265 *
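The 3945 structures brought in above pair every supported PLCP rate with a two-byte transmit-power setting: a coarse analog radio gain plus a fine linear DSP attenuation, exactly as the struct iwl3945_tx_power comment describes. Populating the IWL_MAX_RATES entries of a channel-switch or power-table command is then a loop over a per-channel gain table; the lookup helper in this sketch is a hypothetical stand-in for the driver's EEPROM-derived power_gain_table[][]:

struct gain_entry {		/* stand-in for a power_gain_table entry */
	u8 tx_gain;
	u8 dsp_atten;
};

static void fill_power_per_rate(struct iwl3945_power_per_rate *power,
				const u8 *plcp_rates,
				struct gain_entry (*lookup_gain)(u8 rate))
{
	int i;

	for (i = 0; i < IWL_MAX_RATES; i++) {
		struct gain_entry g = lookup_gain(plcp_rates[i]);

		power[i].rate = plcp_rates[i];		/* PLCP rate code */
		power[i].tpc.tx_gain = g.tx_gain;	/* analog radio gain */
		power[i].tpc.dsp_atten = g.dsp_atten;	/* DSP fine gain */
		power[i].reserved = 0;
	}
}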
@@ -300,11 +341,12 @@ struct iwl_cmd_header {
300 * 5350 has 3 transmitters 341 * 5350 has 3 transmitters
301 * bit14:16 342 * bit14:16
302 */ 343 */
303#define RATE_MCS_ANT_POS 14 344#define RATE_MCS_ANT_POS 14
304#define RATE_MCS_ANT_A_MSK 0x04000 345#define RATE_MCS_ANT_A_MSK 0x04000
305#define RATE_MCS_ANT_B_MSK 0x08000 346#define RATE_MCS_ANT_B_MSK 0x08000
306#define RATE_MCS_ANT_C_MSK 0x10000 347#define RATE_MCS_ANT_C_MSK 0x10000
307#define RATE_MCS_ANT_ABC_MSK 0x1C000 348#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
349#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
308#define RATE_ANT_NUM 3 350#define RATE_ANT_NUM 3
309 351
310#define POWER_TABLE_NUM_ENTRIES 33 352#define POWER_TABLE_NUM_ENTRIES 33
@@ -492,8 +534,6 @@ struct iwl_alive_resp {
492 __le32 is_valid; 534 __le32 is_valid;
493} __attribute__ ((packed)); 535} __attribute__ ((packed));
494 536
495
496
497/* 537/*
498 * REPLY_ERROR = 0x2 (response only, not a command) 538 * REPLY_ERROR = 0x2 (response only, not a command)
499 */ 539 */
@@ -525,6 +565,7 @@ enum {
525 565
526 566
527#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) 567#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
568#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
528#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) 569#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
529#define RXON_RX_CHAIN_VALID_POS (1) 570#define RXON_RX_CHAIN_VALID_POS (1)
530#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4) 571#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
@@ -611,6 +652,26 @@ enum {
611 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), 652 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
612 * regardless of whether RXON_FILTER_ASSOC_MSK is set. 653 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
613 */ 654 */
655
656struct iwl3945_rxon_cmd {
657 u8 node_addr[6];
658 __le16 reserved1;
659 u8 bssid_addr[6];
660 __le16 reserved2;
661 u8 wlap_bssid_addr[6];
662 __le16 reserved3;
663 u8 dev_type;
664 u8 air_propagation;
665 __le16 reserved4;
666 u8 ofdm_basic_rates;
667 u8 cck_basic_rates;
668 __le16 assoc_id;
669 __le32 flags;
670 __le32 filter_flags;
671 __le16 channel;
672 __le16 reserved5;
673} __attribute__ ((packed));
674
614struct iwl4965_rxon_cmd { 675struct iwl4965_rxon_cmd {
615 u8 node_addr[6]; 676 u8 node_addr[6];
616 __le16 reserved1; 677 __le16 reserved1;
@@ -656,33 +717,41 @@ struct iwl_rxon_cmd {
656 __le16 reserved6; 717 __le16 reserved6;
657} __attribute__ ((packed)); 718} __attribute__ ((packed));
658 719
659struct iwl5000_rxon_assoc_cmd { 720/*
721 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
722 */
723struct iwl3945_rxon_assoc_cmd {
724 __le32 flags;
725 __le32 filter_flags;
726 u8 ofdm_basic_rates;
727 u8 cck_basic_rates;
728 __le16 reserved;
729} __attribute__ ((packed));
730
731struct iwl4965_rxon_assoc_cmd {
660 __le32 flags; 732 __le32 flags;
661 __le32 filter_flags; 733 __le32 filter_flags;
662 u8 ofdm_basic_rates; 734 u8 ofdm_basic_rates;
663 u8 cck_basic_rates; 735 u8 cck_basic_rates;
664 __le16 reserved1;
665 u8 ofdm_ht_single_stream_basic_rates; 736 u8 ofdm_ht_single_stream_basic_rates;
666 u8 ofdm_ht_dual_stream_basic_rates; 737 u8 ofdm_ht_dual_stream_basic_rates;
667 u8 ofdm_ht_triple_stream_basic_rates;
668 u8 reserved2;
669 __le16 rx_chain_select_flags; 738 __le16 rx_chain_select_flags;
670 __le16 acquisition_data; 739 __le16 reserved;
671 __le32 reserved3;
672} __attribute__ ((packed)); 740} __attribute__ ((packed));
673 741
674/* 742struct iwl5000_rxon_assoc_cmd {
675 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
676 */
677struct iwl4965_rxon_assoc_cmd {
678 __le32 flags; 743 __le32 flags;
679 __le32 filter_flags; 744 __le32 filter_flags;
680 u8 ofdm_basic_rates; 745 u8 ofdm_basic_rates;
681 u8 cck_basic_rates; 746 u8 cck_basic_rates;
747 __le16 reserved1;
682 u8 ofdm_ht_single_stream_basic_rates; 748 u8 ofdm_ht_single_stream_basic_rates;
683 u8 ofdm_ht_dual_stream_basic_rates; 749 u8 ofdm_ht_dual_stream_basic_rates;
750 u8 ofdm_ht_triple_stream_basic_rates;
751 u8 reserved2;
684 __le16 rx_chain_select_flags; 752 __le16 rx_chain_select_flags;
685 __le16 reserved; 753 __le16 acquisition_data;
754 __le32 reserved3;
686} __attribute__ ((packed)); 755} __attribute__ ((packed));
687 756
688#define IWL_CONN_MAX_LISTEN_INTERVAL 10 757#define IWL_CONN_MAX_LISTEN_INTERVAL 10
@@ -702,6 +771,16 @@ struct iwl_rxon_time_cmd {
702/* 771/*
703 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 772 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
704 */ 773 */
774struct iwl3945_channel_switch_cmd {
775 u8 band;
776 u8 expect_beacon;
777 __le16 channel;
778 __le32 rxon_flags;
779 __le32 rxon_filter_flags;
780 __le32 switch_time;
781 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
782} __attribute__ ((packed));
783
705struct iwl_channel_switch_cmd { 784struct iwl_channel_switch_cmd {
706 u8 band; 785 u8 band;
707 u8 expect_beacon; 786 u8 expect_beacon;
@@ -783,6 +862,8 @@ struct iwl_qosparam_cmd {
783#define IWL_AP_ID 0 862#define IWL_AP_ID 0
784#define IWL_MULTICAST_ID 1 863#define IWL_MULTICAST_ID 1
785#define IWL_STA_ID 2 864#define IWL_STA_ID 2
865#define IWL3945_BROADCAST_ID 24
866#define IWL3945_STATION_COUNT 25
786#define IWL4965_BROADCAST_ID 31 867#define IWL4965_BROADCAST_ID 31
787#define IWL4965_STATION_COUNT 32 868#define IWL4965_STATION_COUNT 32
788#define IWL5000_BROADCAST_ID 15 869#define IWL5000_BROADCAST_ID 15
@@ -791,6 +872,8 @@ struct iwl_qosparam_cmd {
791#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 872#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
792#define IWL_INVALID_STATION 255 873#define IWL_INVALID_STATION 255
793 874
875#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2);
876#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8);
794#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8); 877#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8);
795#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) 878#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
796#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) 879#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
@@ -901,6 +984,35 @@ struct sta_id_modify {
901 * used as AP, or in an IBSS network, driver must set up station table 984 * used as AP, or in an IBSS network, driver must set up station table
902 * entries for all STAs in network, starting with index IWL_STA_ID. 985 * entries for all STAs in network, starting with index IWL_STA_ID.
903 */ 986 */
987
988struct iwl3945_addsta_cmd {
989 u8 mode; /* 1: modify existing, 0: add new station */
990 u8 reserved[3];
991 struct sta_id_modify sta;
992 struct iwl4965_keyinfo key;
993 __le32 station_flags; /* STA_FLG_* */
994 __le32 station_flags_msk; /* STA_FLG_* */
995
996 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
997 * corresponding to bit (e.g. bit 5 controls TID 5).
998 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
999 __le16 tid_disable_tx;
1000
1001 __le16 rate_n_flags;
1002
1003 /* TID for which to add block-ack support.
1004 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1005 u8 add_immediate_ba_tid;
1006
1007 /* TID for which to remove block-ack support.
1008 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1009 u8 remove_immediate_ba_tid;
1010
1011 /* Starting Sequence Number for added block-ack support.
1012 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1013 __le16 add_immediate_ba_ssn;
1014} __attribute__ ((packed));
1015
904struct iwl4965_addsta_cmd { 1016struct iwl4965_addsta_cmd {
905 u8 mode; /* 1: modify existing, 0: add new station */ 1017 u8 mode; /* 1: modify existing, 0: add new station */
906 u8 reserved[3]; 1018 u8 reserved[3];
@@ -1054,6 +1166,48 @@ struct iwl_wep_cmd {
1054#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) 1166#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1055#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) 1167#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1056 1168
1169
1170struct iwl3945_rx_frame_stats {
1171 u8 phy_count;
1172 u8 id;
1173 u8 rssi;
1174 u8 agc;
1175 __le16 sig_avg;
1176 __le16 noise_diff;
1177 u8 payload[0];
1178} __attribute__ ((packed));
1179
1180struct iwl3945_rx_frame_hdr {
1181 __le16 channel;
1182 __le16 phy_flags;
1183 u8 reserved1;
1184 u8 rate;
1185 __le16 len;
1186 u8 payload[0];
1187} __attribute__ ((packed));
1188
1189struct iwl3945_rx_frame_end {
1190 __le32 status;
1191 __le64 timestamp;
1192 __le32 beacon_timestamp;
1193} __attribute__ ((packed));
1194
1195/*
1196 * REPLY_3945_RX = 0x1b (response only, not a command)
1197 *
1198 * NOTE: DO NOT dereference from casts to this structure
1199 * It is provided only for calculating minimum data set size.
1200 * The actual offsets of the hdr and end are dynamic based on
1201 * stats.phy_count
1202 */
1203struct iwl3945_rx_frame {
1204 struct iwl3945_rx_frame_stats stats;
1205 struct iwl3945_rx_frame_hdr hdr;
1206 struct iwl3945_rx_frame_end end;
1207} __attribute__ ((packed));
1208
1209#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
1210
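Because hdr and end float behind the variable-length stats.phy_count area, the NOTE above forbids reading them through a plain cast. A hedged sketch of deriving the real offsets (pkt is assumed to point at the received struct iwl_rx_packet for a REPLY_3945_RX notification):

	struct iwl3945_rx_frame_stats *stats = &pkt->u.rx_frame.stats;
	struct iwl3945_rx_frame_hdr *rx_hdr =
		(void *)(stats->payload + stats->phy_count);
	struct iwl3945_rx_frame_end *rx_end =
		(void *)(rx_hdr->payload + le16_to_cpu(rx_hdr->len));
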
1057/* Fixed (non-configurable) rx data from phy */ 1211/* Fixed (non-configurable) rx data from phy */
1058 1212
1059#define IWL49_RX_RES_PHY_CNT 14 1213#define IWL49_RX_RES_PHY_CNT 14
@@ -1234,6 +1388,84 @@ struct iwl4965_rx_mpdu_res_start {
1234#define TKIP_ICV_LEN 4 1388#define TKIP_ICV_LEN 4
1235 1389
1236/* 1390/*
1391 * REPLY_TX = 0x1c (command)
1392 */
1393
1394struct iwl3945_tx_cmd {
1395 /*
1396 * MPDU byte count:
1397 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1398 * + 8 byte IV for CCM or TKIP (not used for WEP)
1399 * + Data payload
1400 * + 8-byte MIC (not used for CCM/WEP)
1401 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1402 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1403 * Range: 14-2342 bytes.
1404 */
1405 __le16 len;
1406
1407 /*
1408 * MPDU or MSDU byte count for next frame.
1409 * Used for fragmentation and bursting, but not 11n aggregation.
1410 * Same as "len", but for next frame. Set to 0 if not applicable.
1411 */
1412 __le16 next_frame_len;
1413
1414 __le32 tx_flags; /* TX_CMD_FLG_* */
1415
1416 u8 rate;
1417
1418 /* Index of recipient station in uCode's station table */
1419 u8 sta_id;
1420 u8 tid_tspec;
1421 u8 sec_ctl;
1422 u8 key[16];
1423 union {
1424 u8 byte[8];
1425 __le16 word[4];
1426 __le32 dw[2];
1427 } tkip_mic;
1428 __le32 next_frame_info;
1429 union {
1430 __le32 life_time;
1431 __le32 attempt;
1432 } stop_time;
1433 u8 supp_rates[2];
1434 u8 rts_retry_limit; /*byte 50 */
1435 u8 data_retry_limit; /*byte 51 */
1436 union {
1437 __le16 pm_frame_timeout;
1438 __le16 attempt_duration;
1439 } timeout;
1440
1441 /*
1442 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1443 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1444 */
1445 __le16 driver_txop;
1446
1447 /*
1448 * MAC header goes here, followed by 2 bytes padding if MAC header
1449 * length is 26 or 30 bytes, followed by payload data
1450 */
1451 u8 payload[0];
1452 struct ieee80211_hdr hdr[0];
1453} __attribute__ ((packed));
1454
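A small worked example of the unit rules above (illustrative only, tx assumed to be the iwl3945_tx_cmd being built): driver_txop is programmed in 32-usec slices, while the retry limits are plain counts:

	tx->rts_retry_limit = 3;
	tx->data_retry_limit = 15;
	tx->driver_txop = cpu_to_le16(1504 / 32);	/* 1504 usec -> 47 units */
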
1455/*
1456 * REPLY_TX = 0x1c (response)
1457 */
1458struct iwl3945_tx_resp {
1459 u8 failure_rts;
1460 u8 failure_frame;
1461 u8 bt_kill_count;
1462 u8 rate;
1463 __le32 wireless_media_time;
1464 __le32 status; /* TX status */
1465} __attribute__ ((packed));
1466
1467
1468/*
1237 * 4965 uCode updates these Tx attempt count values in host DRAM. 1469 * 4965 uCode updates these Tx attempt count values in host DRAM.
1238 * Used for managing Tx retries when expecting block-acks. 1470 * Used for managing Tx retries when expecting block-acks.
1239 * Driver should set these fields to 0. 1471 * Driver should set these fields to 0.
@@ -1244,9 +1476,6 @@ struct iwl_dram_scratch {
1244 __le16 reserved; 1476 __le16 reserved;
1245} __attribute__ ((packed)); 1477} __attribute__ ((packed));
1246 1478
1247/*
1248 * REPLY_TX = 0x1c (command)
1249 */
1250struct iwl_tx_cmd { 1479struct iwl_tx_cmd {
1251 /* 1480 /*
1252 * MPDU byte count: 1481 * MPDU byte count:
@@ -1584,6 +1813,14 @@ struct iwl_compressed_ba_resp {
1584 * 1813 *
1585 * See details under "TXPOWER" in iwl-4965-hw.h. 1814 * See details under "TXPOWER" in iwl-4965-hw.h.
1586 */ 1815 */
1816
1817struct iwl3945_txpowertable_cmd {
1818 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1819 u8 reserved;
1820 __le16 channel;
1821 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
1822} __attribute__ ((packed));
1823
1587struct iwl4965_txpowertable_cmd { 1824struct iwl4965_txpowertable_cmd {
1588 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1825 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1589 u8 reserved; 1826 u8 reserved;
@@ -1591,6 +1828,35 @@ struct iwl4965_txpowertable_cmd {
1591 struct iwl4965_tx_power_db tx_power; 1828 struct iwl4965_tx_power_db tx_power;
1592} __attribute__ ((packed)); 1829} __attribute__ ((packed));
1593 1830
1831
1832/**
1833 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
1834 *
1835 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
1836 *
1837 * NOTE: The table of rates passed to the uCode via the
1838 * RATE_SCALE command sets up the corresponding order of
1839 * rates used for all related commands, including rate
1840 * masks, etc.
1841 *
1842 * For example, if you set 9MB (PLCP 0x0f) as the first
1843 * rate in the rate table, the bit mask for that rate
1844 * when passed through ofdm_basic_rates on the REPLY_RXON
1845 * command would be bit 0 (1 << 0)
1846 */
1847struct iwl3945_rate_scaling_info {
1848 __le16 rate_n_flags;
1849 u8 try_cnt;
1850 u8 next_rate_index;
1851} __attribute__ ((packed));
1852
1853struct iwl3945_rate_scaling_cmd {
1854 u8 table_id;
1855 u8 reserved[3];
1856 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
1857} __attribute__ ((packed));
1858
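A hedged sketch, not taken from this patch, of filling the table so each entry retries twice and then falls back one step toward the lowest rate; iwl_rates[] (with its .plcp member, as used by iwl_rate_get_lowest_plcp later in this diff) is assumed to cover at least IWL_MAX_RATES entries:

	struct iwl3945_rate_scaling_cmd rs_cmd = { .table_id = 0 };
	int i;

	for (i = 0; i < IWL_MAX_RATES; i++) {
		rs_cmd.table[i].rate_n_flags = cpu_to_le16(iwl_rates[i].plcp);
		rs_cmd.table[i].try_cnt = 2;		/* attempts at this rate */
		rs_cmd.table[i].next_rate_index = i ? i - 1 : 0;	/* fall back */
	}
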
1859
1594/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ 1860/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1595#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) 1861#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1596 1862
@@ -2044,15 +2310,23 @@ struct iwl_spectrum_notification {
2044 */ 2310 */
2045#define IWL_POWER_VEC_SIZE 5 2311#define IWL_POWER_VEC_SIZE 5
2046 2312
2047#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(1 << 0) 2313#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2048#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(1 << 2) 2314#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2049#define IWL_POWER_PCI_PM_MSK cpu_to_le16(1 << 3) 2315#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2050#define IWL_POWER_FAST_PD cpu_to_le16(1 << 4) 2316#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
2317
2318struct iwl3945_powertable_cmd {
2319 __le16 flags;
2320 u8 reserved[2];
2321 __le32 rx_data_timeout;
2322 __le32 tx_data_timeout;
2323 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2324} __attribute__ ((packed));
2051 2325
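A minimal sketch of filling the 3945 variant above, assuming POWER_TABLE_CMD is the existing command id; the flags reuse the IWL_POWER_* masks redefined with BIT() just before this struct:

	struct iwl3945_powertable_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
		    IWL_POWER_SLEEP_OVER_DTIM_MSK;
	cmd.rx_data_timeout = cpu_to_le32(25 * 1024);	/* usec */
	cmd.tx_data_timeout = cpu_to_le32(25 * 1024);	/* usec */
	/* sleep_interval[] left at 0 here; real code derives it from DTIM */

	ret = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
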
2052struct iwl_powertable_cmd { 2326struct iwl_powertable_cmd {
2053 __le16 flags; 2327 __le16 flags;
2054 u8 keep_alive_seconds; 2328 u8 keep_alive_seconds; /* 3945 reserved */
2055 u8 debug_flags; 2329 u8 debug_flags; /* 3945 reserved */
2056 __le32 rx_data_timeout; 2330 __le32 rx_data_timeout;
2057 __le32 tx_data_timeout; 2331 __le32 tx_data_timeout;
2058 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2332 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2143,6 +2417,26 @@ struct iwl_ct_kill_config {
2143 * passive_dwell < max_out_time 2417 * passive_dwell < max_out_time
2144 * active_dwell < max_out_time 2418 * active_dwell < max_out_time
2145 */ 2419 */
2420
2421/* FIXME: rename to AP1, remove tpc */
2422struct iwl3945_scan_channel {
2423 /*
2424 * type is defined as:
2425 * 0:0 1 = active, 0 = passive
2426 * 1:4 SSID direct bit map; if a bit is set, then corresponding
2427 * SSID IE is transmitted in probe request.
2428 * 5:7 reserved
2429 */
2430 u8 type;
2431 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
2432 struct iwl3945_tx_power tpc;
2433 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2434 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2435} __attribute__ ((packed));
2436
2437/* set number of direct probes u8 type */
2438#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2439
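Expanding the macro for a concrete n (illustration only, ch is an assumed pointer to a struct iwl3945_scan_channel): the result sets bits 1..n of the type byte, i.e. exactly the 1:4 SSID direct bit map described above, leaving bit 0 free for the active/passive flag:

	/* IWL39_SCAN_PROBE_MASK(2) == BIT(2) | (BIT(2) - BIT(1))
	 *			    == 0x4 | 0x2 == 0x6   (bits 1 and 2 set) */
	ch->type = IWL39_SCAN_PROBE_MASK(2) | 0x1;	/* 2 direct probes, active */
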
2146struct iwl_scan_channel { 2440struct iwl_scan_channel {
2147 /* 2441 /*
2148 * type is defined as: 2442 * type is defined as:
@@ -2159,6 +2453,9 @@ struct iwl_scan_channel {
2159 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2453 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2160} __attribute__ ((packed)); 2454} __attribute__ ((packed));
2161 2455
2456/* set number of direct probes __le32 type */
2457#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
2458
2162/** 2459/**
2163 * struct iwl_ssid_ie - directed scan network information element 2460 * struct iwl_ssid_ie - directed scan network information element
2164 * 2461 *
@@ -2172,6 +2469,7 @@ struct iwl_ssid_ie {
2172 u8 ssid[32]; 2469 u8 ssid[32];
2173} __attribute__ ((packed)); 2470} __attribute__ ((packed));
2174 2471
2472#define PROBE_OPTION_MAX_API1 0x4
2175#define PROBE_OPTION_MAX 0x14 2473#define PROBE_OPTION_MAX 0x14
2176#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2474#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2177#define IWL_GOOD_CRC_TH cpu_to_le16(1) 2475#define IWL_GOOD_CRC_TH cpu_to_le16(1)
@@ -2229,6 +2527,51 @@ struct iwl_ssid_ie {
2229 * To avoid uCode errors, see timing restrictions described under 2527 * To avoid uCode errors, see timing restrictions described under
2230 * struct iwl_scan_channel. 2528 * struct iwl_scan_channel.
2231 */ 2529 */
2530
2531struct iwl3945_scan_cmd {
2532 __le16 len;
2533 u8 reserved0;
2534 u8 channel_count; /* # channels in channel list */
2535 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2536 * (only for active scan) */
2537 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2538 __le16 good_CRC_th; /* passive -> active promotion threshold */
2539 __le16 reserved1;
2540 __le32 max_out_time; /* max usec to be away from associated (service)
2541 * channel */
2542 __le32 suspend_time; /* pause scan this long (in "extended beacon
2543 * format") when returning to service channel:
2544 * 3945; 31:24 # beacons, 19:0 additional usec,
2545 * 4965; 31:22 # beacons, 21:0 additional usec.
2546 */
2547 __le32 flags; /* RXON_FLG_* */
2548 __le32 filter_flags; /* RXON_FILTER_* */
2549
2550 /* For active scans (set to all-0s for passive scans).
2551 * Does not include payload. Must specify Tx rate; no rate scaling. */
2552 struct iwl3945_tx_cmd tx_cmd;
2553
2554 /* For directed active scans (set to all-0s otherwise) */
2555 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_API1];
2556
2557 /*
2558 * Probe request frame, followed by channel list.
2559 *
2560 * Size of probe request frame is specified by byte count in tx_cmd.
2561 * Channel list follows immediately after probe request frame.
2562 * Number of channels in list is specified by channel_count.
2563 * Each channel in list is of type:
2564 *
2565 * struct iwl3945_scan_channel channels[0];
2566 *
2567 * NOTE: Only one band of channels can be scanned per pass. You
2568 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2569 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2570 * before requesting another scan.
2571 */
2572 u8 data[0];
2573} __attribute__ ((packed));
2574
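A rough sketch of assembling the variable-length layout the comments above describe: fixed command, then probe request, then the channel list. kzalloc and WLAN_EID_SSID are the usual kernel symbols, the probe-request construction itself is elided, and the 64-byte probe size is only an assumption:

	struct iwl3945_scan_cmd *scan;
	struct iwl3945_scan_channel *ch;
	size_t probe_len = 64;			/* assumed probe request size */

	scan = kzalloc(sizeof(*scan) + probe_len + sizeof(*ch), GFP_KERNEL);
	if (!scan)
		return -ENOMEM;

	scan->channel_count = 1;
	scan->quiet_time = cpu_to_le16(10);		/* msec on a quiet channel */
	scan->quiet_plcp_th = cpu_to_le16(1);
	scan->max_out_time = cpu_to_le32(200 * 1024);	/* 200 TU off-channel */
	/* suspend_time, "extended beacon format": 31:24 beacons, 19:0 usec */
	scan->suspend_time = cpu_to_le32((2 << 24) | 100);
	scan->flags = RXON_FLG_BAND_24G_MSK;		/* one 2.4 GHz pass */

	scan->tx_cmd.len = cpu_to_le16(probe_len);	/* probe request size */
	scan->direct_scan[0].id = WLAN_EID_SSID;
	/* ...copy the probe request frame into scan->data here... */

	ch = (struct iwl3945_scan_channel *)(scan->data + probe_len);
	ch->type = 1;					/* bit 0: active */
	ch->channel = 6;
	ch->active_dwell = cpu_to_le16(30);		/* TU */
	ch->passive_dwell = cpu_to_le16(120);		/* TU */

	scan->len = cpu_to_le16(sizeof(*scan) + probe_len + sizeof(*ch));
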
2232struct iwl_scan_cmd { 2575struct iwl_scan_cmd {
2233 __le16 len; 2576 __le16 len;
2234 u8 reserved0; 2577 u8 reserved0;
@@ -2336,6 +2679,14 @@ struct iwl_scancomplete_notification {
2336/* 2679/*
2337 * BEACON_NOTIFICATION = 0x90 (notification only, not a command) 2680 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2338 */ 2681 */
2682
2683struct iwl3945_beacon_notif {
2684 struct iwl3945_tx_resp beacon_notify_hdr;
2685 __le32 low_tsf;
2686 __le32 high_tsf;
2687 __le32 ibss_mgr_status;
2688} __attribute__ ((packed));
2689
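The two TSF words above combine into one 64-bit timestamp; a one-line sketch (notif assumed to point at the received iwl3945_beacon_notif):

	u64 tsf = ((u64)le32_to_cpu(notif->high_tsf) << 32) |
		  le32_to_cpu(notif->low_tsf);
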
2339struct iwl4965_beacon_notif { 2690struct iwl4965_beacon_notif {
2340 struct iwl4965_tx_resp beacon_notify_hdr; 2691 struct iwl4965_tx_resp beacon_notify_hdr;
2341 __le32 low_tsf; 2692 __le32 low_tsf;
@@ -2346,6 +2697,15 @@ struct iwl4965_beacon_notif {
2346/* 2697/*
2347 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2698 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2348 */ 2699 */
2700
2701struct iwl3945_tx_beacon_cmd {
2702 struct iwl3945_tx_cmd tx;
2703 __le16 tim_idx;
2704 u8 tim_size;
2705 u8 reserved1;
2706 struct ieee80211_hdr frame[0]; /* beacon frame */
2707} __attribute__ ((packed));
2708
2349struct iwl_tx_beacon_cmd { 2709struct iwl_tx_beacon_cmd {
2350 struct iwl_tx_cmd tx; 2710 struct iwl_tx_cmd tx;
2351 __le16 tim_idx; 2711 __le16 tim_idx;
@@ -2382,6 +2742,76 @@ struct rate_histogram {
2382 2742
2383/* statistics command response */ 2743/* statistics command response */
2384 2744
2745struct iwl39_statistics_rx_phy {
2746 __le32 ina_cnt;
2747 __le32 fina_cnt;
2748 __le32 plcp_err;
2749 __le32 crc32_err;
2750 __le32 overrun_err;
2751 __le32 early_overrun_err;
2752 __le32 crc32_good;
2753 __le32 false_alarm_cnt;
2754 __le32 fina_sync_err_cnt;
2755 __le32 sfd_timeout;
2756 __le32 fina_timeout;
2757 __le32 unresponded_rts;
2758 __le32 rxe_frame_limit_overrun;
2759 __le32 sent_ack_cnt;
2760 __le32 sent_cts_cnt;
2761} __attribute__ ((packed));
2762
2763struct iwl39_statistics_rx_non_phy {
2764 __le32 bogus_cts; /* CTS received when not expecting CTS */
2765 __le32 bogus_ack; /* ACK received when not expecting ACK */
2766 __le32 non_bssid_frames; /* number of frames with BSSID that
2767 * doesn't belong to the STA BSSID */
2768 __le32 filtered_frames; /* count frames that were dumped in the
2769 * filtering process */
2770 __le32 non_channel_beacons; /* beacons with our bss id but not on
2771 * our serving channel */
2772} __attribute__ ((packed));
2773
2774struct iwl39_statistics_rx {
2775 struct iwl39_statistics_rx_phy ofdm;
2776 struct iwl39_statistics_rx_phy cck;
2777 struct iwl39_statistics_rx_non_phy general;
2778} __attribute__ ((packed));
2779
2780struct iwl39_statistics_tx {
2781 __le32 preamble_cnt;
2782 __le32 rx_detected_cnt;
2783 __le32 bt_prio_defer_cnt;
2784 __le32 bt_prio_kill_cnt;
2785 __le32 few_bytes_cnt;
2786 __le32 cts_timeout;
2787 __le32 ack_timeout;
2788 __le32 expected_ack_cnt;
2789 __le32 actual_ack_cnt;
2790} __attribute__ ((packed));
2791
2792struct statistics_dbg {
2793 __le32 burst_check;
2794 __le32 burst_count;
2795 __le32 reserved[4];
2796} __attribute__ ((packed));
2797
2798struct iwl39_statistics_div {
2799 __le32 tx_on_a;
2800 __le32 tx_on_b;
2801 __le32 exec_time;
2802 __le32 probe_time;
2803} __attribute__ ((packed));
2804
2805struct iwl39_statistics_general {
2806 __le32 temperature;
2807 struct statistics_dbg dbg;
2808 __le32 sleep_time;
2809 __le32 slots_out;
2810 __le32 slots_idle;
2811 __le32 ttl_timestamp;
2812 struct iwl39_statistics_div div;
2813} __attribute__ ((packed));
2814
2385struct statistics_rx_phy { 2815struct statistics_rx_phy {
2386 __le32 ina_cnt; 2816 __le32 ina_cnt;
2387 __le32 fina_cnt; 2817 __le32 fina_cnt;
@@ -2418,7 +2848,7 @@ struct statistics_rx_ht_phy {
2418 __le32 reserved2; 2848 __le32 reserved2;
2419} __attribute__ ((packed)); 2849} __attribute__ ((packed));
2420 2850
2421#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1) 2851#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2422 2852
2423struct statistics_rx_non_phy { 2853struct statistics_rx_non_phy {
2424 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2854 __le32 bogus_cts; /* CTS received when not expecting CTS */
@@ -2493,11 +2923,6 @@ struct statistics_tx {
2493 struct statistics_tx_non_phy_agg agg; 2923 struct statistics_tx_non_phy_agg agg;
2494} __attribute__ ((packed)); 2924} __attribute__ ((packed));
2495 2925
2496struct statistics_dbg {
2497 __le32 burst_check;
2498 __le32 burst_count;
2499 __le32 reserved[4];
2500} __attribute__ ((packed));
2501 2926
2502struct statistics_div { 2927struct statistics_div {
2503 __le32 tx_on_a; 2928 __le32 tx_on_a;
@@ -2561,6 +2986,14 @@ struct iwl_statistics_cmd {
2561 */ 2986 */
2562#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) 2987#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
2563#define STATISTICS_REPLY_FLG_FAT_MODE_MSK cpu_to_le32(0x8) 2988#define STATISTICS_REPLY_FLG_FAT_MODE_MSK cpu_to_le32(0x8)
2989
2990struct iwl3945_notif_statistics {
2991 __le32 flag;
2992 struct iwl39_statistics_rx rx;
2993 struct iwl39_statistics_tx tx;
2994 struct iwl39_statistics_general general;
2995} __attribute__ ((packed));
2996
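A short sketch of pulling a few values out of a 3945 statistics notification (stats assumed to point at the iwl3945_notif_statistics payload of the received packet); the flag test reuses STATISTICS_REPLY_FLG_BAND_24G_MSK defined just above:

	bool band_24g = stats->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK;
	u32 ofdm_plcp_err = le32_to_cpu(stats->rx.ofdm.plcp_err);
	s32 temperature = le32_to_cpu(stats->general.temperature);
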
2564struct iwl_notif_statistics { 2997struct iwl_notif_statistics {
2565 __le32 flag; 2998 __le32 flag;
2566 struct statistics_rx rx; 2999 struct statistics_rx rx;
@@ -3012,6 +3445,10 @@ struct iwl_rx_packet {
3012 __le32 len; 3445 __le32 len;
3013 struct iwl_cmd_header hdr; 3446 struct iwl_cmd_header hdr;
3014 union { 3447 union {
3448 struct iwl3945_rx_frame rx_frame;
3449 struct iwl3945_tx_resp tx_resp;
3450 struct iwl3945_beacon_notif beacon_status;
3451
3015 struct iwl_alive_resp alive_frame; 3452 struct iwl_alive_resp alive_frame;
3016 struct iwl_spectrum_notification spectrum_notif; 3453 struct iwl_spectrum_notification spectrum_notif;
3017 struct iwl_csa_notification csa_notif; 3454 struct iwl_csa_notification csa_notif;
@@ -3029,6 +3466,6 @@ struct iwl_rx_packet {
3029 } u; 3466 } u;
3030} __attribute__ ((packed)); 3467} __attribute__ ((packed));
3031 3468
3032int iwl_agn_check_rxon_cmd(struct iwl_rxon_cmd *rxon); 3469int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
3033 3470
3034#endif /* __iwl_commands_h__ */ 3471#endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 73d7973707e..260bf903cb7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +28,7 @@
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/etherdevice.h>
31#include <net/mac80211.h> 32#include <net/mac80211.h>
32 33
33#include "iwl-eeprom.h" 34#include "iwl-eeprom.h"
@@ -170,7 +171,8 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
170 struct ieee80211_hw *hw = 171 struct ieee80211_hw *hw =
171 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops); 172 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
172 if (hw == NULL) { 173 if (hw == NULL) {
173 IWL_ERROR("Can not allocate network device\n"); 174 printk(KERN_ERR "%s: Can not allocate network device\n",
175 cfg->name);
174 goto out; 176 goto out;
175 } 177 }
176 178
@@ -210,7 +212,7 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
210 if (!rxq->bd) { 212 if (!rxq->bd) {
211 ret = iwl_rx_queue_alloc(priv); 213 ret = iwl_rx_queue_alloc(priv);
212 if (ret) { 214 if (ret) {
213 IWL_ERROR("Unable to initialize Rx queue\n"); 215 IWL_ERR(priv, "Unable to initialize Rx queue\n");
214 return -ENOMEM; 216 return -ENOMEM;
215 } 217 }
216 } else 218 } else
@@ -321,7 +323,7 @@ void iwl_reset_qos(struct iwl_priv *priv)
321 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; 323 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
322 } 324 }
323 } 325 }
324 IWL_DEBUG_QOS("set QoS to default \n"); 326 IWL_DEBUG_QOS(priv, "set QoS to default \n");
325 327
326 spin_unlock_irqrestore(&priv->lock, flags); 328 spin_unlock_irqrestore(&priv->lock, flags);
327} 329}
@@ -402,10 +404,11 @@ static void iwlcore_init_hw_rates(struct iwl_priv *priv,
402 } 404 }
403} 405}
404 406
407
405/** 408/**
406 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom 409 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
407 */ 410 */
408static int iwlcore_init_geos(struct iwl_priv *priv) 411int iwlcore_init_geos(struct iwl_priv *priv)
409{ 412{
410 struct iwl_channel_info *ch; 413 struct iwl_channel_info *ch;
411 struct ieee80211_supported_band *sband; 414 struct ieee80211_supported_band *sband;
@@ -416,7 +419,7 @@ static int iwlcore_init_geos(struct iwl_priv *priv)
416 419
417 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || 420 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
418 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { 421 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
419 IWL_DEBUG_INFO("Geography modes already initialized.\n"); 422 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
420 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 423 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
421 return 0; 424 return 0;
422 } 425 }
@@ -457,8 +460,6 @@ static int iwlcore_init_geos(struct iwl_priv *priv)
457 priv->ieee_channels = channels; 460 priv->ieee_channels = channels;
458 priv->ieee_rates = rates; 461 priv->ieee_rates = rates;
459 462
460 iwlcore_init_hw_rates(priv, rates);
461
462 for (i = 0; i < priv->channel_count; i++) { 463 for (i = 0; i < priv->channel_count; i++) {
463 ch = &priv->channel_info[i]; 464 ch = &priv->channel_info[i];
464 465
@@ -500,7 +501,7 @@ static int iwlcore_init_geos(struct iwl_priv *priv)
500 /* Save flags for reg domain usage */ 501 /* Save flags for reg domain usage */
501 geo_ch->orig_flags = geo_ch->flags; 502 geo_ch->orig_flags = geo_ch->flags;
502 503
503 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", 504 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
504 ch->channel, geo_ch->center_freq, 505 ch->channel, geo_ch->center_freq,
505 is_channel_a_band(ch) ? "5.2" : "2.4", 506 is_channel_a_band(ch) ? "5.2" : "2.4",
506 geo_ch->flags & IEEE80211_CHAN_DISABLED ? 507 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
@@ -510,33 +511,33 @@ static int iwlcore_init_geos(struct iwl_priv *priv)
510 511
511 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && 512 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
512 priv->cfg->sku & IWL_SKU_A) { 513 priv->cfg->sku & IWL_SKU_A) {
513 printk(KERN_INFO DRV_NAME 514 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
514 ": Incorrectly detected BG card as ABG. Please send " 515 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
515 "your PCI ID 0x%04X:0x%04X to maintainer.\n", 516 priv->pci_dev->device,
516 priv->pci_dev->device, priv->pci_dev->subsystem_device); 517 priv->pci_dev->subsystem_device);
517 priv->cfg->sku &= ~IWL_SKU_A; 518 priv->cfg->sku &= ~IWL_SKU_A;
518 } 519 }
519 520
520 printk(KERN_INFO DRV_NAME 521 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
521 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", 522 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
522 priv->bands[IEEE80211_BAND_2GHZ].n_channels, 523 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
523 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
524
525 524
526 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 525 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
527 526
528 return 0; 527 return 0;
529} 528}
529EXPORT_SYMBOL(iwlcore_init_geos);
530 530
531/* 531/*
532 * iwlcore_free_geos - undo allocations in iwlcore_init_geos 532 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
533 */ 533 */
534static void iwlcore_free_geos(struct iwl_priv *priv) 534void iwlcore_free_geos(struct iwl_priv *priv)
535{ 535{
536 kfree(priv->ieee_channels); 536 kfree(priv->ieee_channels);
537 kfree(priv->ieee_rates); 537 kfree(priv->ieee_rates);
538 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 538 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
539} 539}
540EXPORT_SYMBOL(iwlcore_free_geos);
540 541
541static bool is_single_rx_stream(struct iwl_priv *priv) 542static bool is_single_rx_stream(struct iwl_priv *priv)
542{ 543{
@@ -587,6 +588,167 @@ u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
587} 588}
588EXPORT_SYMBOL(iwl_is_fat_tx_allowed); 589EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
589 590
591void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
592{
593 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
594
595 if (hw_decrypt)
596 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
597 else
598 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
599
600}
601EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
602
603/**
604 * iwl_check_rxon_cmd - validate RXON structure is valid
605 *
606 * NOTE: This is really only useful during development and can eventually
607 * be #ifdef'd out once the driver is stable and folks aren't actively
608 * making changes
609 */
610int iwl_check_rxon_cmd(struct iwl_priv *priv)
611{
612 int error = 0;
613 int counter = 1;
614 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
615
616 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
617 error |= le32_to_cpu(rxon->flags &
618 (RXON_FLG_TGJ_NARROW_BAND_MSK |
619 RXON_FLG_RADAR_DETECT_MSK));
620 if (error)
621 IWL_WARN(priv, "check 24G fields %d | %d\n",
622 counter++, error);
623 } else {
624 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
625 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
626 if (error)
627 IWL_WARN(priv, "check 52 fields %d | %d\n",
628 counter++, error);
629 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
630 if (error)
631 IWL_WARN(priv, "check 52 CCK %d | %d\n",
632 counter++, error);
633 }
634 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
635 if (error)
636 IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);
637
638 /* make sure basic rates 6Mbps and 1Mbps are supported */
639 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
640 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
641 if (error)
642 IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);
643
644 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
645 if (error)
646 IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);
647
648 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
649 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
650 if (error)
651 IWL_WARN(priv, "check CCK and short slot %d | %d\n",
652 counter++, error);
653
654 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
655 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
656 if (error)
657 IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
658 counter++, error);
659
660 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
661 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
662 if (error)
663 IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
664 counter++, error);
665
666 if (error)
667 IWL_WARN(priv, "Tuning to channel %d\n",
668 le16_to_cpu(rxon->channel));
669
670 if (error) {
671 IWL_ERR(priv, "Not a valid iwl_rxon_assoc_cmd field values\n");
672 return -1;
673 }
674 return 0;
675}
676EXPORT_SYMBOL(iwl_check_rxon_cmd);
677
678/**
679 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
680 * @priv: staging_rxon is compared to active_rxon
681 *
682 * If the RXON structure is changing enough to require a new tune,
683 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
684 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
685 */
686int iwl_full_rxon_required(struct iwl_priv *priv)
687{
688
689 /* These items are only settable from the full RXON command */
690 if (!(iwl_is_associated(priv)) ||
691 compare_ether_addr(priv->staging_rxon.bssid_addr,
692 priv->active_rxon.bssid_addr) ||
693 compare_ether_addr(priv->staging_rxon.node_addr,
694 priv->active_rxon.node_addr) ||
695 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
696 priv->active_rxon.wlap_bssid_addr) ||
697 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
698 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
699 (priv->staging_rxon.air_propagation !=
700 priv->active_rxon.air_propagation) ||
701 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
702 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
703 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
704 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
705 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
706 return 1;
707
708 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
709 * be updated with the RXON_ASSOC command -- however only some
710 * flag transitions are allowed using RXON_ASSOC */
711
712 /* Check if we are not switching bands */
713 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
714 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
715 return 1;
716
717 /* Check if we are switching association toggle */
718 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
719 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
720 return 1;
721
722 return 0;
723}
724EXPORT_SYMBOL(iwl_full_rxon_required);
725
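A hedged usage sketch of the helper above; iwl_send_rxon_assoc() and iwl_commit_rxon() stand in for whatever per-device commit paths the caller actually has — they are assumptions here, not part of this patch:

	/* Cheap update vs. full re-tune */
	if (!iwl_full_rxon_required(priv))
		ret = iwl_send_rxon_assoc(priv);	/* flags/rates only */
	else
		ret = iwl_commit_rxon(priv);		/* full RXON: re-tune,
							 * stations must be re-added */
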
726u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
727{
728 int i;
729 int rate_mask;
730
731 /* Set rate mask*/
732 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
733 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
734 else
735 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
736
737 /* Find lowest valid rate */
738 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
739 i = iwl_rates[i].next_ieee) {
740 if (rate_mask & (1 << i))
741 return iwl_rates[i].plcp;
742 }
743
744 /* No valid rate was found. Assign the lowest one */
745 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
746 return IWL_RATE_1M_PLCP;
747 else
748 return IWL_RATE_6M_PLCP;
749}
750EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
751
590void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info) 752void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
591{ 753{
592 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 754 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
@@ -628,7 +790,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
628 790
629 iwl_set_rxon_chain(priv); 791 iwl_set_rxon_chain(priv);
630 792
631 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X " 793 IWL_DEBUG_ASSOC(priv, "supported HT rate 0x%X 0x%X 0x%X "
632 "rxon flags 0x%X operation mode :0x%X " 794 "rxon flags 0x%X operation mode :0x%X "
633 "extension channel offset 0x%x\n", 795 "extension channel offset 0x%x\n",
634 ht_info->mcs.rx_mask[0], 796 ht_info->mcs.rx_mask[0],
@@ -679,7 +841,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
679 break; 841 break;
680 case WLAN_HT_CAP_SM_PS_INVALID: 842 case WLAN_HT_CAP_SM_PS_INVALID:
681 default: 843 default:
682 IWL_ERROR("invalid mimo ps mode %d\n", 844 IWL_ERR(priv, "invalid mimo ps mode %d\n",
683 priv->current_ht_config.sm_ps); 845 priv->current_ht_config.sm_ps);
684 WARN_ON(1); 846 WARN_ON(1);
685 idle_cnt = -1; 847 idle_cnt = -1;
@@ -700,6 +862,18 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
700} 862}
701 863
702/** 864/**
865 * iwl_is_monitor_mode - Determine if interface in monitor mode
866 *
867 * priv->iw_mode is set in add_interface, but add_interface is
868 * never called for monitor mode. The only way mac80211 informs us about
869 * monitor mode is through configuring filters (call to configure_filter).
870 */
871static bool iwl_is_monitor_mode(struct iwl_priv *priv)
872{
873 return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
874}
875
876/**
703 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image 877 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
704 * 878 *
705 * Selects how many and which Rx receivers/antennas/chains to use. 879 * Selects how many and which Rx receivers/antennas/chains to use.
@@ -742,6 +916,19 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
742 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; 916 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
743 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; 917 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
744 918
919 /* copied from 'iwl_bg_request_scan()' */
920 /* Force use of chains B and C (0x6) for Rx for 4965
921 * Avoid A (0x1) because of its off-channel reception on A-band.
922 * MIMO is not used here, but value is required */
923 if (iwl_is_monitor_mode(priv) &&
924 !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
925 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
926 rx_chain = 0x07 << RXON_RX_CHAIN_VALID_POS;
927 rx_chain |= 0x06 << RXON_RX_CHAIN_FORCE_SEL_POS;
928 rx_chain |= 0x07 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
929 rx_chain |= 0x01 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
930 }
931
745 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain); 932 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
746 933
747 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) 934 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
@@ -749,7 +936,7 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
749 else 936 else
750 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; 937 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
751 938
752 IWL_DEBUG_ASSOC("rx_chain=0x%X active=%d idle=%d\n", 939 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
753 priv->staging_rxon.rx_chain, 940 priv->staging_rxon.rx_chain,
754 active_rx_cnt, idle_rx_cnt); 941 active_rx_cnt, idle_rx_cnt);
755 942
@@ -774,7 +961,7 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
774 u16 channel = ieee80211_frequency_to_channel(ch->center_freq); 961 u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
775 962
776 if (!iwl_get_channel_info(priv, band, channel)) { 963 if (!iwl_get_channel_info(priv, band, channel)) {
777 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", 964 IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
778 channel, band); 965 channel, band);
779 return -EINVAL; 966 return -EINVAL;
780 } 967 }
@@ -791,12 +978,283 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
791 978
792 priv->band = band; 979 priv->band = band;
793 980
794 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band); 981 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
795 982
796 return 0; 983 return 0;
797} 984}
798EXPORT_SYMBOL(iwl_set_rxon_channel); 985EXPORT_SYMBOL(iwl_set_rxon_channel);
799 986
987void iwl_set_flags_for_band(struct iwl_priv *priv,
988 enum ieee80211_band band)
989{
990 if (band == IEEE80211_BAND_5GHZ) {
991 priv->staging_rxon.flags &=
992 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
993 | RXON_FLG_CCK_MSK);
994 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
995 } else {
996 /* Copied from iwl_post_associate() */
997 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
998 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
999 else
1000 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1001
1002 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
1003 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1004
1005 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1006 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
1007 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
1008 }
1009}
1010EXPORT_SYMBOL(iwl_set_flags_for_band);
1011
1012/*
1013 * initialize rxon structure with default values from eeprom
1014 */
1015void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
1016{
1017 const struct iwl_channel_info *ch_info;
1018
1019 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
1020
1021 switch (mode) {
1022 case NL80211_IFTYPE_AP:
1023 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
1024 break;
1025
1026 case NL80211_IFTYPE_STATION:
1027 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
1028 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
1029 break;
1030
1031 case NL80211_IFTYPE_ADHOC:
1032 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
1033 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
1034 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
1035 RXON_FILTER_ACCEPT_GRP_MSK;
1036 break;
1037
1038 case NL80211_IFTYPE_MONITOR:
1039 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
1040 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
1041 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
1042 break;
1043 default:
1044 IWL_ERR(priv, "Unsupported interface type %d\n", mode);
1045 break;
1046 }
1047
1048#if 0
1049 /* TODO: Figure out when short_preamble would be set and cache from
1050 * that */
1051 if (!hw_to_local(priv->hw)->short_preamble)
1052 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1053 else
1054 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1055#endif
1056
1057 ch_info = iwl_get_channel_info(priv, priv->band,
1058 le16_to_cpu(priv->active_rxon.channel));
1059
1060 if (!ch_info)
1061 ch_info = &priv->channel_info[0];
1062
1063 /*
1064 * in some case A channels are all non IBSS
1065 * in this case force B/G channel
1066 */
1067 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
1068 !(is_channel_ibss(ch_info)))
1069 ch_info = &priv->channel_info[0];
1070
1071 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1072 priv->band = ch_info->band;
1073
1074 iwl_set_flags_for_band(priv, priv->band);
1075
1076 priv->staging_rxon.ofdm_basic_rates =
1077 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1078 priv->staging_rxon.cck_basic_rates =
1079 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1080
1081 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
1082 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
1083 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
1084 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
1085 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1086 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
1087}
1088EXPORT_SYMBOL(iwl_connection_init_rx_config);
1089
1090void iwl_set_rate(struct iwl_priv *priv)
1091{
1092 const struct ieee80211_supported_band *hw = NULL;
1093 struct ieee80211_rate *rate;
1094 int i;
1095
1096 hw = iwl_get_hw_mode(priv, priv->band);
1097 if (!hw) {
1098 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
1099 return;
1100 }
1101
1102 priv->active_rate = 0;
1103 priv->active_rate_basic = 0;
1104
1105 for (i = 0; i < hw->n_bitrates; i++) {
1106 rate = &(hw->bitrates[i]);
1107 if (rate->hw_value < IWL_RATE_COUNT)
1108 priv->active_rate |= (1 << rate->hw_value);
1109 }
1110
1111 IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n",
1112 priv->active_rate, priv->active_rate_basic);
1113
1114 /*
1115 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
1116 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
1117 * OFDM
1118 */
1119 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
1120 priv->staging_rxon.cck_basic_rates =
1121 ((priv->active_rate_basic &
1122 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
1123 else
1124 priv->staging_rxon.cck_basic_rates =
1125 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1126
1127 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
1128 priv->staging_rxon.ofdm_basic_rates =
1129 ((priv->active_rate_basic &
1130 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
1131 IWL_FIRST_OFDM_RATE) & 0xFF;
1132 else
1133 priv->staging_rxon.ofdm_basic_rates =
1134 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1135}
1136EXPORT_SYMBOL(iwl_set_rate);
1137
1138void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1139{
1140 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1141 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1142 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
1143 IWL_DEBUG_11H(priv, "CSA notif: channel %d, status %d\n",
1144 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
1145 rxon->channel = csa->channel;
1146 priv->staging_rxon.channel = csa->channel;
1147}
1148EXPORT_SYMBOL(iwl_rx_csa);
1149
1150#ifdef CONFIG_IWLWIFI_DEBUG
1151static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1152{
1153 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1154
1155 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
1156 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
1157 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
1158 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
1159 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
1160 le32_to_cpu(rxon->filter_flags));
1161 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
1162 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
1163 rxon->ofdm_basic_rates);
1164 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
1165 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
1166 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1167 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1168}
1169#endif
1170
1171/**
1172 * iwl_irq_handle_error - called for HW or SW error interrupt from card
1173 */
1174void iwl_irq_handle_error(struct iwl_priv *priv)
1175{
1176 /* Set the FW error flag -- cleared on iwl_down */
1177 set_bit(STATUS_FW_ERROR, &priv->status);
1178
1179 /* Cancel currently queued command. */
1180 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1181
1182#ifdef CONFIG_IWLWIFI_DEBUG
1183 if (priv->debug_level & IWL_DL_FW_ERRORS) {
1184 iwl_dump_nic_error_log(priv);
1185 iwl_dump_nic_event_log(priv);
1186 iwl_print_rx_config_cmd(priv);
1187 }
1188#endif
1189
1190 wake_up_interruptible(&priv->wait_command_queue);
1191
1192 /* Keep the restart process from trying to send host
1193 * commands by clearing the INIT status bit */
1194 clear_bit(STATUS_READY, &priv->status);
1195
1196 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1197 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
1198 "Restarting adapter due to uCode error.\n");
1199
1200 if (iwl_is_associated(priv)) {
1201 memcpy(&priv->recovery_rxon, &priv->active_rxon,
1202 sizeof(priv->recovery_rxon));
1203 priv->error_recovering = 1;
1204 }
1205 if (priv->cfg->mod_params->restart_fw)
1206 queue_work(priv->workqueue, &priv->restart);
1207 }
1208}
1209EXPORT_SYMBOL(iwl_irq_handle_error);
1210
1211void iwl_configure_filter(struct ieee80211_hw *hw,
1212 unsigned int changed_flags,
1213 unsigned int *total_flags,
1214 int mc_count, struct dev_addr_list *mc_list)
1215{
1216 struct iwl_priv *priv = hw->priv;
1217 __le32 *filter_flags = &priv->staging_rxon.filter_flags;
1218
1219 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
1220 changed_flags, *total_flags);
1221
1222 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
1223 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
1224 *filter_flags |= RXON_FILTER_PROMISC_MSK;
1225 else
1226 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
1227 }
1228 if (changed_flags & FIF_ALLMULTI) {
1229 if (*total_flags & FIF_ALLMULTI)
1230 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
1231 else
1232 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
1233 }
1234 if (changed_flags & FIF_CONTROL) {
1235 if (*total_flags & FIF_CONTROL)
1236 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
1237 else
1238 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
1239 }
1240 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
1241 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
1242 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
1243 else
1244 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
1245 }
1246
1247 /* We avoid iwl_commit_rxon here to commit the new filter flags
1248 * since mac80211 will call ieee80211_hw_config immediately.
1249 * (mc_list is not supported at this time). Otherwise, we need to
1250 * queue a background iwl_commit_rxon work.
1251 */
1252
1253 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
1254 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
1255}
1256EXPORT_SYMBOL(iwl_configure_filter);
1257
800int iwl_setup_mac(struct iwl_priv *priv) 1258int iwl_setup_mac(struct iwl_priv *priv)
801{ 1259{
802 int ret; 1260 int ret;
@@ -806,12 +1264,14 @@ int iwl_setup_mac(struct iwl_priv *priv)
806 /* Tell mac80211 our characteristics */ 1264 /* Tell mac80211 our characteristics */
807 hw->flags = IEEE80211_HW_SIGNAL_DBM | 1265 hw->flags = IEEE80211_HW_SIGNAL_DBM |
808 IEEE80211_HW_NOISE_DBM | 1266 IEEE80211_HW_NOISE_DBM |
809 IEEE80211_HW_AMPDU_AGGREGATION; 1267 IEEE80211_HW_AMPDU_AGGREGATION |
1268 IEEE80211_HW_SUPPORTS_PS;
810 hw->wiphy->interface_modes = 1269 hw->wiphy->interface_modes =
811 BIT(NL80211_IFTYPE_STATION) | 1270 BIT(NL80211_IFTYPE_STATION) |
812 BIT(NL80211_IFTYPE_ADHOC); 1271 BIT(NL80211_IFTYPE_ADHOC);
813 1272
814 hw->wiphy->fw_handles_regulatory = true; 1273 hw->wiphy->custom_regulatory = true;
1274 hw->wiphy->max_scan_ssids = 1;
815 1275
816 /* Default value; 4 EDCA QOS priorities */ 1276 /* Default value; 4 EDCA QOS priorities */
817 hw->queues = 4; 1277 hw->queues = 4;
@@ -831,7 +1291,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
831 1291
832 ret = ieee80211_register_hw(priv->hw); 1292 ret = ieee80211_register_hw(priv->hw);
833 if (ret) { 1293 if (ret) {
834 IWL_ERROR("Failed to register hw (error %d)\n", ret); 1294 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
835 return ret; 1295 return ret;
836 } 1296 }
837 priv->mac80211_registered = 1; 1297 priv->mac80211_registered = 1;
@@ -863,7 +1323,6 @@ int iwl_init_drv(struct iwl_priv *priv)
863{ 1323{
864 int ret; 1324 int ret;
865 1325
866 priv->retry_rate = 1;
867 priv->ibss_beacon = NULL; 1326 priv->ibss_beacon = NULL;
868 1327
869 spin_lock_init(&priv->lock); 1328 spin_lock_init(&priv->lock);
@@ -897,21 +1356,22 @@ int iwl_init_drv(struct iwl_priv *priv)
897 priv->qos_data.qos_cap.val = 0; 1356 priv->qos_data.qos_cap.val = 0;
898 1357
899 priv->rates_mask = IWL_RATES_MASK; 1358 priv->rates_mask = IWL_RATES_MASK;
900 /* If power management is turned on, default to AC mode */ 1359 /* If power management is turned on, default to CAM mode */
901 priv->power_mode = IWL_POWER_AC; 1360 priv->power_mode = IWL_POWER_MODE_CAM;
902 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MAX; 1361 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MAX;
903 1362
904 ret = iwl_init_channel_map(priv); 1363 ret = iwl_init_channel_map(priv);
905 if (ret) { 1364 if (ret) {
906 IWL_ERROR("initializing regulatory failed: %d\n", ret); 1365 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
907 goto err; 1366 goto err;
908 } 1367 }
909 1368
910 ret = iwlcore_init_geos(priv); 1369 ret = iwlcore_init_geos(priv);
911 if (ret) { 1370 if (ret) {
912 IWL_ERROR("initializing geos failed: %d\n", ret); 1371 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
913 goto err_free_channel_map; 1372 goto err_free_channel_map;
914 } 1373 }
1374 iwlcore_init_hw_rates(priv, priv->ieee_rates);
915 1375
916 return 0; 1376 return 0;
917 1377
@@ -926,13 +1386,13 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
926{ 1386{
927 int ret = 0; 1387 int ret = 0;
928 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) { 1388 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
929 IWL_WARNING("Requested user TXPOWER %d below limit.\n", 1389 IWL_WARN(priv, "Requested user TXPOWER %d below limit.\n",
930 priv->tx_power_user_lmt); 1390 priv->tx_power_user_lmt);
931 return -EINVAL; 1391 return -EINVAL;
932 } 1392 }
933 1393
934 if (tx_power > IWL_TX_POWER_TARGET_POWER_MAX) { 1394 if (tx_power > IWL_TX_POWER_TARGET_POWER_MAX) {
935 IWL_WARNING("Requested user TXPOWER %d above limit.\n", 1395 IWL_WARN(priv, "Requested user TXPOWER %d above limit.\n",
936 priv->tx_power_user_lmt); 1396 priv->tx_power_user_lmt);
937 return -EINVAL; 1397 return -EINVAL;
938 } 1398 }
@@ -970,18 +1430,33 @@ void iwl_disable_interrupts(struct iwl_priv *priv)
970 * from uCode or flow handler (Rx/Tx DMA) */ 1430 * from uCode or flow handler (Rx/Tx DMA) */
971 iwl_write32(priv, CSR_INT, 0xffffffff); 1431 iwl_write32(priv, CSR_INT, 0xffffffff);
972 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); 1432 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
973 IWL_DEBUG_ISR("Disabled interrupts\n"); 1433 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
974} 1434}
975EXPORT_SYMBOL(iwl_disable_interrupts); 1435EXPORT_SYMBOL(iwl_disable_interrupts);
976 1436
977void iwl_enable_interrupts(struct iwl_priv *priv) 1437void iwl_enable_interrupts(struct iwl_priv *priv)
978{ 1438{
979 IWL_DEBUG_ISR("Enabling interrupts\n"); 1439 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
980 set_bit(STATUS_INT_ENABLED, &priv->status); 1440 set_bit(STATUS_INT_ENABLED, &priv->status);
981 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); 1441 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
982} 1442}
983EXPORT_SYMBOL(iwl_enable_interrupts); 1443EXPORT_SYMBOL(iwl_enable_interrupts);
984 1444
1445int iwl_send_bt_config(struct iwl_priv *priv)
1446{
1447 struct iwl_bt_cmd bt_cmd = {
1448 .flags = 3,
1449 .lead_time = 0xAA,
1450 .max_kill = 1,
1451 .kill_ack_mask = 0,
1452 .kill_cts_mask = 0,
1453 };
1454
1455 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1456 sizeof(struct iwl_bt_cmd), &bt_cmd);
1457}
1458EXPORT_SYMBOL(iwl_send_bt_config);
1459
985int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags) 1460int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
986{ 1461{
987 u32 stat_flags = 0; 1462 u32 stat_flags = 0;
@@ -1007,7 +1482,7 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
1007 u32 errcnt = 0; 1482 u32 errcnt = 0;
1008 u32 i; 1483 u32 i;
1009 1484
1010 IWL_DEBUG_INFO("ucode inst image size is %u\n", len); 1485 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1011 1486
1012 ret = iwl_grab_nic_access(priv); 1487 ret = iwl_grab_nic_access(priv);
1013 if (ret) 1488 if (ret)
@@ -1018,7 +1493,7 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
1018 /* NOTE: Use the debugless read so we don't flood kernel log 1493 /* NOTE: Use the debugless read so we don't flood kernel log
1019 * if IWL_DL_IO is set */ 1494 * if IWL_DL_IO is set */
1020 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 1495 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1021 i + RTC_INST_LOWER_BOUND); 1496 i + IWL49_RTC_INST_LOWER_BOUND);
1022 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1497 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1023 if (val != le32_to_cpu(*image)) { 1498 if (val != le32_to_cpu(*image)) {
1024 ret = -EIO; 1499 ret = -EIO;
@@ -1045,13 +1520,14 @@ static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1045 int ret = 0; 1520 int ret = 0;
1046 u32 errcnt; 1521 u32 errcnt;
1047 1522
1048 IWL_DEBUG_INFO("ucode inst image size is %u\n", len); 1523 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1049 1524
1050 ret = iwl_grab_nic_access(priv); 1525 ret = iwl_grab_nic_access(priv);
1051 if (ret) 1526 if (ret)
1052 return ret; 1527 return ret;
1053 1528
1054 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); 1529 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1530 IWL49_RTC_INST_LOWER_BOUND);
1055 1531
1056 errcnt = 0; 1532 errcnt = 0;
1057 for (; len > 0; len -= sizeof(u32), image++) { 1533 for (; len > 0; len -= sizeof(u32), image++) {
@@ -1060,7 +1536,7 @@ static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1060 * if IWL_DL_IO is set */ 1536 * if IWL_DL_IO is set */
1061 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1537 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1062 if (val != le32_to_cpu(*image)) { 1538 if (val != le32_to_cpu(*image)) {
1063 IWL_ERROR("uCode INST section is invalid at " 1539 IWL_ERR(priv, "uCode INST section is invalid at "
1064 "offset 0x%x, is 0x%x, s/b 0x%x\n", 1540 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1065 save_len - len, val, le32_to_cpu(*image)); 1541 save_len - len, val, le32_to_cpu(*image));
1066 ret = -EIO; 1542 ret = -EIO;
@@ -1073,8 +1549,8 @@ static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1073 iwl_release_nic_access(priv); 1549 iwl_release_nic_access(priv);
1074 1550
1075 if (!errcnt) 1551 if (!errcnt)
1076 IWL_DEBUG_INFO 1552 IWL_DEBUG_INFO(priv,
1077 ("ucode image in INSTRUCTION memory is good\n"); 1553 "ucode image in INSTRUCTION memory is good\n");
1078 1554
1079 return ret; 1555 return ret;
1080} 1556}
@@ -1094,7 +1570,7 @@ int iwl_verify_ucode(struct iwl_priv *priv)
1094 len = priv->ucode_boot.len; 1570 len = priv->ucode_boot.len;
1095 ret = iwlcore_verify_inst_sparse(priv, image, len); 1571 ret = iwlcore_verify_inst_sparse(priv, image, len);
1096 if (!ret) { 1572 if (!ret) {
1097 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); 1573 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
1098 return 0; 1574 return 0;
1099 } 1575 }
1100 1576
@@ -1103,7 +1579,7 @@ int iwl_verify_ucode(struct iwl_priv *priv)
1103 len = priv->ucode_init.len; 1579 len = priv->ucode_init.len;
1104 ret = iwlcore_verify_inst_sparse(priv, image, len); 1580 ret = iwlcore_verify_inst_sparse(priv, image, len);
1105 if (!ret) { 1581 if (!ret) {
1106 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); 1582 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
1107 return 0; 1583 return 0;
1108 } 1584 }
1109 1585
@@ -1112,11 +1588,11 @@ int iwl_verify_ucode(struct iwl_priv *priv)
1112 len = priv->ucode_code.len; 1588 len = priv->ucode_code.len;
1113 ret = iwlcore_verify_inst_sparse(priv, image, len); 1589 ret = iwlcore_verify_inst_sparse(priv, image, len);
1114 if (!ret) { 1590 if (!ret) {
1115 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); 1591 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
1116 return 0; 1592 return 0;
1117 } 1593 }
1118 1594
1119 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); 1595 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1120 1596
1121 /* Since nothing seems to match, show first several data entries in 1597 /* Since nothing seems to match, show first several data entries in
1122 * instruction SRAM, so maybe visual inspection will give a clue. 1598 * instruction SRAM, so maybe visual inspection will give a clue.
@@ -1188,21 +1664,22 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1188 base = le32_to_cpu(priv->card_alive.error_event_table_ptr); 1664 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1189 1665
1190 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 1666 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1191 IWL_ERROR("Not valid error log pointer 0x%08X\n", base); 1667 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1192 return; 1668 return;
1193 } 1669 }
1194 1670
1195 ret = iwl_grab_nic_access(priv); 1671 ret = iwl_grab_nic_access(priv);
1196 if (ret) { 1672 if (ret) {
1197 IWL_WARNING("Can not read from adapter at this time.\n"); 1673 IWL_WARN(priv, "Can not read from adapter at this time.\n");
1198 return; 1674 return;
1199 } 1675 }
1200 1676
1201 count = iwl_read_targ_mem(priv, base); 1677 count = iwl_read_targ_mem(priv, base);
1202 1678
1203 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1679 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1204 IWL_ERROR("Start IWL Error Log Dump:\n"); 1680 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1205 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count); 1681 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1682 priv->status, count);
1206 } 1683 }
1207 1684
1208 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); 1685 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
@@ -1215,12 +1692,12 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1215 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 1692 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1216 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 1693 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1217 1694
1218 IWL_ERROR("Desc Time " 1695 IWL_ERR(priv, "Desc Time "
1219 "data1 data2 line\n"); 1696 "data1 data2 line\n");
1220 IWL_ERROR("%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", 1697 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1221 desc_lookup(desc), desc, time, data1, data2, line); 1698 desc_lookup(desc), desc, time, data1, data2, line);
1222 IWL_ERROR("blink1 blink2 ilink1 ilink2\n"); 1699 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
1223 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, 1700 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1224 ilink1, ilink2); 1701 ilink1, ilink2);
1225 1702
1226 iwl_release_nic_access(priv); 1703 iwl_release_nic_access(priv);
@@ -1266,11 +1743,11 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1266 ptr += sizeof(u32); 1743 ptr += sizeof(u32);
1267 if (mode == 0) { 1744 if (mode == 0) {
1268 /* data, ev */ 1745 /* data, ev */
1269 IWL_ERROR("EVT_LOG:0x%08x:%04u\n", time, ev); 1746 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1270 } else { 1747 } else {
1271 data = iwl_read_targ_mem(priv, ptr); 1748 data = iwl_read_targ_mem(priv, ptr);
1272 ptr += sizeof(u32); 1749 ptr += sizeof(u32);
1273 IWL_ERROR("EVT_LOGT:%010u:0x%08x:%04u\n", 1750 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1274 time, data, ev); 1751 time, data, ev);
1275 } 1752 }
1276 } 1753 }
@@ -1292,13 +1769,13 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
1292 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1769 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1293 1770
1294 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 1771 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1295 IWL_ERROR("Invalid event log pointer 0x%08X\n", base); 1772 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1296 return; 1773 return;
1297 } 1774 }
1298 1775
1299 ret = iwl_grab_nic_access(priv); 1776 ret = iwl_grab_nic_access(priv);
1300 if (ret) { 1777 if (ret) {
1301 IWL_WARNING("Can not read from adapter at this time.\n"); 1778 IWL_WARN(priv, "Can not read from adapter at this time.\n");
1302 return; 1779 return;
1303 } 1780 }
1304 1781
@@ -1312,12 +1789,12 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
1312 1789
1313 /* bail out if nothing in log */ 1790 /* bail out if nothing in log */
1314 if (size == 0) { 1791 if (size == 0) {
1315 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n"); 1792 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1316 iwl_release_nic_access(priv); 1793 iwl_release_nic_access(priv);
1317 return; 1794 return;
1318 } 1795 }
1319 1796
1320 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n", 1797 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
1321 size, num_wraps); 1798 size, num_wraps);
1322 1799
1323 /* if uCode has wrapped back to top of log, start at the oldest entry, 1800 /* if uCode has wrapped back to top of log, start at the oldest entry,
@@ -1349,9 +1826,9 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1349 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, 1826 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1350 sizeof(cmd), &cmd); 1827 sizeof(cmd), &cmd);
1351 if (ret) 1828 if (ret)
1352 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n"); 1829 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1353 else 1830 else
1354 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, " 1831 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD succeeded, "
1355 "critical temperature is %d\n", 1832 "critical temperature is %d\n",
1356 cmd.critical_temperature_R); 1833 cmd.critical_temperature_R);
1357} 1834}
@@ -1368,7 +1845,7 @@ EXPORT_SYMBOL(iwl_rf_kill_ct_config);
1368 * When in the 'halt' state, the card is shut down and must be fully 1845 * When in the 'halt' state, the card is shut down and must be fully
1369 * restarted to come back on. 1846 * restarted to come back on.
1370 */ 1847 */
1371static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) 1848int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1372{ 1849{
1373 struct iwl_host_cmd cmd = { 1850 struct iwl_host_cmd cmd = {
1374 .id = REPLY_CARD_STATE_CMD, 1851 .id = REPLY_CARD_STATE_CMD,
@@ -1379,6 +1856,7 @@ static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1379 1856
1380 return iwl_send_cmd(priv, &cmd); 1857 return iwl_send_cmd(priv, &cmd);
1381} 1858}
1859EXPORT_SYMBOL(iwl_send_card_state);
1382 1860
1383void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv) 1861void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
1384{ 1862{
@@ -1387,7 +1865,7 @@ void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
1387 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) 1865 if (test_bit(STATUS_RF_KILL_SW, &priv->status))
1388 return; 1866 return;
1389 1867
1390 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO OFF\n"); 1868 IWL_DEBUG_RF_KILL(priv, "Manual SW RF KILL set to: RADIO OFF\n");
1391 1869
1392 iwl_scan_cancel(priv); 1870 iwl_scan_cancel(priv);
1393 /* FIXME: This is a workaround for AP */ 1871 /* FIXME: This is a workaround for AP */
@@ -1416,7 +1894,7 @@ int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
1416 if (!test_bit(STATUS_RF_KILL_SW, &priv->status)) 1894 if (!test_bit(STATUS_RF_KILL_SW, &priv->status))
1417 return 0; 1895 return 0;
1418 1896
1419 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO ON\n"); 1897 IWL_DEBUG_RF_KILL(priv, "Manual SW RF KILL set to: RADIO ON\n");
1420 1898
1421 spin_lock_irqsave(&priv->lock, flags); 1899 spin_lock_irqsave(&priv->lock, flags);
1422 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1900 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -1441,7 +1919,7 @@ int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
1441 spin_unlock_irqrestore(&priv->lock, flags); 1919 spin_unlock_irqrestore(&priv->lock, flags);
1442 1920
1443 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { 1921 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
1444 IWL_DEBUG_RF_KILL("Can not turn radio back on - " 1922 IWL_DEBUG_RF_KILL(priv, "Can not turn radio back on - "
1445 "disabled by HW switch\n"); 1923 "disabled by HW switch\n");
1446 return 0; 1924 return 0;
1447 } 1925 }
@@ -1463,3 +1941,39 @@ int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
1463 return 1; 1941 return 1;
1464} 1942}
1465EXPORT_SYMBOL(iwl_radio_kill_sw_enable_radio); 1943EXPORT_SYMBOL(iwl_radio_kill_sw_enable_radio);
1944
1945void iwl_bg_rf_kill(struct work_struct *work)
1946{
1947 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
1948
1949 wake_up_interruptible(&priv->wait_command_queue);
1950
1951 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1952 return;
1953
1954 mutex_lock(&priv->mutex);
1955
1956 if (!iwl_is_rfkill(priv)) {
1957 IWL_DEBUG_RF_KILL(priv,
1958 "HW and/or SW RF Kill no longer active, restarting "
1959 "device\n");
1960 if (!test_bit(STATUS_EXIT_PENDING, &priv->status) &&
1961 test_bit(STATUS_ALIVE, &priv->status))
1962 queue_work(priv->workqueue, &priv->restart);
1963 } else {
1964 		/* make sure mac80211 stops sending Tx frames */
1965 if (priv->mac80211_registered)
1966 ieee80211_stop_queues(priv->hw);
1967
1968 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
1969 IWL_DEBUG_RF_KILL(priv, "Can not turn radio back on - "
1970 "disabled by SW switch\n");
1971 else
1972 IWL_WARN(priv, "Radio Frequency Kill Switch is On:\n"
1973 "Kill switch must be turned off for "
1974 "wireless networking to work.\n");
1975 }
1976 mutex_unlock(&priv->mutex);
1977 iwl_rfkill_set_hw_state(priv);
1978}
1979EXPORT_SYMBOL(iwl_bg_rf_kill);
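Because iwl_bg_rf_kill() is now exported as a shared work handler rather than kept static in one front end, each driver front end presumably binds it to its own work item and queues it when the RF-kill state changes. A minimal sketch of that wiring, assuming only the priv->rf_kill and priv->workqueue fields visible in the hunk above (the example_* function names are hypothetical, not from the patch):

#include <linux/workqueue.h>
#include "iwl-dev.h"
#include "iwl-core.h"

/* Sketch: bind the shared handler once during driver setup ... */
static void example_bind_rf_kill_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
}

/* ... and schedule it whenever an RF-kill status change is detected,
 * e.g. from the interrupt path. */
static void example_rf_kill_changed(struct iwl_priv *priv)
{
	queue_work(priv->workqueue, &priv->rf_kill);
}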
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7c3a20a986b..9d464ec99dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,7 @@ struct iwl_cmd;
71 71
72 72
73#define IWLWIFI_VERSION "1.3.27k" 73#define IWLWIFI_VERSION "1.3.27k"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation" 74#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>" 75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76 76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -110,6 +110,14 @@ struct iwl_lib_ops {
110 void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv, 110 void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq); 111 struct iwl_tx_queue *txq);
112 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask); 112 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
113 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq,
115 dma_addr_t addr,
116 u16 len, u8 reset, u8 pad);
117 void (*txq_free_tfd)(struct iwl_priv *priv,
118 struct iwl_tx_queue *txq);
119 int (*txq_init)(struct iwl_priv *priv,
120 struct iwl_tx_queue *txq);
113 /* aggregations */ 121 /* aggregations */
114 int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo, 122 int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
115 int sta_id, int tid, u16 ssn_idx); 123 int sta_id, int tid, u16 ssn_idx);
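The three new TFD hooks keep the core code hardware-agnostic while each chipset supplies its own TFD layout. A chipset file would presumably fill them in alongside the existing iwl_lib_ops members, for instance by reusing the iwl_hw_* helpers declared further down in this header (a sketch under that assumption, not copied from the patch; the surrounding members are omitted):

static struct iwl_lib_ops example_lib_ops = {
	/* ... other members unchanged ... */
	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl_hw_txq_free_tfd,
	.txq_init = iwl_hw_tx_queue_init,
};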
@@ -203,6 +211,9 @@ struct iwl_cfg {
203 u16 eeprom_calib_ver; 211 u16 eeprom_calib_ver;
204 const struct iwl_ops *ops; 212 const struct iwl_ops *ops;
205 const struct iwl_mod_params *mod_params; 213 const struct iwl_mod_params *mod_params;
214 u8 valid_tx_ant;
215 u8 valid_rx_ant;
216 bool need_pll_cfg;
206}; 217};
207 218
208/*************************** 219/***************************
@@ -213,11 +224,25 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
213 struct ieee80211_ops *hw_ops); 224 struct ieee80211_ops *hw_ops);
214void iwl_hw_detect(struct iwl_priv *priv); 225void iwl_hw_detect(struct iwl_priv *priv);
215void iwl_reset_qos(struct iwl_priv *priv); 226void iwl_reset_qos(struct iwl_priv *priv);
227void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
228int iwl_check_rxon_cmd(struct iwl_priv *priv);
229int iwl_full_rxon_required(struct iwl_priv *priv);
216void iwl_set_rxon_chain(struct iwl_priv *priv); 230void iwl_set_rxon_chain(struct iwl_priv *priv);
217int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch); 231int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
218void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info); 232void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
219u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, 233u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
220 struct ieee80211_sta_ht_cap *sta_ht_inf); 234 struct ieee80211_sta_ht_cap *sta_ht_inf);
235void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band);
236void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode);
237int iwl_set_decrypted_flag(struct iwl_priv *priv,
238 struct ieee80211_hdr *hdr,
239 u32 decrypt_res,
240 struct ieee80211_rx_status *stats);
241void iwl_irq_handle_error(struct iwl_priv *priv);
242void iwl_configure_filter(struct ieee80211_hw *hw,
243 unsigned int changed_flags,
244 unsigned int *total_flags,
245 int mc_count, struct dev_addr_list *mc_list);
221int iwl_hw_nic_init(struct iwl_priv *priv); 246int iwl_hw_nic_init(struct iwl_priv *priv);
222int iwl_setup_mac(struct iwl_priv *priv); 247int iwl_setup_mac(struct iwl_priv *priv);
223int iwl_set_hw_params(struct iwl_priv *priv); 248int iwl_set_hw_params(struct iwl_priv *priv);
@@ -245,6 +270,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
245 struct iwl_rx_mem_buffer *rxb); 270 struct iwl_rx_mem_buffer *rxb);
246void iwl_rx_statistics(struct iwl_priv *priv, 271void iwl_rx_statistics(struct iwl_priv *priv,
247 struct iwl_rx_mem_buffer *rxb); 272 struct iwl_rx_mem_buffer *rxb);
273void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
248 274
249/* TX helpers */ 275/* TX helpers */
250 276
@@ -252,9 +278,18 @@ void iwl_rx_statistics(struct iwl_priv *priv,
252* TX 278* TX
253******************************************************/ 279******************************************************/
254int iwl_txq_ctx_reset(struct iwl_priv *priv); 280int iwl_txq_ctx_reset(struct iwl_priv *priv);
281void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
282int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
283 struct iwl_tx_queue *txq,
284 dma_addr_t addr, u16 len, u8 reset, u8 pad);
255int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 285int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
256void iwl_hw_txq_ctx_free(struct iwl_priv *priv); 286void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
287int iwl_hw_tx_queue_init(struct iwl_priv *priv,
288 struct iwl_tx_queue *txq);
257int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 289int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
290int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
291 int slots_num, u32 txq_id);
292void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
258int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 293int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
259int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); 294int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
260int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id); 295int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
@@ -267,7 +302,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
267 * RF -Kill - here and not in iwl-rfkill.h to be available when 302 * RF -Kill - here and not in iwl-rfkill.h to be available when
268 * RF-kill subsystem is not compiled. 303 * RF-kill subsystem is not compiled.
269 ****************************************************/ 304 ****************************************************/
270void iwl_rf_kill(struct iwl_priv *priv); 305void iwl_bg_rf_kill(struct work_struct *work);
271void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv); 306void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv);
272int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv); 307int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv);
273 308
@@ -279,6 +314,10 @@ void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
279 struct ieee80211_tx_info *info); 314 struct ieee80211_tx_info *info);
280int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); 315int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
281 316
317u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
318
319void iwl_set_rate(struct iwl_priv *priv);
320
282u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx); 321u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx);
283 322
284static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) 323static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -306,8 +345,29 @@ void iwl_init_scan_params(struct iwl_priv *priv);
306int iwl_scan_cancel(struct iwl_priv *priv); 345int iwl_scan_cancel(struct iwl_priv *priv);
307int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 346int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
308int iwl_scan_initiate(struct iwl_priv *priv); 347int iwl_scan_initiate(struct iwl_priv *priv);
348u16 iwl_fill_probe_req(struct iwl_priv *priv, enum ieee80211_band band,
349 struct ieee80211_mgmt *frame, int left);
309void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); 350void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
351u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
352 enum ieee80211_band band,
353 u8 n_probes);
354u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
355 enum ieee80211_band band);
356void iwl_bg_scan_check(struct work_struct *data);
357void iwl_bg_abort_scan(struct work_struct *work);
358void iwl_bg_scan_completed(struct work_struct *work);
310void iwl_setup_scan_deferred_work(struct iwl_priv *priv); 359void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
360int iwl_send_scan_abort(struct iwl_priv *priv);
361
362/* For faster active scanning, scan will move to the next channel if fewer than
363 * PLCP_QUIET_THRESH packets are heard on this channel within
364 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
365 * time if it's a quiet channel (nothing responded to our probe, and there's
366 * no other traffic).
367 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
368#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
369#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
370
311 371
312/******************************************************************************* 372/*******************************************************************************
313 * Calibrations - implemented in iwl-calib.c 373 * Calibrations - implemented in iwl-calib.c
@@ -342,11 +402,22 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
342 402
343int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 403int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
344 404
405int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
406 u8 meta_flag);
407
345/***************************************************** 408/*****************************************************
346 * PCI * 409 * PCI *
347 *****************************************************/ 410 *****************************************************/
348void iwl_disable_interrupts(struct iwl_priv *priv); 411void iwl_disable_interrupts(struct iwl_priv *priv);
349void iwl_enable_interrupts(struct iwl_priv *priv); 412void iwl_enable_interrupts(struct iwl_priv *priv);
413static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
414{
415 int pos;
416 u16 pci_lnk_ctl;
417 pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
418 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
419 return pci_lnk_ctl;
420}
350 421
351/***************************************************** 422/*****************************************************
352* Error Handling Debugging 423* Error Handling Debugging
@@ -354,6 +425,11 @@ void iwl_enable_interrupts(struct iwl_priv *priv);
354void iwl_dump_nic_error_log(struct iwl_priv *priv); 425void iwl_dump_nic_error_log(struct iwl_priv *priv);
355void iwl_dump_nic_event_log(struct iwl_priv *priv); 426void iwl_dump_nic_event_log(struct iwl_priv *priv);
356 427
428/*****************************************************
429* GEOS
430******************************************************/
431int iwlcore_init_geos(struct iwl_priv *priv);
432void iwlcore_free_geos(struct iwl_priv *priv);
357 433
358/*************** DRIVER STATUS FUNCTIONS *****/ 434/*************** DRIVER STATUS FUNCTIONS *****/
359 435
@@ -422,6 +498,7 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
422} 498}
423 499
424extern void iwl_rf_kill_ct_config(struct iwl_priv *priv); 500extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
501extern int iwl_send_bt_config(struct iwl_priv *priv);
425extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags); 502extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags);
426extern int iwl_verify_ucode(struct iwl_priv *priv); 503extern int iwl_verify_ucode(struct iwl_priv *priv);
427extern int iwl_send_lq_cmd(struct iwl_priv *priv, 504extern int iwl_send_lq_cmd(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index f34ede44ed1..5028c781275 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -211,6 +211,9 @@
211#define CSR_HW_REV_TYPE_5350 (0x0000030) 211#define CSR_HW_REV_TYPE_5350 (0x0000030)
212#define CSR_HW_REV_TYPE_5100 (0x0000050) 212#define CSR_HW_REV_TYPE_5100 (0x0000050)
213#define CSR_HW_REV_TYPE_5150 (0x0000040) 213#define CSR_HW_REV_TYPE_5150 (0x0000040)
214#define CSR_HW_REV_TYPE_100 (0x0000060)
215#define CSR_HW_REV_TYPE_6x00 (0x0000070)
216#define CSR_HW_REV_TYPE_6x50 (0x0000080)
214#define CSR_HW_REV_TYPE_NONE (0x00000F0) 217#define CSR_HW_REV_TYPE_NONE (0x00000F0)
215 218
216/* EEPROM REG */ 219/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 56c13b458de..65d1a7f2db9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -29,16 +29,29 @@
29#ifndef __iwl_debug_h__ 29#ifndef __iwl_debug_h__
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32struct iwl_priv;
33
34#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
35#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
36#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
37#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
38
32#ifdef CONFIG_IWLWIFI_DEBUG 39#ifdef CONFIG_IWLWIFI_DEBUG
33#define IWL_DEBUG(level, fmt, args...) \ 40#define IWL_DEBUG(__priv, level, fmt, args...) \
34do { if (priv->debug_level & (level)) \ 41do { \
35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \ 42 if (__priv->debug_level & (level)) \
36 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) 43 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
44 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
45 __func__ , ## args); \
46} while (0)
37 47
38#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 48#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
39do { if ((priv->debug_level & (level)) && net_ratelimit()) \ 49do { \
40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \ 50 if ((__priv->debug_level & (level)) && net_ratelimit()) \
41 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) 51 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
52 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
53 __func__ , ## args); \
54} while (0)
42 55
43#define iwl_print_hex_dump(priv, level, p, len) \ 56#define iwl_print_hex_dump(priv, level, p, len) \
44do { \ 57do { \
@@ -61,6 +74,7 @@ struct iwl_debugfs {
61 struct dentry *file_tx_statistics; 74 struct dentry *file_tx_statistics;
62 struct dentry *file_log_event; 75 struct dentry *file_log_event;
63 struct dentry *file_channels; 76 struct dentry *file_channels;
77 struct dentry *file_status;
64 } dbgfs_data_files; 78 } dbgfs_data_files;
65 struct dir_rf_files { 79 struct dir_rf_files {
66 struct dentry *file_disable_sensitivity; 80 struct dentry *file_disable_sensitivity;
@@ -76,8 +90,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
76#endif 90#endif
77 91
78#else 92#else
79#define IWL_DEBUG(level, fmt, args...) 93#define IWL_DEBUG(__priv, level, fmt, args...)
80#define IWL_DEBUG_LIMIT(level, fmt, args...) 94#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
81static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, 95static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
82 void *p, u32 len) 96 void *p, u32 len)
83{} 97{}
@@ -117,84 +131,85 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
117 * when CONFIG_IWLWIFI_DEBUG=y. 131 * when CONFIG_IWLWIFI_DEBUG=y.
118 */ 132 */
119 133
134/* 0x0000000F - 0x00000001 */
120#define IWL_DL_INFO (1 << 0) 135#define IWL_DL_INFO (1 << 0)
121#define IWL_DL_MAC80211 (1 << 1) 136#define IWL_DL_MAC80211 (1 << 1)
122#define IWL_DL_HCMD (1 << 2) 137#define IWL_DL_HCMD (1 << 2)
123#define IWL_DL_STATE (1 << 3) 138#define IWL_DL_STATE (1 << 3)
139/* 0x000000F0 - 0x00000010 */
124#define IWL_DL_MACDUMP (1 << 4) 140#define IWL_DL_MACDUMP (1 << 4)
125#define IWL_DL_HCMD_DUMP (1 << 5) 141#define IWL_DL_HCMD_DUMP (1 << 5)
126#define IWL_DL_RADIO (1 << 7) 142#define IWL_DL_RADIO (1 << 7)
127#define IWL_DL_POWER (1 << 8) 143/* 0x00000F00 - 0x00000100 */
128#define IWL_DL_TEMP (1 << 9) 144#define IWL_DL_POWER (1 << 8)
129 145#define IWL_DL_TEMP (1 << 9)
130#define IWL_DL_NOTIF (1 << 10) 146#define IWL_DL_NOTIF (1 << 10)
131#define IWL_DL_SCAN (1 << 11) 147#define IWL_DL_SCAN (1 << 11)
132#define IWL_DL_ASSOC (1 << 12) 148/* 0x0000F000 - 0x00001000 */
133#define IWL_DL_DROP (1 << 13) 149#define IWL_DL_ASSOC (1 << 12)
134 150#define IWL_DL_DROP (1 << 13)
135#define IWL_DL_TXPOWER (1 << 14) 151#define IWL_DL_TXPOWER (1 << 14)
136 152#define IWL_DL_AP (1 << 15)
137#define IWL_DL_AP (1 << 15) 153/* 0x000F0000 - 0x00010000 */
138 154#define IWL_DL_FW (1 << 16)
139#define IWL_DL_FW (1 << 16) 155#define IWL_DL_RF_KILL (1 << 17)
140#define IWL_DL_RF_KILL (1 << 17) 156#define IWL_DL_FW_ERRORS (1 << 18)
141#define IWL_DL_FW_ERRORS (1 << 18) 157#define IWL_DL_LED (1 << 19)
142 158/* 0x00F00000 - 0x00100000 */
143#define IWL_DL_LED (1 << 19) 159#define IWL_DL_RATE (1 << 20)
144 160#define IWL_DL_CALIB (1 << 21)
145#define IWL_DL_RATE (1 << 20) 161#define IWL_DL_WEP (1 << 22)
146 162#define IWL_DL_TX (1 << 23)
147#define IWL_DL_CALIB (1 << 21) 163/* 0x0F000000 - 0x01000000 */
148#define IWL_DL_WEP (1 << 22) 164#define IWL_DL_RX (1 << 24)
149#define IWL_DL_TX (1 << 23) 165#define IWL_DL_ISR (1 << 25)
150#define IWL_DL_RX (1 << 24) 166#define IWL_DL_HT (1 << 26)
151#define IWL_DL_ISR (1 << 25) 167#define IWL_DL_IO (1 << 27)
152#define IWL_DL_HT (1 << 26) 168/* 0xF0000000 - 0x10000000 */
153#define IWL_DL_IO (1 << 27) 169#define IWL_DL_11H (1 << 28)
154#define IWL_DL_11H (1 << 28) 170#define IWL_DL_STATS (1 << 29)
155 171#define IWL_DL_TX_REPLY (1 << 30)
156#define IWL_DL_STATS (1 << 29) 172#define IWL_DL_QOS (1 << 31)
157#define IWL_DL_TX_REPLY (1 << 30) 173
158#define IWL_DL_QOS (1 << 31) 174#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
159 175#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
160#define IWL_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a) 176#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
161#define IWL_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a) 177#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
162#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a) 178#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
163 179#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
164#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a) 180#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
165#define IWL_DEBUG_MACDUMP(f, a...) IWL_DEBUG(IWL_DL_MACDUMP, f, ## a) 181#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
166#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a) 182#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
167#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a) 183#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
168#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a) 184#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
169#define IWL_DEBUG_TX(f, a...) IWL_DEBUG(IWL_DL_TX, f, ## a) 185#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
170#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a) 186#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
171#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a) 187#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
172#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a) 188#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
173#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HCMD, f, ## a) 189#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
174#define IWL_DEBUG_HC_DUMP(f, a...) IWL_DEBUG(IWL_DL_HCMD_DUMP, f, ## a) 190#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
175#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a) 191 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
176#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a) 192#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
177#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a) 193#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
178#define IWL_DEBUG_DROP(f, a...) IWL_DEBUG(IWL_DL_DROP, f, ## a) 194#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
179#define IWL_DEBUG_DROP_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_DROP, f, ## a) 195#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
180#define IWL_DEBUG_AP(f, a...) IWL_DEBUG(IWL_DL_AP, f, ## a) 196#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
181#define IWL_DEBUG_TXPOWER(f, a...) IWL_DEBUG(IWL_DL_TXPOWER, f, ## a) 197 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
182#define IWL_DEBUG_IO(f, a...) IWL_DEBUG(IWL_DL_IO, f, ## a) 198#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
183#define IWL_DEBUG_RATE(f, a...) IWL_DEBUG(IWL_DL_RATE, f, ## a) 199#define IWL_DEBUG_ASSOC(p, f, a...) \
184#define IWL_DEBUG_RATE_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_RATE, f, ## a) 200 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
185#define IWL_DEBUG_NOTIF(f, a...) IWL_DEBUG(IWL_DL_NOTIF, f, ## a) 201#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
186#define IWL_DEBUG_ASSOC(f, a...) IWL_DEBUG(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) 202 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
187#define IWL_DEBUG_ASSOC_LIMIT(f, a...) \ 203#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
188 IWL_DEBUG_LIMIT(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) 204#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
189#define IWL_DEBUG_HT(f, a...) IWL_DEBUG(IWL_DL_HT, f, ## a) 205#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
190#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a) 206 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
191#define IWL_DEBUG_STATS_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_STATS, f, ## a) 207#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
192#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a) 208#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
193#define IWL_DEBUG_TX_REPLY_LIMIT(f, a...) \ 209 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
194 IWL_DEBUG_LIMIT(IWL_DL_TX_REPLY, f, ## a) 210#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
195#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a) 211#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
196#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a) 212#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
197#define IWL_DEBUG_POWER(f, a...) IWL_DEBUG(IWL_DL_POWER, f, ## a) 213#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
198#define IWL_DEBUG_11H(f, a...) IWL_DEBUG(IWL_DL_11H, f, ## a)
199 214
200#endif 215#endif
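The practical effect on callers, visible throughout the rest of the patch: every error, warning, and debug macro now takes the device context as its first argument, so messages are routed through dev_err()/dev_printk() against the right device instead of a bare printk() prefixed with DRV_NAME. For example:

	/* old style */
	IWL_ERROR("Can not allocate Buffer\n");
	IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");

	/* new style */
	IWL_ERR(priv, "Can not allocate Buffer\n");
	IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");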
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index d5253a179de..36cfeccfafb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -63,6 +63,14 @@
63 goto err; \ 63 goto err; \
64} while (0) 64} while (0)
65 65
66#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
67 dbgfs->dbgfs_##parent##_files.file_##name = \
68 debugfs_create_x32(#name, 0444, dbgfs->dir_##parent, ptr); \
69 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
70 || !dbgfs->dbgfs_##parent##_files.file_##name) \
71 goto err; \
72} while (0)
73
66#define DEBUGFS_REMOVE(name) do { \ 74#define DEBUGFS_REMOVE(name) do { \
67 debugfs_remove(name); \ 75 debugfs_remove(name); \
68 name = NULL; \ 76 name = NULL; \
@@ -164,9 +172,6 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
164 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 172 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
165 const size_t bufsz = sizeof(buf); 173 const size_t bufsz = sizeof(buf);
166 174
167 printk(KERN_DEBUG "offset is: 0x%x\tlen is: 0x%x\n",
168 priv->dbgfs->sram_offset, priv->dbgfs->sram_len);
169
170 iwl_grab_nic_access(priv); 175 iwl_grab_nic_access(priv);
171 for (i = priv->dbgfs->sram_len; i > 0; i -= 4) { 176 for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
172 val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \ 177 val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
@@ -301,14 +306,14 @@ static ssize_t iwl_dbgfs_eeprom_read(struct file *file,
301 buf_size = 4 * eeprom_len + 256; 306 buf_size = 4 * eeprom_len + 256;
302 307
303 if (eeprom_len % 16) { 308 if (eeprom_len % 16) {
304 IWL_ERROR("EEPROM size is not multiple of 16.\n"); 309 IWL_ERR(priv, "EEPROM size is not multiple of 16.\n");
305 return -ENODATA; 310 return -ENODATA;
306 } 311 }
307 312
308 /* 4 characters for byte 0xYY */ 313 /* 4 characters for byte 0xYY */
309 buf = kzalloc(buf_size, GFP_KERNEL); 314 buf = kzalloc(buf_size, GFP_KERNEL);
310 if (!buf) { 315 if (!buf) {
311 IWL_ERROR("Can not allocate Buffer\n"); 316 IWL_ERR(priv, "Can not allocate Buffer\n");
312 return -ENOMEM; 317 return -ENOMEM;
313 } 318 }
314 319
@@ -365,7 +370,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
365 370
366 buf = kzalloc(bufsz, GFP_KERNEL); 371 buf = kzalloc(bufsz, GFP_KERNEL);
367 if (!buf) { 372 if (!buf) {
368 IWL_ERROR("Can not allocate Buffer\n"); 373 IWL_ERR(priv, "Can not allocate Buffer\n");
369 return -ENOMEM; 374 return -ENOMEM;
370 } 375 }
371 376
@@ -420,7 +425,6 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
420 return ret; 425 return ret;
421} 426}
422 427
423
424DEBUGFS_READ_WRITE_FILE_OPS(sram); 428DEBUGFS_READ_WRITE_FILE_OPS(sram);
425DEBUGFS_WRITE_FILE_OPS(log_event); 429DEBUGFS_WRITE_FILE_OPS(log_event);
426DEBUGFS_READ_FILE_OPS(eeprom); 430DEBUGFS_READ_FILE_OPS(eeprom);
@@ -462,6 +466,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
462 DEBUGFS_ADD_FILE(rx_statistics, data); 466 DEBUGFS_ADD_FILE(rx_statistics, data);
463 DEBUGFS_ADD_FILE(tx_statistics, data); 467 DEBUGFS_ADD_FILE(tx_statistics, data);
464 DEBUGFS_ADD_FILE(channels, data); 468 DEBUGFS_ADD_FILE(channels, data);
469 DEBUGFS_ADD_X32(status, data, (u32 *)&priv->status);
465 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 470 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
466 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 471 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
467 &priv->disable_chain_noise_cal); 472 &priv->disable_chain_noise_cal);
@@ -469,7 +474,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
469 return 0; 474 return 0;
470 475
471err: 476err:
472 IWL_ERROR("Can't open the debugfs directory\n"); 477 IWL_ERR(priv, "Can't open the debugfs directory\n");
473 iwl_dbgfs_unregister(priv); 478 iwl_dbgfs_unregister(priv);
474 return ret; 479 return ret;
475} 480}
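For reference, the DEBUGFS_ADD_X32(status, data, (u32 *)&priv->status) call added above expands, per the macro body in the first hunk of this file, to roughly:

	dbgfs->dbgfs_data_files.file_status =
		debugfs_create_x32("status", 0444, dbgfs->dir_data,
				   (u32 *)&priv->status);
	if (IS_ERR(dbgfs->dbgfs_data_files.file_status) ||
	    !dbgfs->dbgfs_data_files.file_status)
		goto err;

i.e. priv->status becomes readable as a hexadecimal debugfs file named "status" in the data directory.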
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 0468fcc1ea9..afde713c806 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -36,13 +36,15 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39#define DRV_NAME "iwlagn"
40#include "iwl-rfkill.h"
41#include "iwl-eeprom.h" 39#include "iwl-eeprom.h"
42#include "iwl-4965-hw.h"
43#include "iwl-csr.h" 40#include "iwl-csr.h"
44#include "iwl-prph.h" 41#include "iwl-prph.h"
42#include "iwl-fh.h"
45#include "iwl-debug.h" 43#include "iwl-debug.h"
44#include "iwl-rfkill.h"
45#include "iwl-4965-hw.h"
46#include "iwl-3945-hw.h"
47#include "iwl-3945-led.h"
46#include "iwl-led.h" 48#include "iwl-led.h"
47#include "iwl-power.h" 49#include "iwl-power.h"
48#include "iwl-agn-rs.h" 50#include "iwl-agn-rs.h"
@@ -55,6 +57,28 @@ extern struct iwl_cfg iwl5350_agn_cfg;
55extern struct iwl_cfg iwl5100_bg_cfg; 57extern struct iwl_cfg iwl5100_bg_cfg;
56extern struct iwl_cfg iwl5100_abg_cfg; 58extern struct iwl_cfg iwl5100_abg_cfg;
57extern struct iwl_cfg iwl5150_agn_cfg; 59extern struct iwl_cfg iwl5150_agn_cfg;
60extern struct iwl_cfg iwl6000_2ag_cfg;
61extern struct iwl_cfg iwl6000_2agn_cfg;
62extern struct iwl_cfg iwl6000_3agn_cfg;
63extern struct iwl_cfg iwl6050_2agn_cfg;
64extern struct iwl_cfg iwl6050_3agn_cfg;
65extern struct iwl_cfg iwl100_bgn_cfg;
66
67/* shared structures from iwl-5000.c */
68extern struct iwl_mod_params iwl50_mod_params;
69extern struct iwl_ops iwl5000_ops;
70extern struct iwl_lib_ops iwl5000_lib;
71extern struct iwl_hcmd_ops iwl5000_hcmd;
72extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
73
74/* shared functions from iwl-5000.c */
75extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
76extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd,
77 u8 *data);
78extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
79 __le32 *tx_flags);
80extern int iwl5000_calc_rssi(struct iwl_priv *priv,
81 struct iwl_rx_phy_res *rx_resp);
58 82
59/* CT-KILL constants */ 83/* CT-KILL constants */
60#define CT_KILL_THRESHOLD 110 /* in Celsius */ 84#define CT_KILL_THRESHOLD 110 /* in Celsius */
@@ -132,9 +156,12 @@ struct iwl_tx_info {
132 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 156 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
133 * descriptors) and required locking structures. 157 * descriptors) and required locking structures.
134 */ 158 */
159#define TFD_TX_CMD_SLOTS 256
160#define TFD_CMD_SLOTS 32
161
135struct iwl_tx_queue { 162struct iwl_tx_queue {
136 struct iwl_queue q; 163 struct iwl_queue q;
137 struct iwl_tfd *tfds; 164 void *tfds;
138 struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS]; 165 struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
139 struct iwl_tx_info *txb; 166 struct iwl_tx_info *txb;
140 u8 need_update; 167 u8 need_update;
@@ -154,6 +181,36 @@ struct iwl4965_channel_tgh_info {
154 s64 last_radar_time; 181 s64 last_radar_time;
155}; 182};
156 183
184#define IWL4965_MAX_RATE (33)
185
186struct iwl3945_clip_group {
187 /* maximum power level to prevent clipping for each rate, derived by
188 * us from this band's saturation power in EEPROM */
189 const s8 clip_powers[IWL_MAX_RATES];
190};
191
192/* current Tx power values to use, one for each rate for each channel.
193 * requested power is limited by:
194 * -- regulatory EEPROM limits for this channel
195 * -- hardware capabilities (clip-powers)
196 * -- spectrum management
197 * -- user preference (e.g. iwconfig)
198 * when requested power is set, base power index must also be set. */
199struct iwl3945_channel_power_info {
200 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
201 s8 power_table_index; /* actual (compenst'd) index into gain table */
202 s8 base_power_index; /* gain index for power at factory temp. */
203 s8 requested_power; /* power (dBm) requested for this chnl/rate */
204};
205
206/* current scan Tx power values to use, one for each scan rate for each
207 * channel. */
208struct iwl3945_scan_power_info {
209 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
210 s8 power_table_index; /* actual (compenst'd) index into gain table */
211 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
212};
213
157/* 214/*
158 * One for each channel, holds all channel setup data 215 * One for each channel, holds all channel setup data
159 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant 216 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
@@ -184,8 +241,15 @@ struct iwl_channel_info {
184 s8 fat_scan_power; /* (dBm) eeprom, direct scans, any rate */ 241 s8 fat_scan_power; /* (dBm) eeprom, direct scans, any rate */
185 u8 fat_flags; /* flags copied from EEPROM */ 242 u8 fat_flags; /* flags copied from EEPROM */
186 u8 fat_extension_channel; /* HT_IE_EXT_CHANNEL_* */ 243 u8 fat_extension_channel; /* HT_IE_EXT_CHANNEL_* */
187};
188 244
245 /* Radio/DSP gain settings for each "normal" data Tx rate.
246 * These include, in addition to RF and DSP gain, a few fields for
247 * remembering/modifying gain settings (indexes). */
248 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
249
250 /* Radio/DSP gain settings for each scan rate, for directed scans. */
251 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
252};
189 253
190#define IWL_TX_FIFO_AC0 0 254#define IWL_TX_FIFO_AC0 0
191#define IWL_TX_FIFO_AC1 1 255#define IWL_TX_FIFO_AC1 1
@@ -370,7 +434,7 @@ struct iwl_hw_key {
370 u8 key[32]; 434 u8 key[32];
371}; 435};
372 436
373union iwl4965_ht_rate_supp { 437union iwl_ht_rate_supp {
374 u16 rates; 438 u16 rates;
375 struct { 439 struct {
376 u8 siso_rate; 440 u8 siso_rate;
@@ -430,6 +494,24 @@ struct iwl_qos_info {
430#define STA_PS_STATUS_WAKE 0 494#define STA_PS_STATUS_WAKE 0
431#define STA_PS_STATUS_SLEEP 1 495#define STA_PS_STATUS_SLEEP 1
432 496
497struct iwl3945_tid_data {
498 u16 seq_number;
499};
500
501struct iwl3945_hw_key {
502 enum ieee80211_key_alg alg;
503 int keylen;
504 u8 key[32];
505};
506
507struct iwl3945_station_entry {
508 struct iwl3945_addsta_cmd sta;
509 struct iwl3945_tid_data tid[MAX_TID_COUNT];
510 u8 used;
511 u8 ps_status;
512 struct iwl3945_hw_key keyinfo;
513};
514
433struct iwl_station_entry { 515struct iwl_station_entry {
434 struct iwl_addsta_cmd sta; 516 struct iwl_addsta_cmd sta;
435 struct iwl_tid_data tid[MAX_TID_COUNT]; 517 struct iwl_tid_data tid[MAX_TID_COUNT];
@@ -497,11 +579,13 @@ struct iwl_sensitivity_ranges {
497 * @max_txq_num: Max # Tx queues supported 579 * @max_txq_num: Max # Tx queues supported
498 * @dma_chnl_num: Number of Tx DMA/FIFO channels 580 * @dma_chnl_num: Number of Tx DMA/FIFO channels
499 * @scd_bc_tbls_size: size of scheduler byte count tables 581 * @scd_bc_tbls_size: size of scheduler byte count tables
582 * @tfd_size: TFD size
500 * @tx/rx_chains_num: Number of TX/RX chains 583 * @tx/rx_chains_num: Number of TX/RX chains
501 * @valid_tx/rx_ant: usable antennas 584 * @valid_tx/rx_ant: usable antennas
502 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 585 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
503 * @max_rxq_log: Log-base-2 of max_rxq_size 586 * @max_rxq_log: Log-base-2 of max_rxq_size
504 * @rx_buf_size: Rx buffer size 587 * @rx_buf_size: Rx buffer size
588 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
505 * @max_stations: 589 * @max_stations:
506 * @bcast_sta_id: 590 * @bcast_sta_id:
507 * @fat_channel: is 40MHz width possible in band 2.4 591 * @fat_channel: is 40MHz width possible in band 2.4
@@ -516,6 +600,7 @@ struct iwl_hw_params {
516 u8 max_txq_num; 600 u8 max_txq_num;
517 u8 dma_chnl_num; 601 u8 dma_chnl_num;
518 u16 scd_bc_tbls_size; 602 u16 scd_bc_tbls_size;
603 u32 tfd_size;
519 u8 tx_chains_num; 604 u8 tx_chains_num;
520 u8 rx_chains_num; 605 u8 rx_chains_num;
521 u8 valid_tx_ant; 606 u8 valid_tx_ant;
@@ -523,6 +608,7 @@ struct iwl_hw_params {
523 u16 max_rxq_size; 608 u16 max_rxq_size;
524 u16 max_rxq_log; 609 u16 max_rxq_log;
525 u32 rx_buf_size; 610 u32 rx_buf_size;
611 u32 rx_wrt_ptr_reg;
526 u32 max_pkt_size; 612 u32 max_pkt_size;
527 u8 max_stations; 613 u8 max_stations;
528 u8 bcast_sta_id; 614 u8 bcast_sta_id;
@@ -755,7 +841,7 @@ struct iwl_priv {
755 841
756 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 842 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
757 843
758#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT 844#if defined(CONFIG_IWLAGN_SPECTRUM_MEASUREMENT) || defined(CONFIG_IWL3945_SPECTRUM_MEASUREMENT)
759 /* spectrum measurement report caching */ 845 /* spectrum measurement report caching */
760 struct iwl_spectrum_notification measure_report; 846 struct iwl_spectrum_notification measure_report;
761 u8 measurement_status; 847 u8 measurement_status;
@@ -768,6 +854,10 @@ struct iwl_priv {
768 struct iwl_channel_info *channel_info; /* channel info array */ 854 struct iwl_channel_info *channel_info; /* channel info array */
769 u8 channel_count; /* # of channels */ 855 u8 channel_count; /* # of channels */
770 856
857 /* each calibration channel group in the EEPROM has a derived
858 * clip setting for each rate. 3945 only.*/
859 const struct iwl3945_clip_group clip39_groups[5];
860
771 /* thermal calibration */ 861 /* thermal calibration */
772 s32 temperature; /* degrees Kelvin */ 862 s32 temperature; /* degrees Kelvin */
773 s32 last_temperature; 863 s32 last_temperature;
@@ -781,7 +871,7 @@ struct iwl_priv {
781 unsigned long scan_start; 871 unsigned long scan_start;
782 unsigned long scan_pass_start; 872 unsigned long scan_pass_start;
783 unsigned long scan_start_tsf; 873 unsigned long scan_start_tsf;
784 struct iwl_scan_cmd *scan; 874 void *scan;
785 int scan_bands; 875 int scan_bands;
786 int one_direct_scan; 876 int one_direct_scan;
787 u8 direct_ssid_len; 877 u8 direct_ssid_len;
@@ -832,18 +922,25 @@ struct iwl_priv {
832 * 4965's initialize alive response contains some calibration data. */ 922 * 4965's initialize alive response contains some calibration data. */
833 struct iwl_init_alive_resp card_alive_init; 923 struct iwl_init_alive_resp card_alive_init;
834 struct iwl_alive_resp card_alive; 924 struct iwl_alive_resp card_alive;
835#ifdef CONFIG_IWLWIFI_RFKILL 925#if defined(CONFIG_IWLWIFI_RFKILL) || defined(CONFIG_IWL3945_RFKILL)
836 struct rfkill *rfkill; 926 struct rfkill *rfkill;
837#endif 927#endif
838 928
839#ifdef CONFIG_IWLWIFI_LEDS 929#if defined(CONFIG_IWLWIFI_LEDS) || defined(CONFIG_IWL3945_LEDS)
840 struct iwl_led led[IWL_LED_TRG_MAX];
841 unsigned long last_blink_time; 930 unsigned long last_blink_time;
842 u8 last_blink_rate; 931 u8 last_blink_rate;
843 u8 allow_blinking; 932 u8 allow_blinking;
844 u64 led_tpt; 933 u64 led_tpt;
845#endif 934#endif
846 935
936#ifdef CONFIG_IWLWIFI_LEDS
937 struct iwl_led led[IWL_LED_TRG_MAX];
938#endif
939
940#ifdef CONFIG_IWL3945_LEDS
941 struct iwl3945_led led39[IWL_LED_TRG_MAX];
942 unsigned int rxtxpackets;
943#endif
847 u16 active_rate; 944 u16 active_rate;
848 u16 active_rate_basic; 945 u16 active_rate_basic;
849 946
@@ -893,7 +990,6 @@ struct iwl_priv {
893 u16 rates_mask; 990 u16 rates_mask;
894 991
895 u32 power_mode; 992 u32 power_mode;
896 u32 antenna;
897 u8 bssid[ETH_ALEN]; 993 u8 bssid[ETH_ALEN];
898 u16 rts_threshold; 994 u16 rts_threshold;
899 u8 mac_addr[ETH_ALEN]; 995 u8 mac_addr[ETH_ALEN];
@@ -929,6 +1025,10 @@ struct iwl_priv {
929 u16 beacon_int; 1025 u16 beacon_int;
930 struct ieee80211_vif *vif; 1026 struct ieee80211_vif *vif;
931 1027
1028 /*Added for 3945 */
1029 void *shared_virt;
1030 dma_addr_t shared_phys;
1031 /*End*/
932 struct iwl_hw_params hw_params; 1032 struct iwl_hw_params hw_params;
933 1033
934 1034
@@ -960,6 +1060,11 @@ struct iwl_priv {
960 struct delayed_work init_alive_start; 1060 struct delayed_work init_alive_start;
961 struct delayed_work alive_start; 1061 struct delayed_work alive_start;
962 struct delayed_work scan_check; 1062 struct delayed_work scan_check;
1063
1064 /*For 3945 only*/
1065 struct delayed_work thermal_periodic;
1066 struct delayed_work rfkill_poll;
1067
963 /* TX Power */ 1068 /* TX Power */
964 s8 tx_power_user_lmt; 1069 s8 tx_power_user_lmt;
965 s8 tx_power_channel_lmt; 1070 s8 tx_power_channel_lmt;
@@ -982,6 +1087,15 @@ struct iwl_priv {
982 u32 disable_tx_power_cal; 1087 u32 disable_tx_power_cal;
983 struct work_struct run_time_calib_work; 1088 struct work_struct run_time_calib_work;
984 struct timer_list statistics_periodic; 1089 struct timer_list statistics_periodic;
1090
1091 /*For 3945*/
1092#define IWL_DEFAULT_TX_POWER 0x0F
1093
1094 struct iwl3945_notif_statistics statistics_39;
1095
1096 struct iwl3945_station_entry stations_39[IWL_STATION_COUNT];
1097
1098 u32 sta_supp_rates;
985}; /*iwl_priv */ 1099}; /*iwl_priv */
986 1100
987static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1101static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index ce2f47306ce..75517d05df0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -145,7 +145,7 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
145{ 145{
146 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 146 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
147 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 147 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
148 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); 148 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
149 return -ENOENT; 149 return -ENOENT;
150 } 150 }
151 return 0; 151 return 0;
@@ -173,7 +173,7 @@ int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
173 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 173 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
174 EEPROM_SEM_TIMEOUT); 174 EEPROM_SEM_TIMEOUT);
175 if (ret >= 0) { 175 if (ret >= 0) {
176 IWL_DEBUG_IO("Acquired semaphore after %d tries.\n", 176 IWL_DEBUG_IO(priv, "Acquired semaphore after %d tries.\n",
177 count+1); 177 count+1);
178 return ret; 178 return ret;
179 } 179 }
@@ -223,7 +223,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
223 223
224 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); 224 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
225 if (ret < 0) { 225 if (ret < 0) {
226 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); 226 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
227 ret = -ENOENT; 227 ret = -ENOENT;
228 goto err; 228 goto err;
229 } 229 }
@@ -231,7 +231,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
231 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 231 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
232 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); 232 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
233 if (ret < 0) { 233 if (ret < 0) {
234 IWL_ERROR("Failed to acquire EEPROM semaphore.\n"); 234 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
235 ret = -ENOENT; 235 ret = -ENOENT;
236 goto err; 236 goto err;
237 } 237 }
@@ -247,7 +247,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
247 CSR_EEPROM_REG_READ_VALID_MSK, 247 CSR_EEPROM_REG_READ_VALID_MSK,
248 IWL_EEPROM_ACCESS_TIMEOUT); 248 IWL_EEPROM_ACCESS_TIMEOUT);
249 if (ret < 0) { 249 if (ret < 0) {
250 IWL_ERROR("Time out reading EEPROM[%d]\n", addr); 250 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
251 goto done; 251 goto done;
252 } 252 }
253 r = _iwl_read_direct32(priv, CSR_EEPROM_REG); 253 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
@@ -285,7 +285,7 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
285 285
286 return 0; 286 return 0;
287err: 287err:
288 IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", 288 IWL_ERR(priv, "Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
289 eeprom_ver, priv->cfg->eeprom_ver, 289 eeprom_ver, priv->cfg->eeprom_ver,
290 calib_ver, priv->cfg->eeprom_calib_ver); 290 calib_ver, priv->cfg->eeprom_calib_ver);
291 return -EINVAL; 291 return -EINVAL;
@@ -390,7 +390,7 @@ static int iwl_set_fat_chan_info(struct iwl_priv *priv,
390 if (!is_channel_valid(ch_info)) 390 if (!is_channel_valid(ch_info))
391 return -1; 391 return -1;
392 392
393 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" 393 IWL_DEBUG_INFO(priv, "FAT Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
394 " Ad-Hoc %ssupported\n", 394 " Ad-Hoc %ssupported\n",
395 ch_info->channel, 395 ch_info->channel,
396 is_channel_a_band(ch_info) ? 396 is_channel_a_band(ch_info) ?
@@ -432,11 +432,11 @@ int iwl_init_channel_map(struct iwl_priv *priv)
432 struct iwl_channel_info *ch_info; 432 struct iwl_channel_info *ch_info;
433 433
434 if (priv->channel_count) { 434 if (priv->channel_count) {
435 IWL_DEBUG_INFO("Channel map already initialized.\n"); 435 IWL_DEBUG_INFO(priv, "Channel map already initialized.\n");
436 return 0; 436 return 0;
437 } 437 }
438 438
439 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); 439 IWL_DEBUG_INFO(priv, "Initializing regulatory info from EEPROM\n");
440 440
441 priv->channel_count = 441 priv->channel_count =
442 ARRAY_SIZE(iwl_eeprom_band_1) + 442 ARRAY_SIZE(iwl_eeprom_band_1) +
@@ -445,12 +445,12 @@ int iwl_init_channel_map(struct iwl_priv *priv)
445 ARRAY_SIZE(iwl_eeprom_band_4) + 445 ARRAY_SIZE(iwl_eeprom_band_4) +
446 ARRAY_SIZE(iwl_eeprom_band_5); 446 ARRAY_SIZE(iwl_eeprom_band_5);
447 447
448 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count); 448 IWL_DEBUG_INFO(priv, "Parsing data for %d channels.\n", priv->channel_count);
449 449
450 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * 450 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
451 priv->channel_count, GFP_KERNEL); 451 priv->channel_count, GFP_KERNEL);
452 if (!priv->channel_info) { 452 if (!priv->channel_info) {
453 IWL_ERROR("Could not allocate channel_info\n"); 453 IWL_ERR(priv, "Could not allocate channel_info\n");
454 priv->channel_count = 0; 454 priv->channel_count = 0;
455 return -ENOMEM; 455 return -ENOMEM;
456 } 456 }
@@ -485,7 +485,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
485 IEEE80211_CHAN_NO_FAT_BELOW); 485 IEEE80211_CHAN_NO_FAT_BELOW);
486 486
487 if (!(is_channel_valid(ch_info))) { 487 if (!(is_channel_valid(ch_info))) {
488 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " 488 IWL_DEBUG_INFO(priv, "Ch. %d Flags %x [%sGHz] - "
489 "No traffic\n", 489 "No traffic\n",
490 ch_info->channel, 490 ch_info->channel,
491 ch_info->flags, 491 ch_info->flags,
@@ -501,7 +501,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
501 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 501 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
502 ch_info->min_power = 0; 502 ch_info->min_power = 0;
503 503
504 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):" 504 IWL_DEBUG_INFO(priv, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
505 " Ad-Hoc %ssupported\n", 505 " Ad-Hoc %ssupported\n",
506 ch_info->channel, 506 ch_info->channel,
507 is_channel_a_band(ch_info) ? 507 is_channel_a_band(ch_info) ?
@@ -520,7 +520,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
520 flags & EEPROM_CHANNEL_RADAR)) 520 flags & EEPROM_CHANNEL_RADAR))
521 ? "" : "not "); 521 ? "" : "not ");
522 522
523 /* Set the user_txpower_limit to the highest power 523 /* Set the tx_power_user_lmt to the highest power
524 * supported by any channel */ 524 * supported by any channel */
525 if (eeprom_ch_info[ch].max_power_avg > 525 if (eeprom_ch_info[ch].max_power_avg >
526 priv->tx_power_user_lmt) 526 priv->tx_power_user_lmt)
@@ -531,6 +531,13 @@ int iwl_init_channel_map(struct iwl_priv *priv)
531 } 531 }
532 } 532 }
533 533
534 /* Check if we do have FAT channels */
535 if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
536 EEPROM_REGULATORY_BAND_NO_FAT &&
537 priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
538 EEPROM_REGULATORY_BAND_NO_FAT)
539 return 0;
540
534 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */ 541 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
535 for (band = 6; band <= 7; band++) { 542 for (band = 6; band <= 7; band++) {
536 enum ieee80211_band ieeeband; 543 enum ieee80211_band ieeeband;
@@ -582,6 +589,7 @@ void iwl_free_channel_map(struct iwl_priv *priv)
582 kfree(priv->channel_info); 589 kfree(priv->channel_info);
583 priv->channel_count = 0; 590 priv->channel_count = 0;
584} 591}
592EXPORT_SYMBOL(iwl_free_channel_map);
585 593
586/** 594/**
587 * iwl_get_channel_info - Find driver's private channel info 595 * iwl_get_channel_info - Find driver's private channel info
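Note on the recurring change in this file (and throughout the series): the context-free IWL_ERROR()/IWL_WARNING()/IWL_DEBUG_*() macros are replaced by variants that take the iwl_priv pointer as their first argument, so messages can be attributed to a specific device. The macro definitions themselves are not part of the hunks shown here; judging only from the converted call sites, the non-debug helpers are presumably thin wrappers over the dev_*() printk helpers, roughly as in this sketch (not taken from the patch; the real definitions live in iwl-debug.h and may differ):

/* Sketch only -- approximate shape implied by the call sites above. */
#define IWL_ERR(priv, fmt, args...) \
	dev_err(&(priv)->pci_dev->dev, fmt, ##args)
#define IWL_WARN(priv, fmt, args...) \
	dev_warn(&(priv)->pci_dev->dev, fmt, ##args)
#define IWL_CRIT(priv, fmt, args...) \
	dev_printk(KERN_CRIT, &(priv)->pci_dev->dev, fmt, ##args)

The IWL_DEBUG_INFO/IWL_DEBUG_IO/IWL_DEBUG_LED/... call sites are converted the same way, gaining the priv argument.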
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 603c84bed63..3479153d96c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -118,6 +118,9 @@ struct iwl_eeprom_channel {
118 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 118 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
119} __attribute__ ((packed)); 119} __attribute__ ((packed));
120 120
121/* 3945 Specific */
122#define EEPROM_3945_EEPROM_VERSION (0x2f)
123
121/* 4965 has two radio transmitters (and 3 radio receivers) */ 124/* 4965 has two radio transmitters (and 3 radio receivers) */
122#define EEPROM_TX_POWER_TX_CHAINS (2) 125#define EEPROM_TX_POWER_TX_CHAINS (2)
123 126
@@ -367,6 +370,8 @@ struct iwl_eeprom_calib_info {
367 */ 370 */
368#define EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */ 371#define EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */
369 372
373#define EEPROM_REGULATORY_BAND_NO_FAT (0)
374
370struct iwl_eeprom_ops { 375struct iwl_eeprom_ops {
371 const u32 regulatory_bands[7]; 376 const u32 regulatory_bands[7];
372 int (*verify_signature) (struct iwl_priv *priv); 377 int (*verify_signature) (struct iwl_priv *priv);
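EEPROM_REGULATORY_BAND_NO_FAT acts as a sentinel for the two FAT (40 MHz) slots of regulatory_bands[]: the check added to iwl_init_channel_map() above returns before the FAT parsing loop when entries [5] and [6] are both zero. A configuration for hardware without FAT channels would therefore be expected to look roughly like the following sketch; the first five offsets are made-up placeholder values, not real EEPROM addresses, and only the last two entries matter for the new check:

/* Hypothetical example -- not part of this patch. */
static const struct iwl_eeprom_ops iwl_no_fat_eeprom_ops_sketch = {
	.regulatory_bands = {
		0x1e, 0x26, 0x32, 0x40, 0x4c,	/* bands 1-5: placeholder offsets */
		EEPROM_REGULATORY_BAND_NO_FAT,	/* band 6: no 2.4 GHz FAT channels */
		EEPROM_REGULATORY_BAND_NO_FAT,	/* band 7: no 5.2 GHz FAT channels */
	},
	/* .verify_signature and the remaining callbacks omitted */
};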
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d7da1986455..65fa8a69fd5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -399,6 +399,21 @@
399 */ 399 */
400#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) 400#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
401 401
402#define RX_QUEUE_SIZE 256
403#define RX_QUEUE_MASK 255
404#define RX_QUEUE_SIZE_LOG 8
405
406/*
407 * RX related structures and functions
408 */
409#define RX_FREE_BUFFERS 64
410#define RX_LOW_WATERMARK 8
411
412/* Size of one Rx buffer in host DRAM */
413#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
414#define IWL_RX_BUF_SIZE_4K (4 * 1024)
415#define IWL_RX_BUF_SIZE_8K (8 * 1024)
416
402/** 417/**
403 * struct iwl_rb_status - reseve buffer status 418 * struct iwl_rb_status - reseve buffer status
404 * host memory mapped FH registers 419 * host memory mapped FH registers
@@ -414,6 +429,7 @@ struct iwl_rb_status {
414 __le16 closed_fr_num; 429 __le16 closed_fr_num;
415 __le16 finished_rb_num; 430 __le16 finished_rb_num;
416 __le16 finished_fr_nam; 431 __le16 finished_fr_nam;
432 __le32 __unused; /* 3945 only */
417} __attribute__ ((packed)); 433} __attribute__ ((packed));
418 434
419 435
@@ -477,7 +493,6 @@ struct iwl_tfd {
477 __le32 __pad; 493 __le32 __pad;
478} __attribute__ ((packed)); 494} __attribute__ ((packed));
479 495
480
481/* Keep Warm Size */ 496/* Keep Warm Size */
482#define IWL_KW_SIZE 0x1000 /* 4k */ 497#define IWL_KW_SIZE 0x1000 /* 4k */
483 498
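The RX_QUEUE_* constants moved into this header keep the usual power-of-two ring convention: RX_QUEUE_MASK is RX_QUEUE_SIZE - 1 and 1 << RX_QUEUE_SIZE_LOG equals RX_QUEUE_SIZE, so receive-ring indices can be wrapped with a mask instead of a modulo. An illustration (not a function from this patch):

/* Illustration: advancing a ring index with the mask defined above. */
static inline u32 iwl_rx_index_inc_wrap_example(u32 index)
{
	return (index + 1) & RX_QUEUE_MASK;	/* 255 == RX_QUEUE_SIZE - 1 */
}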
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 4b35b30e493..17d61ac8ed6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -109,14 +109,14 @@ static int iwl_generic_cmd_callback(struct iwl_priv *priv,
109 struct iwl_rx_packet *pkt = NULL; 109 struct iwl_rx_packet *pkt = NULL;
110 110
111 if (!skb) { 111 if (!skb) {
112 IWL_ERROR("Error: Response NULL in %s.\n", 112 IWL_ERR(priv, "Error: Response NULL in %s.\n",
113 get_cmd_string(cmd->hdr.cmd)); 113 get_cmd_string(cmd->hdr.cmd));
114 return 1; 114 return 1;
115 } 115 }
116 116
117 pkt = (struct iwl_rx_packet *)skb->data; 117 pkt = (struct iwl_rx_packet *)skb->data;
118 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 118 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
119 IWL_ERROR("Bad return from %s (0x%08X)\n", 119 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
120 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 120 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
121 return 1; 121 return 1;
122 } 122 }
@@ -125,11 +125,11 @@ static int iwl_generic_cmd_callback(struct iwl_priv *priv,
125 switch (cmd->hdr.cmd) { 125 switch (cmd->hdr.cmd) {
126 case REPLY_TX_LINK_QUALITY_CMD: 126 case REPLY_TX_LINK_QUALITY_CMD:
127 case SENSITIVITY_CMD: 127 case SENSITIVITY_CMD:
128 IWL_DEBUG_HC_DUMP("back from %s (0x%08X)\n", 128 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
129 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 129 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
130 break; 130 break;
131 default: 131 default:
132 IWL_DEBUG_HC("back from %s (0x%08X)\n", 132 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
133 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 133 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
134 } 134 }
135#endif 135#endif
@@ -156,7 +156,7 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
156 156
157 ret = iwl_enqueue_hcmd(priv, cmd); 157 ret = iwl_enqueue_hcmd(priv, cmd);
158 if (ret < 0) { 158 if (ret < 0) {
159 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 159 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
160 get_cmd_string(cmd->id), ret); 160 get_cmd_string(cmd->id), ret);
161 return ret; 161 return ret;
162 } 162 }
@@ -174,8 +174,9 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
174 BUG_ON(cmd->meta.u.callback != NULL); 174 BUG_ON(cmd->meta.u.callback != NULL);
175 175
176 if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) { 176 if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
177 IWL_ERROR("Error sending %s: Already sending a host command\n", 177 IWL_ERR(priv,
178 get_cmd_string(cmd->id)); 178 "Error sending %s: Already sending a host command\n",
179 get_cmd_string(cmd->id));
179 ret = -EBUSY; 180 ret = -EBUSY;
180 goto out; 181 goto out;
181 } 182 }
@@ -188,7 +189,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
188 cmd_idx = iwl_enqueue_hcmd(priv, cmd); 189 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
189 if (cmd_idx < 0) { 190 if (cmd_idx < 0) {
190 ret = cmd_idx; 191 ret = cmd_idx;
191 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 192 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
192 get_cmd_string(cmd->id), ret); 193 get_cmd_string(cmd->id), ret);
193 goto out; 194 goto out;
194 } 195 }
@@ -198,9 +199,10 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
198 HOST_COMPLETE_TIMEOUT); 199 HOST_COMPLETE_TIMEOUT);
199 if (!ret) { 200 if (!ret) {
200 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { 201 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
201 IWL_ERROR("Error sending %s: time out after %dms.\n", 202 IWL_ERR(priv,
202 get_cmd_string(cmd->id), 203 "Error sending %s: time out after %dms.\n",
203 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 204 get_cmd_string(cmd->id),
205 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
204 206
205 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 207 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
206 ret = -ETIMEDOUT; 208 ret = -ETIMEDOUT;
@@ -209,19 +211,19 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
209 } 211 }
210 212
211 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { 213 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
212 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n", 214 IWL_DEBUG_INFO(priv, "Command %s aborted: RF KILL Switch\n",
213 get_cmd_string(cmd->id)); 215 get_cmd_string(cmd->id));
214 ret = -ECANCELED; 216 ret = -ECANCELED;
215 goto fail; 217 goto fail;
216 } 218 }
217 if (test_bit(STATUS_FW_ERROR, &priv->status)) { 219 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
218 IWL_DEBUG_INFO("Command %s failed: FW Error\n", 220 IWL_DEBUG_INFO(priv, "Command %s failed: FW Error\n",
219 get_cmd_string(cmd->id)); 221 get_cmd_string(cmd->id));
220 ret = -EIO; 222 ret = -EIO;
221 goto fail; 223 goto fail;
222 } 224 }
223 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) { 225 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
224 IWL_ERROR("Error: Response NULL in '%s'\n", 226 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
225 get_cmd_string(cmd->id)); 227 get_cmd_string(cmd->id));
226 ret = -EIO; 228 ret = -EIO;
227 goto cancel; 229 goto cancel;
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index ca4f638ab9d..fb64d297dd4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 0a92e7431ad..c7b8e5bb4e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -66,7 +66,7 @@
66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv, 66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
67 u32 ofs, u32 val) 67 u32 ofs, u32 val)
68{ 68{
69 IWL_DEBUG_IO("write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l); 69 IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
70 _iwl_write32(priv, ofs, val); 70 _iwl_write32(priv, ofs, val);
71} 71}
72#define iwl_write32(priv, ofs, val) \ 72#define iwl_write32(priv, ofs, val) \
@@ -79,7 +79,7 @@ static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
79#ifdef CONFIG_IWLWIFI_DEBUG 79#ifdef CONFIG_IWLWIFI_DEBUG
80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) 80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
81{ 81{
82 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l); 82 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
83 return _iwl_read32(priv, ofs); 83 return _iwl_read32(priv, ofs);
84} 84}
85#define iwl_read32(priv, ofs) __iwl_read32(__FILE__, __LINE__, priv, ofs) 85#define iwl_read32(priv, ofs) __iwl_read32(__FILE__, __LINE__, priv, ofs)
@@ -108,7 +108,7 @@ static inline int __iwl_poll_bit(const char *f, u32 l,
108 u32 bits, u32 mask, int timeout) 108 u32 bits, u32 mask, int timeout)
109{ 109{
110 int ret = _iwl_poll_bit(priv, addr, bits, mask, timeout); 110 int ret = _iwl_poll_bit(priv, addr, bits, mask, timeout);
111 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", 111 IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
112 addr, bits, mask, 112 addr, bits, mask,
113 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l); 113 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
114 return ret; 114 return ret;
@@ -128,7 +128,7 @@ static inline void __iwl_set_bit(const char *f, u32 l,
128 struct iwl_priv *priv, u32 reg, u32 mask) 128 struct iwl_priv *priv, u32 reg, u32 mask)
129{ 129{
130 u32 val = _iwl_read32(priv, reg) | mask; 130 u32 val = _iwl_read32(priv, reg) | mask;
131 IWL_DEBUG_IO("set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); 131 IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
132 _iwl_write32(priv, reg, val); 132 _iwl_write32(priv, reg, val);
133} 133}
134#define iwl_set_bit(p, r, m) __iwl_set_bit(__FILE__, __LINE__, p, r, m) 134#define iwl_set_bit(p, r, m) __iwl_set_bit(__FILE__, __LINE__, p, r, m)
@@ -145,7 +145,7 @@ static inline void __iwl_clear_bit(const char *f, u32 l,
145 struct iwl_priv *priv, u32 reg, u32 mask) 145 struct iwl_priv *priv, u32 reg, u32 mask)
146{ 146{
147 u32 val = _iwl_read32(priv, reg) & ~mask; 147 u32 val = _iwl_read32(priv, reg) & ~mask;
148 IWL_DEBUG_IO("clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); 148 IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
149 _iwl_write32(priv, reg, val); 149 _iwl_write32(priv, reg, val);
150} 150}
151#define iwl_clear_bit(p, r, m) __iwl_clear_bit(__FILE__, __LINE__, p, r, m) 151#define iwl_clear_bit(p, r, m) __iwl_clear_bit(__FILE__, __LINE__, p, r, m)
@@ -165,9 +165,9 @@ static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
165 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL, 165 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
166 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 166 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
167 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 167 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
168 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 50); 168 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
169 if (ret < 0) { 169 if (ret < 0) {
170 IWL_ERROR("MAC is in deep sleep!\n"); 170 IWL_ERR(priv, "MAC is in deep sleep!\n");
171 return -EIO; 171 return -EIO;
172 } 172 }
173 173
@@ -182,9 +182,9 @@ static inline int __iwl_grab_nic_access(const char *f, u32 l,
182 struct iwl_priv *priv) 182 struct iwl_priv *priv)
183{ 183{
184 if (atomic_read(&priv->restrict_refcnt)) 184 if (atomic_read(&priv->restrict_refcnt))
185 IWL_ERROR("Grabbing access while already held %s %d.\n", f, l); 185 IWL_ERR(priv, "Grabbing access while already held %s %d.\n", f, l);
186 186
187 IWL_DEBUG_IO("grabbing nic access - %s %d\n", f, l); 187 IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
188 return _iwl_grab_nic_access(priv); 188 return _iwl_grab_nic_access(priv);
189} 189}
190#define iwl_grab_nic_access(priv) \ 190#define iwl_grab_nic_access(priv) \
@@ -207,9 +207,9 @@ static inline void __iwl_release_nic_access(const char *f, u32 l,
207 struct iwl_priv *priv) 207 struct iwl_priv *priv)
208{ 208{
209 if (atomic_read(&priv->restrict_refcnt) <= 0) 209 if (atomic_read(&priv->restrict_refcnt) <= 0)
210 IWL_ERROR("Release unheld nic access at line %s %d.\n", f, l); 210 IWL_ERR(priv, "Release unheld nic access at line %s %d.\n", f, l);
211 211
212 IWL_DEBUG_IO("releasing nic access - %s %d\n", f, l); 212 IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
213 _iwl_release_nic_access(priv); 213 _iwl_release_nic_access(priv);
214} 214}
215#define iwl_release_nic_access(priv) \ 215#define iwl_release_nic_access(priv) \
@@ -229,8 +229,8 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
229{ 229{
230 u32 value = _iwl_read_direct32(priv, reg); 230 u32 value = _iwl_read_direct32(priv, reg);
231 if (!atomic_read(&priv->restrict_refcnt)) 231 if (!atomic_read(&priv->restrict_refcnt))
232 IWL_ERROR("Nic access not held from %s %d\n", f, l); 232 IWL_ERR(priv, "Nic access not held from %s %d\n", f, l);
233 IWL_DEBUG_IO("read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value, 233 IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
234 f, l); 234 f, l);
235 return value; 235 return value;
236} 236}
@@ -250,7 +250,7 @@ static void __iwl_write_direct32(const char *f , u32 line,
250 struct iwl_priv *priv, u32 reg, u32 value) 250 struct iwl_priv *priv, u32 reg, u32 value)
251{ 251{
252 if (!atomic_read(&priv->restrict_refcnt)) 252 if (!atomic_read(&priv->restrict_refcnt))
253 IWL_ERROR("Nic access not held from %s line %d\n", f, line); 253 IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
254 _iwl_write_direct32(priv, reg, value); 254 _iwl_write_direct32(priv, reg, value);
255} 255}
256#define iwl_write_direct32(priv, reg, value) \ 256#define iwl_write_direct32(priv, reg, value) \
@@ -284,10 +284,10 @@ static inline int __iwl_poll_direct_bit(const char *f, u32 l,
284 int ret = _iwl_poll_direct_bit(priv, addr, mask, timeout); 284 int ret = _iwl_poll_direct_bit(priv, addr, mask, timeout);
285 285
286 if (unlikely(ret == -ETIMEDOUT)) 286 if (unlikely(ret == -ETIMEDOUT))
287 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) - " 287 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
288 "timedout - %s %d\n", addr, mask, f, l); 288 "timedout - %s %d\n", addr, mask, f, l);
289 else 289 else
290 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) = 0x%08X " 290 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
291 "- %s %d\n", addr, mask, ret, f, l); 291 "- %s %d\n", addr, mask, ret, f, l);
292 return ret; 292 return ret;
293} 293}
@@ -308,7 +308,7 @@ static inline u32 __iwl_read_prph(const char *f, u32 line,
308 struct iwl_priv *priv, u32 reg) 308 struct iwl_priv *priv, u32 reg)
309{ 309{
310 if (!atomic_read(&priv->restrict_refcnt)) 310 if (!atomic_read(&priv->restrict_refcnt))
311 IWL_ERROR("Nic access not held from %s line %d\n", f, line); 311 IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
312 return _iwl_read_prph(priv, reg); 312 return _iwl_read_prph(priv, reg);
313} 313}
314 314
@@ -331,7 +331,7 @@ static inline void __iwl_write_prph(const char *f, u32 line,
331 struct iwl_priv *priv, u32 addr, u32 val) 331 struct iwl_priv *priv, u32 addr, u32 val)
332{ 332{
333 if (!atomic_read(&priv->restrict_refcnt)) 333 if (!atomic_read(&priv->restrict_refcnt))
334 IWL_ERROR("Nic access not held from %s line %d\n", f, line); 334 IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
335 _iwl_write_prph(priv, addr, val); 335 _iwl_write_prph(priv, addr, val);
336} 336}
337 337
@@ -349,7 +349,7 @@ static inline void __iwl_set_bits_prph(const char *f, u32 line,
349 u32 reg, u32 mask) 349 u32 reg, u32 mask)
350{ 350{
351 if (!atomic_read(&priv->restrict_refcnt)) 351 if (!atomic_read(&priv->restrict_refcnt))
352 IWL_ERROR("Nic access not held from %s line %d\n", f, line); 352 IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
353 353
354 _iwl_set_bits_prph(priv, reg, mask); 354 _iwl_set_bits_prph(priv, reg, mask);
355} 355}
@@ -367,7 +367,7 @@ static inline void __iwl_set_bits_mask_prph(const char *f, u32 line,
367 struct iwl_priv *priv, u32 reg, u32 bits, u32 mask) 367 struct iwl_priv *priv, u32 reg, u32 bits, u32 mask)
368{ 368{
369 if (!atomic_read(&priv->restrict_refcnt)) 369 if (!atomic_read(&priv->restrict_refcnt))
370 IWL_ERROR("Nic access not held from %s line %d\n", f, line); 370 IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
371 _iwl_set_bits_mask_prph(priv, reg, bits, mask); 371 _iwl_set_bits_mask_prph(priv, reg, bits, mask);
372} 372}
373#define iwl_set_bits_mask_prph(priv, reg, bits, mask) \ 373#define iwl_set_bits_mask_prph(priv, reg, bits, mask) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 11eccd7d268..19680f72087 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -123,7 +123,7 @@ static int iwl4965_led_pattern(struct iwl_priv *priv, int led_id,
123/* Set led register off */ 123/* Set led register off */
124static int iwl4965_led_on_reg(struct iwl_priv *priv, int led_id) 124static int iwl4965_led_on_reg(struct iwl_priv *priv, int led_id)
125{ 125{
126 IWL_DEBUG_LED("led on %d\n", led_id); 126 IWL_DEBUG_LED(priv, "led on %d\n", led_id);
127 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); 127 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
128 return 0; 128 return 0;
129} 129}
@@ -150,7 +150,7 @@ int iwl4965_led_off(struct iwl_priv *priv, int led_id)
150 .off = 0, 150 .off = 0,
151 .interval = IWL_DEF_LED_INTRVL 151 .interval = IWL_DEF_LED_INTRVL
152 }; 152 };
153 IWL_DEBUG_LED("led off %d\n", led_id); 153 IWL_DEBUG_LED(priv, "led off %d\n", led_id);
154 return iwl_send_led_cmd(priv, &led_cmd); 154 return iwl_send_led_cmd(priv, &led_cmd);
155} 155}
156#endif 156#endif
@@ -159,7 +159,7 @@ int iwl4965_led_off(struct iwl_priv *priv, int led_id)
159/* Set led register off */ 159/* Set led register off */
160static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id) 160static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id)
161{ 161{
162 IWL_DEBUG_LED("LED Reg off\n"); 162 IWL_DEBUG_LED(priv, "LED Reg off\n");
163 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF); 163 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
164 return 0; 164 return 0;
165} 165}
@@ -169,7 +169,7 @@ static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id)
169 */ 169 */
170static int iwl_led_associate(struct iwl_priv *priv, int led_id) 170static int iwl_led_associate(struct iwl_priv *priv, int led_id)
171{ 171{
172 IWL_DEBUG_LED("Associated\n"); 172 IWL_DEBUG_LED(priv, "Associated\n");
173 priv->allow_blinking = 1; 173 priv->allow_blinking = 1;
174 return iwl4965_led_on_reg(priv, led_id); 174 return iwl4965_led_on_reg(priv, led_id);
175} 175}
@@ -213,7 +213,7 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
213 return; 213 return;
214 214
215 215
216 IWL_DEBUG_LED("Led type = %s brightness = %d\n", 216 IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
217 led_type_str[led->type], brightness); 217 led_type_str[led->type], brightness);
218 switch (brightness) { 218 switch (brightness) {
219 case LED_FULL: 219 case LED_FULL:
@@ -254,7 +254,7 @@ static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
254 254
255 ret = led_classdev_register(device, &led->led_dev); 255 ret = led_classdev_register(device, &led->led_dev);
256 if (ret) { 256 if (ret) {
257 IWL_ERROR("Error: failed to register led handler.\n"); 257 IWL_ERR(priv, "Error: failed to register led handler.\n");
258 return ret; 258 return ret;
259 } 259 }
260 260
@@ -280,7 +280,7 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
280 if (tpt < 0) /* wraparound */ 280 if (tpt < 0) /* wraparound */
281 tpt = -tpt; 281 tpt = -tpt;
282 282
283 IWL_DEBUG_LED("tpt %lld current_tpt %llu\n", 283 IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
284 (long long)tpt, 284 (long long)tpt,
285 (unsigned long long)current_tpt); 285 (unsigned long long)current_tpt);
286 priv->led_tpt = current_tpt; 286 priv->led_tpt = current_tpt;
@@ -292,7 +292,7 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
292 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) 292 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
293 break; 293 break;
294 294
295 IWL_DEBUG_LED("LED BLINK IDX=%d\n", i); 295 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
296 return i; 296 return i;
297} 297}
298 298
@@ -352,7 +352,7 @@ int iwl_leds_register(struct iwl_priv *priv)
352 352
353 trigger = ieee80211_get_radio_led_name(priv->hw); 353 trigger = ieee80211_get_radio_led_name(priv->hw);
354 snprintf(priv->led[IWL_LED_TRG_RADIO].name, 354 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
355 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio", 355 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
356 wiphy_name(priv->hw->wiphy)); 356 wiphy_name(priv->hw->wiphy));
357 357
358 priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg; 358 priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg;
@@ -366,7 +366,7 @@ int iwl_leds_register(struct iwl_priv *priv)
366 366
367 trigger = ieee80211_get_assoc_led_name(priv->hw); 367 trigger = ieee80211_get_assoc_led_name(priv->hw);
368 snprintf(priv->led[IWL_LED_TRG_ASSOC].name, 368 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
369 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc", 369 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
370 wiphy_name(priv->hw->wiphy)); 370 wiphy_name(priv->hw->wiphy));
371 371
372 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC], 372 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
@@ -382,7 +382,7 @@ int iwl_leds_register(struct iwl_priv *priv)
382 382
383 trigger = ieee80211_get_rx_led_name(priv->hw); 383 trigger = ieee80211_get_rx_led_name(priv->hw);
384 snprintf(priv->led[IWL_LED_TRG_RX].name, 384 snprintf(priv->led[IWL_LED_TRG_RX].name,
385 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX", 385 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
386 wiphy_name(priv->hw->wiphy)); 386 wiphy_name(priv->hw->wiphy));
387 387
388 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX], 388 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
@@ -397,7 +397,7 @@ int iwl_leds_register(struct iwl_priv *priv)
397 397
398 trigger = ieee80211_get_tx_led_name(priv->hw); 398 trigger = ieee80211_get_tx_led_name(priv->hw);
399 snprintf(priv->led[IWL_LED_TRG_TX].name, 399 snprintf(priv->led[IWL_LED_TRG_TX].name,
400 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX", 400 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
401 wiphy_name(priv->hw->wiphy)); 401 wiphy_name(priv->hw->wiphy));
402 402
403 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX], 403 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
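The snprintf format strings above now leave the LED "colour" field empty, matching the LED class naming convention devicename:colour:function. With a wiphy named, say, "phy0" (a made-up name for illustration) the registered trigger becomes "iwl-phy0::radio" rather than "iwl-phy0:radio":

/* Illustration only -- "phy0" is a placeholder wiphy name. */
static void iwl_led_name_example(char *buf, size_t len)
{
	snprintf(buf, len, "iwl-%s::radio", "phy0");
	/* buf now contains "iwl-phy0::radio" */
}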
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 021e00bcd1b..140fd8fa485 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -30,12 +30,12 @@
30 30
31struct iwl_priv; 31struct iwl_priv;
32 32
33#ifdef CONFIG_IWLWIFI_LEDS 33#if defined(CONFIG_IWLWIFI_LEDS) || defined(CONFIG_IWL3945_LEDS)
34#include <linux/leds.h> 34#include <linux/leds.h>
35 35
36#define IWL_LED_SOLID 11 36#define IWL_LED_SOLID 11
37#define IWL_LED_NAME_LEN 31 37#define IWL_LED_NAME_LEN 31
38#define IWL_DEF_LED_INTRVL __constant_cpu_to_le32(1000) 38#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
39 39
40#define IWL_LED_ACTIVITY (0<<1) 40#define IWL_LED_ACTIVITY (0<<1)
41#define IWL_LED_LINK (1<<1) 41#define IWL_LED_LINK (1<<1)
@@ -47,7 +47,9 @@ enum led_type {
47 IWL_LED_TRG_RADIO, 47 IWL_LED_TRG_RADIO,
48 IWL_LED_TRG_MAX, 48 IWL_LED_TRG_MAX,
49}; 49};
50#endif
50 51
52#ifdef CONFIG_IWLWIFI_LEDS
51 53
52struct iwl_led { 54struct iwl_led {
53 struct iwl_priv *priv; 55 struct iwl_priv *priv;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 75ca6a54217..18b7e4195ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -60,14 +60,6 @@
60#define IWL_POWER_RANGE_1_MAX (10) 60#define IWL_POWER_RANGE_1_MAX (10)
61 61
62 62
63#define NOSLP __constant_cpu_to_le16(0), 0, 0
64#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
65#define SLP_TOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
66#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
67 __constant_cpu_to_le32(X1), \
68 __constant_cpu_to_le32(X2), \
69 __constant_cpu_to_le32(X3), \
70 __constant_cpu_to_le32(X4)}
71 63
72#define IWL_POWER_ON_BATTERY IWL_POWER_INDEX_5 64#define IWL_POWER_ON_BATTERY IWL_POWER_INDEX_5
73#define IWL_POWER_ON_AC_DISASSOC IWL_POWER_MODE_CAM 65#define IWL_POWER_ON_AC_DISASSOC IWL_POWER_MODE_CAM
@@ -110,6 +102,7 @@ static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = {
110 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} 102 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
111}; 103};
112 104
105
113/* set card power command */ 106/* set card power command */
114static int iwl_set_power(struct iwl_priv *priv, void *cmd) 107static int iwl_set_power(struct iwl_priv *priv, void *cmd)
115{ 108{
@@ -134,13 +127,6 @@ static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
134 else 127 else
135 mode = IWL_POWER_ON_AC_DISASSOC; 128 mode = IWL_POWER_ON_AC_DISASSOC;
136 break; 129 break;
137 /* FIXME: remove battery and ac from here */
138 case IWL_POWER_BATTERY:
139 mode = IWL_POWER_INDEX_3;
140 break;
141 case IWL_POWER_AC:
142 mode = IWL_POWER_MODE_CAM;
143 break;
144 default: 130 default:
145 mode = priv->power_data.user_power_setting; 131 mode = priv->power_data.user_power_setting;
146 break; 132 break;
@@ -149,17 +135,17 @@ static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
149} 135}
150 136
151/* initialize to default */ 137/* initialize to default */
152static int iwl_power_init_handle(struct iwl_priv *priv) 138static void iwl_power_init_handle(struct iwl_priv *priv)
153{ 139{
154 struct iwl_power_mgr *pow_data; 140 struct iwl_power_mgr *pow_data;
155 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX; 141 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
156 struct iwl_powertable_cmd *cmd; 142 struct iwl_powertable_cmd *cmd;
157 int i; 143 int i;
158 u16 pci_pm; 144 u16 lctl;
159 145
160 IWL_DEBUG_POWER("Initialize power \n"); 146 IWL_DEBUG_POWER(priv, "Initialize power \n");
161 147
162 pow_data = &(priv->power_data); 148 pow_data = &priv->power_data;
163 149
164 memset(pow_data, 0, sizeof(*pow_data)); 150 memset(pow_data, 0, sizeof(*pow_data));
165 151
@@ -167,38 +153,37 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
167 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); 153 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
168 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size); 154 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size);
169 155
170 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &pci_pm); 156 lctl = iwl_pcie_link_ctl(priv);
171 157
172 IWL_DEBUG_POWER("adjust power command flags\n"); 158 IWL_DEBUG_POWER(priv, "adjust power command flags\n");
173 159
174 for (i = 0; i < IWL_POWER_MAX; i++) { 160 for (i = 0; i < IWL_POWER_MAX; i++) {
175 cmd = &pow_data->pwr_range_0[i].cmd; 161 cmd = &pow_data->pwr_range_0[i].cmd;
176 162
177 if (pci_pm & PCI_CFG_LINK_CTRL_VAL_L0S_EN) 163 if (lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN)
178 cmd->flags &= ~IWL_POWER_PCI_PM_MSK; 164 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
179 else 165 else
180 cmd->flags |= IWL_POWER_PCI_PM_MSK; 166 cmd->flags |= IWL_POWER_PCI_PM_MSK;
181 } 167 }
182 return 0;
183} 168}
184 169
185/* adjust power command according to DTIM period and power level*/ 170/* adjust power command according to DTIM period and power level*/
186static int iwl_update_power_command(struct iwl_priv *priv, 171static int iwl_update_power_cmd(struct iwl_priv *priv,
187 struct iwl_powertable_cmd *cmd, 172 struct iwl_powertable_cmd *cmd, u16 mode)
188 u16 mode)
189{ 173{
190 int ret = 0, i;
191 u8 skip;
192 u32 max_sleep = 0;
193 struct iwl_power_vec_entry *range; 174 struct iwl_power_vec_entry *range;
194 u8 period = 0;
195 struct iwl_power_mgr *pow_data; 175 struct iwl_power_mgr *pow_data;
176 int i;
177 u32 max_sleep = 0;
178 u8 period;
179 bool skip;
196 180
197 if (mode > IWL_POWER_INDEX_5) { 181 if (mode > IWL_POWER_INDEX_5) {
198 IWL_DEBUG_POWER("Error invalid power mode \n"); 182 IWL_DEBUG_POWER(priv, "Error invalid power mode \n");
199 return -1; 183 return -EINVAL;
200 } 184 }
201 pow_data = &(priv->power_data); 185
186 pow_data = &priv->power_data;
202 187
203 if (pow_data->dtim_period <= IWL_POWER_RANGE_0_MAX) 188 if (pow_data->dtim_period <= IWL_POWER_RANGE_0_MAX)
204 range = &pow_data->pwr_range_0[0]; 189 range = &pow_data->pwr_range_0[0];
@@ -212,14 +197,12 @@ static int iwl_update_power_command(struct iwl_priv *priv,
212 197
213 if (period == 0) { 198 if (period == 0) {
214 period = 1; 199 period = 1;
215 skip = 0; 200 skip = false;
216 } else
217 skip = range[mode].no_dtim;
218
219 if (skip == 0) {
220 max_sleep = period;
221 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
222 } else { 201 } else {
202 skip = !!range[mode].no_dtim;
203 }
204
205 if (skip) {
223 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; 206 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
224 max_sleep = le32_to_cpu(slp_itrvl); 207 max_sleep = le32_to_cpu(slp_itrvl);
225 if (max_sleep == 0xFF) 208 if (max_sleep == 0xFF)
@@ -227,24 +210,26 @@ static int iwl_update_power_command(struct iwl_priv *priv,
227 else if (max_sleep > period) 210 else if (max_sleep > period)
228 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; 211 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
229 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; 212 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
213 } else {
214 max_sleep = period;
215 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
230 } 216 }
231 217
232 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) { 218 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
233 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) 219 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
234 cmd->sleep_interval[i] = cpu_to_le32(max_sleep); 220 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
235 }
236 221
237 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags); 222 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
238 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); 223 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
239 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); 224 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
240 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", 225 IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
241 le32_to_cpu(cmd->sleep_interval[0]), 226 le32_to_cpu(cmd->sleep_interval[0]),
242 le32_to_cpu(cmd->sleep_interval[1]), 227 le32_to_cpu(cmd->sleep_interval[1]),
243 le32_to_cpu(cmd->sleep_interval[2]), 228 le32_to_cpu(cmd->sleep_interval[2]),
244 le32_to_cpu(cmd->sleep_interval[3]), 229 le32_to_cpu(cmd->sleep_interval[3]),
245 le32_to_cpu(cmd->sleep_interval[4])); 230 le32_to_cpu(cmd->sleep_interval[4]));
246 231
247 return ret; 232 return 0;
248} 233}
249 234
250 235
@@ -295,7 +280,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
295 if (final_mode != IWL_POWER_MODE_CAM) 280 if (final_mode != IWL_POWER_MODE_CAM)
296 set_bit(STATUS_POWER_PMI, &priv->status); 281 set_bit(STATUS_POWER_PMI, &priv->status);
297 282
298 iwl_update_power_command(priv, &cmd, final_mode); 283 iwl_update_power_cmd(priv, &cmd, final_mode);
299 cmd.keep_alive_beacons = 0; 284 cmd.keep_alive_beacons = 0;
300 285
301 if (final_mode == IWL_POWER_INDEX_5) 286 if (final_mode == IWL_POWER_INDEX_5)
@@ -311,7 +296,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
311 if (priv->cfg->ops->lib->update_chain_flags && update_chains) 296 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
312 priv->cfg->ops->lib->update_chain_flags(priv); 297 priv->cfg->ops->lib->update_chain_flags(priv);
313 else 298 else
314 IWL_DEBUG_POWER("Cannot update the power, chain noise " 299 IWL_DEBUG_POWER(priv, "Cannot update the power, chain noise "
315 "calibration running: %d\n", 300 "calibration running: %d\n",
316 priv->chain_noise_data.state); 301 priv->chain_noise_data.state);
317 if (!ret) 302 if (!ret)
@@ -366,7 +351,7 @@ EXPORT_SYMBOL(iwl_power_enable_management);
366/* set user_power_setting */ 351/* set user_power_setting */
367int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode) 352int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
368{ 353{
369 if (mode > IWL_POWER_LIMIT) 354 if (mode > IWL_POWER_MAX)
370 return -EINVAL; 355 return -EINVAL;
371 356
372 priv->power_data.user_power_setting = mode; 357 priv->power_data.user_power_setting = mode;
@@ -380,11 +365,10 @@ EXPORT_SYMBOL(iwl_power_set_user_mode);
380 */ 365 */
381int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode) 366int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
382{ 367{
383 if (mode > IWL_POWER_LIMIT) 368 if (mode < IWL_POWER_SYS_MAX)
369 priv->power_data.system_power_setting = mode;
370 else
384 return -EINVAL; 371 return -EINVAL;
385
386 priv->power_data.system_power_setting = mode;
387
388 return iwl_power_update_mode(priv, 0); 372 return iwl_power_update_mode(priv, 0);
389} 373}
390EXPORT_SYMBOL(iwl_power_set_system_mode); 374EXPORT_SYMBOL(iwl_power_set_system_mode);
@@ -392,13 +376,11 @@ EXPORT_SYMBOL(iwl_power_set_system_mode);
392/* initialize to default */ 376/* initialize to default */
393void iwl_power_initialize(struct iwl_priv *priv) 377void iwl_power_initialize(struct iwl_priv *priv)
394{ 378{
395
396 iwl_power_init_handle(priv); 379 iwl_power_init_handle(priv);
397 priv->power_data.user_power_setting = IWL_POWER_AUTO; 380 priv->power_data.user_power_setting = IWL_POWER_AUTO;
398 priv->power_data.power_disabled = 0;
399 priv->power_data.system_power_setting = IWL_POWER_SYS_AUTO; 381 priv->power_data.system_power_setting = IWL_POWER_SYS_AUTO;
400 priv->power_data.is_battery_active = 0;
401 priv->power_data.power_disabled = 0; 382 priv->power_data.power_disabled = 0;
383 priv->power_data.is_battery_active = 0;
402 priv->power_data.critical_power_setting = 0; 384 priv->power_data.critical_power_setting = 0;
403} 385}
404EXPORT_SYMBOL(iwl_power_initialize); 386EXPORT_SYMBOL(iwl_power_initialize);
@@ -407,8 +389,8 @@ EXPORT_SYMBOL(iwl_power_initialize);
407int iwl_power_temperature_change(struct iwl_priv *priv) 389int iwl_power_temperature_change(struct iwl_priv *priv)
408{ 390{
409 int ret = 0; 391 int ret = 0;
410 u16 new_critical = priv->power_data.critical_power_setting;
411 s32 temperature = KELVIN_TO_CELSIUS(priv->last_temperature); 392 s32 temperature = KELVIN_TO_CELSIUS(priv->last_temperature);
393 u16 new_critical = priv->power_data.critical_power_setting;
412 394
413 if (temperature > IWL_CT_KILL_TEMPERATURE) 395 if (temperature > IWL_CT_KILL_TEMPERATURE)
414 return 0; 396 return 0;
@@ -434,7 +416,7 @@ static void iwl_bg_set_power_save(struct work_struct *work)
434{ 416{
435 struct iwl_priv *priv = container_of(work, 417 struct iwl_priv *priv = container_of(work,
436 struct iwl_priv, set_power_save.work); 418 struct iwl_priv, set_power_save.work);
437 IWL_DEBUG(IWL_DL_STATE, "update power\n"); 419 IWL_DEBUG_POWER(priv, "update power\n");
438 420
439 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 421 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
440 return; 422 return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index fa098d8975c..18963392121 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -42,22 +42,26 @@ enum {
42 IWL_POWER_INDEX_5, 42 IWL_POWER_INDEX_5,
43 IWL_POWER_AUTO, 43 IWL_POWER_AUTO,
44 IWL_POWER_MAX = IWL_POWER_AUTO, 44 IWL_POWER_MAX = IWL_POWER_AUTO,
45 IWL_POWER_AC,
46 IWL_POWER_BATTERY,
47}; 45};
48 46
49enum { 47enum {
50 IWL_POWER_SYS_AUTO, 48 IWL_POWER_SYS_AUTO,
51 IWL_POWER_SYS_AC, 49 IWL_POWER_SYS_AC,
52 IWL_POWER_SYS_BATTERY, 50 IWL_POWER_SYS_BATTERY,
51 IWL_POWER_SYS_MAX,
53}; 52};
54 53
55#define IWL_POWER_LIMIT 0x08
56#define IWL_POWER_MASK 0x0F
57#define IWL_POWER_ENABLED 0x10
58 54
59/* Power management (not Tx power) structures */ 55/* Power management (not Tx power) structures */
60 56
57#define NOSLP cpu_to_le16(0), 0, 0
58#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
59#define SLP_TOUT(T) cpu_to_le32((T) * MSEC_TO_USEC)
60#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
61 cpu_to_le32(X1), \
62 cpu_to_le32(X2), \
63 cpu_to_le32(X3), \
64 cpu_to_le32(X4)}
61struct iwl_power_vec_entry { 65struct iwl_power_vec_entry {
62 struct iwl_powertable_cmd cmd; 66 struct iwl_powertable_cmd cmd;
63 u8 no_dtim; 67 u8 no_dtim;
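Moving NOSLP/SLP/SLP_TOUT/SLP_VEC into the header also drops the __constant_cpu_to_le32 spelling; plain cpu_to_le32() already yields a compile-time constant for constant arguments, so it works in static initializers such as the range tables in iwl-power.c. For example, the sleep vector from the range_2[] row quoted earlier expands along these lines:

/* Example (values from the range_2[] entry shown in iwl-power.c):
 * SLP_VEC() is just a braced initializer list. */
static const __le32 example_sleep_vec[IWL_POWER_VEC_SIZE] =
	SLP_VEC(4, 7, 10, 10, 0xFF);
/* i.e. { cpu_to_le32(4), cpu_to_le32(7), cpu_to_le32(10),
 *        cpu_to_le32(10), cpu_to_le32(0xFF) } */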
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index b7a5f23351c..3b9cac3fd21 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
index 4b69da30665..2ad9faf1508 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -47,7 +47,7 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
47 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 47 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
48 return 0; 48 return 0;
49 49
50 IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state); 50 IWL_DEBUG_RF_KILL(priv, "we received soft RFKILL set to state %d\n", state);
51 mutex_lock(&priv->mutex); 51 mutex_lock(&priv->mutex);
52 52
53 switch (state) { 53 switch (state) {
@@ -62,7 +62,8 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
62 iwl_radio_kill_sw_disable_radio(priv); 62 iwl_radio_kill_sw_disable_radio(priv);
63 break; 63 break;
64 default: 64 default:
65 IWL_WARNING("we received unexpected RFKILL state %d\n", state); 65 IWL_WARN(priv, "we received unexpected RFKILL state %d\n",
66 state);
66 break; 67 break;
67 } 68 }
68out_unlock: 69out_unlock:
@@ -78,10 +79,10 @@ int iwl_rfkill_init(struct iwl_priv *priv)
78 79
79 BUG_ON(device == NULL); 80 BUG_ON(device == NULL);
80 81
81 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n"); 82 IWL_DEBUG_RF_KILL(priv, "Initializing RFKILL.\n");
82 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN); 83 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
83 if (!priv->rfkill) { 84 if (!priv->rfkill) {
84 IWL_ERROR("Unable to allocate RFKILL device.\n"); 85 IWL_ERR(priv, "Unable to allocate RFKILL device.\n");
85 ret = -ENOMEM; 86 ret = -ENOMEM;
86 goto error; 87 goto error;
87 } 88 }
@@ -97,11 +98,11 @@ int iwl_rfkill_init(struct iwl_priv *priv)
97 98
98 ret = rfkill_register(priv->rfkill); 99 ret = rfkill_register(priv->rfkill);
99 if (ret) { 100 if (ret) {
100 IWL_ERROR("Unable to register RFKILL: %d\n", ret); 101 IWL_ERR(priv, "Unable to register RFKILL: %d\n", ret);
101 goto free_rfkill; 102 goto free_rfkill;
102 } 103 }
103 104
104 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n"); 105 IWL_DEBUG_RF_KILL(priv, "RFKILL initialization complete.\n");
105 return ret; 106 return ret;
106 107
107free_rfkill: 108free_rfkill:
@@ -110,7 +111,7 @@ free_rfkill:
110 priv->rfkill = NULL; 111 priv->rfkill = NULL;
111 112
112error: 113error:
113 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n"); 114 IWL_DEBUG_RF_KILL(priv, "RFKILL initialization complete.\n");
114 return ret; 115 return ret;
115} 116}
116EXPORT_SYMBOL(iwl_rfkill_init); 117EXPORT_SYMBOL(iwl_rfkill_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.h b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
index 86dc055a2e9..633dafb4bf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.h
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index c5f1aa0feac..8f65908f66f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -125,9 +125,10 @@ EXPORT_SYMBOL(iwl_rx_queue_space);
125 */ 125 */
126int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) 126int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
127{ 127{
128 u32 reg = 0;
129 int ret = 0;
130 unsigned long flags; 128 unsigned long flags;
129 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
130 u32 reg;
131 int ret = 0;
131 132
132 spin_lock_irqsave(&q->lock, flags); 133 spin_lock_irqsave(&q->lock, flags);
133 134
@@ -149,15 +150,14 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
149 goto exit_unlock; 150 goto exit_unlock;
150 151
151 /* Device expects a multiple of 8 */ 152 /* Device expects a multiple of 8 */
152 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, 153 iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
153 q->write & ~0x7);
154 iwl_release_nic_access(priv); 154 iwl_release_nic_access(priv);
155 155
156 /* Else device is assumed to be awake */ 156 /* Else device is assumed to be awake */
157 } else 157 } else {
158 /* Device expects a multiple of 8 */ 158 /* Device expects a multiple of 8 */
159 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); 159 iwl_write32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
160 160 }
161 161
162 q->need_update = 0; 162 q->need_update = 0;
163 163
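iwl_rx_queue_update_write_ptr() now takes the write-pointer register from priv->hw_params.rx_wrt_ptr_reg instead of hard-coding FH_RSCSR_CHNL0_WPTR, so 3945 (which uses a different register) can share this path. Presumably each device family fills the field in during its hw_params setup; for the AGN-class devices, keeping the old behaviour would look roughly like this sketch (an assumption, not a hunk from this patch):

/* Sketch: per-device setup pointing the new field at the register
 * that used to be hard-coded here. */
static void iwl_agn_rx_reg_example(struct iwl_priv *priv)
{
	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
}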
@@ -262,8 +262,7 @@ void iwl_rx_allocate(struct iwl_priv *priv)
262 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256, 262 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
263 GFP_KERNEL); 263 GFP_KERNEL);
264 if (!rxb->skb) { 264 if (!rxb->skb) {
265 printk(KERN_CRIT DRV_NAME 265 IWL_CRIT(priv, "Can not allocate SKB buffers\n");
266 "Can not allocate SKB buffers\n");
267 /* We don't reschedule replenish work here -- we will 266 /* We don't reschedule replenish work here -- we will
268 * call the restock method and if it still needs 267 * call the restock method and if it still needs
269 * more buffers it will schedule replenish */ 268 * more buffers it will schedule replenish */
@@ -495,7 +494,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
495 494
496 missed_beacon = &pkt->u.missed_beacon; 495 missed_beacon = &pkt->u.missed_beacon;
497 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) { 496 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
498 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n", 497 IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
499 le32_to_cpu(missed_beacon->consequtive_missed_beacons), 498 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
500 le32_to_cpu(missed_beacon->total_missed_becons), 499 le32_to_cpu(missed_beacon->total_missed_becons),
501 le32_to_cpu(missed_beacon->num_recvd_beacons), 500 le32_to_cpu(missed_beacon->num_recvd_beacons),
@@ -542,7 +541,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
542 else 541 else
543 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 542 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
544 543
545 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", 544 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
546 bcn_silence_a, bcn_silence_b, bcn_silence_c, 545 bcn_silence_a, bcn_silence_b, bcn_silence_c,
547 priv->last_rx_noise); 546 priv->last_rx_noise);
548} 547}
@@ -555,7 +554,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
555 int change; 554 int change;
556 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 555 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
557 556
558 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", 557 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
559 (int)sizeof(priv->statistics), pkt->len); 558 (int)sizeof(priv->statistics), pkt->len);
560 559
561 change = ((priv->statistics.general.temperature != 560 change = ((priv->statistics.general.temperature !=
@@ -742,13 +741,13 @@ static void iwl_dbg_report_frame(struct iwl_priv *priv,
742 * MAC addresses show just the last byte (for brevity), 741 * MAC addresses show just the last byte (for brevity),
743 * but you can hack it to show more, if you'd like to. */ 742 * but you can hack it to show more, if you'd like to. */
744 if (dataframe) 743 if (dataframe)
745 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " 744 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
746 "len=%u, rssi=%d, chnl=%d, rate=%u, \n", 745 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
747 title, le16_to_cpu(fc), header->addr1[5], 746 title, le16_to_cpu(fc), header->addr1[5],
748 length, rssi, channel, bitrate); 747 length, rssi, channel, bitrate);
749 else { 748 else {
750 /* src/dst addresses assume managed mode */ 749 /* src/dst addresses assume managed mode */
751 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, src=0x%02x, " 750 IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
752 "len=%u, rssi=%d, tim=%lu usec, " 751 "len=%u, rssi=%d, tim=%lu usec, "
753 "phy=0x%02x, chnl=%d\n", 752 "phy=0x%02x, chnl=%d\n",
754 title, le16_to_cpu(fc), header->addr1[5], 753 title, le16_to_cpu(fc), header->addr1[5],
@@ -773,10 +772,10 @@ static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
773/* 772/*
774 * returns non-zero if packet should be dropped 773 * returns non-zero if packet should be dropped
775 */ 774 */
776static int iwl_set_decrypted_flag(struct iwl_priv *priv, 775int iwl_set_decrypted_flag(struct iwl_priv *priv,
777 struct ieee80211_hdr *hdr, 776 struct ieee80211_hdr *hdr,
778 u32 decrypt_res, 777 u32 decrypt_res,
779 struct ieee80211_rx_status *stats) 778 struct ieee80211_rx_status *stats)
780{ 779{
781 u16 fc = le16_to_cpu(hdr->frame_control); 780 u16 fc = le16_to_cpu(hdr->frame_control);
782 781
@@ -786,7 +785,7 @@ static int iwl_set_decrypted_flag(struct iwl_priv *priv,
786 if (!(fc & IEEE80211_FCTL_PROTECTED)) 785 if (!(fc & IEEE80211_FCTL_PROTECTED))
787 return 0; 786 return 0;
788 787
789 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); 788 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
790 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { 789 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
791 case RX_RES_STATUS_SEC_TYPE_TKIP: 790 case RX_RES_STATUS_SEC_TYPE_TKIP:
792 /* The uCode has got a bad phase 1 Key, pushes the packet. 791 /* The uCode has got a bad phase 1 Key, pushes the packet.
@@ -800,13 +799,13 @@ static int iwl_set_decrypted_flag(struct iwl_priv *priv,
800 RX_RES_STATUS_BAD_ICV_MIC) { 799 RX_RES_STATUS_BAD_ICV_MIC) {
801 /* bad ICV, the packet is destroyed since the 800 /* bad ICV, the packet is destroyed since the
802 * decryption is inplace, drop it */ 801 * decryption is inplace, drop it */
803 IWL_DEBUG_RX("Packet destroyed\n"); 802 IWL_DEBUG_RX(priv, "Packet destroyed\n");
804 return -1; 803 return -1;
805 } 804 }
806 case RX_RES_STATUS_SEC_TYPE_CCMP: 805 case RX_RES_STATUS_SEC_TYPE_CCMP:
807 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 806 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
808 RX_RES_STATUS_DECRYPT_OK) { 807 RX_RES_STATUS_DECRYPT_OK) {
809 IWL_DEBUG_RX("hw decrypt successfully!!!\n"); 808 IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
810 stats->flag |= RX_FLAG_DECRYPTED; 809 stats->flag |= RX_FLAG_DECRYPTED;
811 } 810 }
812 break; 811 break;
@@ -816,6 +815,7 @@ static int iwl_set_decrypted_flag(struct iwl_priv *priv,
816 } 815 }
817 return 0; 816 return 0;
818} 817}
818EXPORT_SYMBOL(iwl_set_decrypted_flag);
819 819
820static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) 820static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
821{ 821{
@@ -870,7 +870,7 @@ static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
870 break; 870 break;
871 }; 871 };
872 872
873 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", 873 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
874 decrypt_in, decrypt_out); 874 decrypt_in, decrypt_out);
875 875
876 return decrypt_out; 876 return decrypt_out;
@@ -895,7 +895,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
895 rx_start = (struct iwl_rx_phy_res *)&priv->last_phy_res[1]; 895 rx_start = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
896 896
897 if (!rx_start) { 897 if (!rx_start) {
898 IWL_ERROR("MPDU frame without a PHY data\n"); 898 IWL_ERR(priv, "MPDU frame without a PHY data\n");
899 return; 899 return;
900 } 900 }
901 if (include_phy) { 901 if (include_phy) {
@@ -934,8 +934,8 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
934 934
935 /* We only process data packets if the interface is open */ 935 /* We only process data packets if the interface is open */
936 if (unlikely(!priv->is_open)) { 936 if (unlikely(!priv->is_open)) {
937 IWL_DEBUG_DROP_LIMIT 937 IWL_DEBUG_DROP_LIMIT(priv,
938 ("Dropping packet while interface is not open.\n"); 938 "Dropping packet while interface is not open.\n");
939 return; 939 return;
940 } 940 }
941 941
@@ -1007,7 +1007,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1007 /*rx_status.flag |= RX_FLAG_TSFT;*/ 1007 /*rx_status.flag |= RX_FLAG_TSFT;*/
1008 1008
1009 if ((unlikely(rx_start->cfg_phy_cnt > 20))) { 1009 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
1010 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n", 1010 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
1011 rx_start->cfg_phy_cnt); 1011 rx_start->cfg_phy_cnt);
1012 return; 1012 return;
1013 } 1013 }
@@ -1021,7 +1021,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1021 } 1021 }
1022 1022
1023 if (!rx_start) { 1023 if (!rx_start) {
1024 IWL_ERROR("MPDU frame without a PHY data\n"); 1024 IWL_ERR(priv, "MPDU frame without a PHY data\n");
1025 return; 1025 return;
1026 } 1026 }
1027 1027
@@ -1045,7 +1045,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1045 1045
1046 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) || 1046 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
1047 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) { 1047 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1048 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", 1048 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1049 le32_to_cpu(*rx_end)); 1049 le32_to_cpu(*rx_end));
1050 return; 1050 return;
1051 } 1051 }
@@ -1078,7 +1078,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1078 if (unlikely(priv->debug_level & IWL_DL_RX)) 1078 if (unlikely(priv->debug_level & IWL_DL_RX))
1079 iwl_dbg_report_frame(priv, rx_start, len, header, 1); 1079 iwl_dbg_report_frame(priv, rx_start, len, header, 1);
1080#endif 1080#endif
1081 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n", 1081 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
1082 rx_status.signal, rx_status.noise, rx_status.signal, 1082 rx_status.signal, rx_status.noise, rx_status.signal,
1083 (unsigned long long)rx_status.mactime); 1083 (unsigned long long)rx_status.mactime);
1084 1084
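
The iwl-rx.c hunks above are one slice of a change that runs through this whole patch: the iwlwifi logging macros (IWL_ERR, IWL_WARN, IWL_CRIT and the IWL_DEBUG_* family) now take the per-device priv pointer as their first argument instead of printing against a module-global context. Tying each message to priv lets the macros prefix the device name and consult that device's own debug_level mask, which matters on systems with more than one adapter. A rough sketch of how such macros can be built is below; the real definitions live in iwl-debug.h, so these bodies are illustrative assumptions rather than the driver's actual code.

/* Illustrative macro bodies only -- the shipped ones are in iwl-debug.h. */
#define IWL_ERR(priv, fmt, args...) \
	dev_printk(KERN_ERR, &(priv)->pci_dev->dev, fmt, ##args)

#define IWL_CRIT(priv, fmt, args...) \
	dev_printk(KERN_CRIT, &(priv)->pci_dev->dev, fmt, ##args)

#ifdef CONFIG_IWLWIFI_DEBUG
#define IWL_DEBUG_RX(priv, fmt, args...)				\
do {									\
	if ((priv)->debug_level & IWL_DL_RX)	/* per-device mask */	\
		dev_printk(KERN_DEBUG, &(priv)->pci_dev->dev,		\
			   "%s: " fmt, __func__, ##args);		\
} while (0)
#else
#define IWL_DEBUG_RX(priv, fmt, args...)	do { } while (0)
#endif
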
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 3c803f6922e..1ec2b20eb37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -46,15 +46,6 @@
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) 46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) 47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48 48
49/* For faster active scanning, scan will move to the next channel if fewer than
50 * PLCP_QUIET_THRESH packets are heard on this channel within
51 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
52 * time if it's a quiet channel (nothing responded to our probe, and there's
53 * no other traffic).
54 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
55#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
56#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
57
58/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. 49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
59 * Must be set longer than active dwell time. 50 * Must be set longer than active dwell time.
60 * For the most reliable scan, set > AP beacon interval (typically 100msec). */ 51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
@@ -63,7 +54,6 @@
63#define IWL_PASSIVE_DWELL_BASE (100) 54#define IWL_PASSIVE_DWELL_BASE (100)
64#define IWL_CHANNEL_TUNE_TIME 5 55#define IWL_CHANNEL_TUNE_TIME 5
65 56
66#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
67 57
68 58
69/** 59/**
@@ -80,12 +70,12 @@ int iwl_scan_cancel(struct iwl_priv *priv)
80 70
81 if (test_bit(STATUS_SCANNING, &priv->status)) { 71 if (test_bit(STATUS_SCANNING, &priv->status)) {
82 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 72 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
83 IWL_DEBUG_SCAN("Queuing scan abort.\n"); 73 IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n");
84 set_bit(STATUS_SCAN_ABORTING, &priv->status); 74 set_bit(STATUS_SCAN_ABORTING, &priv->status);
85 queue_work(priv->workqueue, &priv->abort_scan); 75 queue_work(priv->workqueue, &priv->abort_scan);
86 76
87 } else 77 } else
88 IWL_DEBUG_SCAN("Scan abort already in progress.\n"); 78 IWL_DEBUG_SCAN(priv, "Scan abort already in progress.\n");
89 79
90 return test_bit(STATUS_SCANNING, &priv->status); 80 return test_bit(STATUS_SCANNING, &priv->status);
91 } 81 }
@@ -119,7 +109,7 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
119} 109}
120EXPORT_SYMBOL(iwl_scan_cancel_timeout); 110EXPORT_SYMBOL(iwl_scan_cancel_timeout);
121 111
122static int iwl_send_scan_abort(struct iwl_priv *priv) 112int iwl_send_scan_abort(struct iwl_priv *priv)
123{ 113{
124 int ret = 0; 114 int ret = 0;
125 struct iwl_rx_packet *res; 115 struct iwl_rx_packet *res;
@@ -150,7 +140,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
150 * can occur if we send the scan abort before we 140 * can occur if we send the scan abort before we
151 * the microcode has notified us that a scan is 141 * the microcode has notified us that a scan is
152 * completed. */ 142 * completed. */
153 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status); 143 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", res->u.status);
154 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 144 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
155 clear_bit(STATUS_SCAN_HW, &priv->status); 145 clear_bit(STATUS_SCAN_HW, &priv->status);
156 } 146 }
@@ -160,7 +150,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
160 150
161 return ret; 151 return ret;
162} 152}
163 153EXPORT_SYMBOL(iwl_send_scan_abort);
164 154
165/* Service response to REPLY_SCAN_CMD (0x80) */ 155/* Service response to REPLY_SCAN_CMD (0x80) */
166static void iwl_rx_reply_scan(struct iwl_priv *priv, 156static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -171,7 +161,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
171 struct iwl_scanreq_notification *notif = 161 struct iwl_scanreq_notification *notif =
172 (struct iwl_scanreq_notification *)pkt->u.raw; 162 (struct iwl_scanreq_notification *)pkt->u.raw;
173 163
174 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); 164 IWL_DEBUG_RX(priv, "Scan request status = 0x%x\n", notif->status);
175#endif 165#endif
176} 166}
177 167
@@ -183,7 +173,7 @@ static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
183 struct iwl_scanstart_notification *notif = 173 struct iwl_scanstart_notification *notif =
184 (struct iwl_scanstart_notification *)pkt->u.raw; 174 (struct iwl_scanstart_notification *)pkt->u.raw;
185 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); 175 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
186 IWL_DEBUG_SCAN("Scan start: " 176 IWL_DEBUG_SCAN(priv, "Scan start: "
187 "%d [802.11%s] " 177 "%d [802.11%s] "
188 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", 178 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
189 notif->channel, 179 notif->channel,
@@ -202,7 +192,7 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
202 struct iwl_scanresults_notification *notif = 192 struct iwl_scanresults_notification *notif =
203 (struct iwl_scanresults_notification *)pkt->u.raw; 193 (struct iwl_scanresults_notification *)pkt->u.raw;
204 194
205 IWL_DEBUG_SCAN("Scan ch.res: " 195 IWL_DEBUG_SCAN(priv, "Scan ch.res: "
206 "%d [802.11%s] " 196 "%d [802.11%s] "
207 "(TSF: 0x%08X:%08X) - %d " 197 "(TSF: 0x%08X:%08X) - %d "
208 "elapsed=%lu usec (%dms since last)\n", 198 "elapsed=%lu usec (%dms since last)\n",
@@ -228,7 +218,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
228 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 218 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
229 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 219 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
230 220
231 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 221 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
232 scan_notif->scanned_channels, 222 scan_notif->scanned_channels,
233 scan_notif->tsf_low, 223 scan_notif->tsf_low,
234 scan_notif->tsf_high, scan_notif->status); 224 scan_notif->tsf_high, scan_notif->status);
@@ -240,7 +230,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
240 /* The scan completion notification came in, so kill that timer... */ 230 /* The scan completion notification came in, so kill that timer... */
241 cancel_delayed_work(&priv->scan_check); 231 cancel_delayed_work(&priv->scan_check);
242 232
243 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", 233 IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
244 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 234 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
245 "2.4" : "5.2", 235 "2.4" : "5.2",
246 jiffies_to_msecs(elapsed_jiffies 236 jiffies_to_msecs(elapsed_jiffies
@@ -258,7 +248,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
258 * then we reset the scan state machine and terminate, 248 * then we reset the scan state machine and terminate,
259 * re-queuing another scan if one has been requested */ 249 * re-queuing another scan if one has been requested */
260 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 250 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
261 IWL_DEBUG_INFO("Aborted scan completed.\n"); 251 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
262 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 252 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
263 } else { 253 } else {
264 /* If there are more bands on this scan pass reschedule */ 254 /* If there are more bands on this scan pass reschedule */
@@ -268,11 +258,11 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
268 258
269 priv->last_scan_jiffies = jiffies; 259 priv->last_scan_jiffies = jiffies;
270 priv->next_scan_jiffies = 0; 260 priv->next_scan_jiffies = 0;
271 IWL_DEBUG_INFO("Setting scan to off\n"); 261 IWL_DEBUG_INFO(priv, "Setting scan to off\n");
272 262
273 clear_bit(STATUS_SCANNING, &priv->status); 263 clear_bit(STATUS_SCANNING, &priv->status);
274 264
275 IWL_DEBUG_INFO("Scan took %dms\n", 265 IWL_DEBUG_INFO(priv, "Scan took %dms\n",
276 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); 266 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
277 267
278 queue_work(priv->workqueue, &priv->scan_completed); 268 queue_work(priv->workqueue, &priv->scan_completed);
@@ -296,9 +286,9 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
296} 286}
297EXPORT_SYMBOL(iwl_setup_rx_scan_handlers); 287EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
298 288
299static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, 289inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
300 enum ieee80211_band band, 290 enum ieee80211_band band,
301 u8 n_probes) 291 u8 n_probes)
302{ 292{
303 if (band == IEEE80211_BAND_5GHZ) 293 if (band == IEEE80211_BAND_5GHZ)
304 return IWL_ACTIVE_DWELL_TIME_52 + 294 return IWL_ACTIVE_DWELL_TIME_52 +
@@ -307,9 +297,10 @@ static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
307 return IWL_ACTIVE_DWELL_TIME_24 + 297 return IWL_ACTIVE_DWELL_TIME_24 +
308 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); 298 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
309} 299}
300EXPORT_SYMBOL(iwl_get_active_dwell_time);
310 301
311static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, 302u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
312 enum ieee80211_band band) 303 enum ieee80211_band band)
313{ 304{
314 u16 passive = (band == IEEE80211_BAND_2GHZ) ? 305 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
315 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : 306 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
@@ -327,6 +318,7 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
327 318
328 return passive; 319 return passive;
329} 320}
321EXPORT_SYMBOL(iwl_get_passive_dwell_time);
330 322
331static int iwl_get_channels_for_scan(struct iwl_priv *priv, 323static int iwl_get_channels_for_scan(struct iwl_priv *priv,
332 enum ieee80211_band band, 324 enum ieee80211_band band,
@@ -363,7 +355,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
363 355
364 ch_info = iwl_get_channel_info(priv, band, channel); 356 ch_info = iwl_get_channel_info(priv, band, channel);
365 if (!is_channel_valid(ch_info)) { 357 if (!is_channel_valid(ch_info)) {
366 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n", 358 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
367 channel); 359 channel);
368 continue; 360 continue;
369 } 361 }
@@ -392,7 +384,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
392 else 384 else
393 scan_ch->tx_gain = ((1 << 5) | (5 << 3)); 385 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
394 386
395 IWL_DEBUG_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", 387 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
396 channel, le32_to_cpu(scan_ch->type), 388 channel, le32_to_cpu(scan_ch->type),
397 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? 389 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
398 "ACTIVE" : "PASSIVE", 390 "ACTIVE" : "PASSIVE",
@@ -403,7 +395,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
403 added++; 395 added++;
404 } 396 }
405 397
406 IWL_DEBUG_SCAN("total channels to scan %d \n", added); 398 IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
407 return added; 399 return added;
408} 400}
409 401
@@ -419,21 +411,21 @@ void iwl_init_scan_params(struct iwl_priv *priv)
419int iwl_scan_initiate(struct iwl_priv *priv) 411int iwl_scan_initiate(struct iwl_priv *priv)
420{ 412{
421 if (!iwl_is_ready_rf(priv)) { 413 if (!iwl_is_ready_rf(priv)) {
422 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); 414 IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
423 return -EIO; 415 return -EIO;
424 } 416 }
425 417
426 if (test_bit(STATUS_SCANNING, &priv->status)) { 418 if (test_bit(STATUS_SCANNING, &priv->status)) {
427 IWL_DEBUG_SCAN("Scan already in progress.\n"); 419 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
428 return -EAGAIN; 420 return -EAGAIN;
429 } 421 }
430 422
431 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 423 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
432 IWL_DEBUG_SCAN("Scan request while abort pending\n"); 424 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
433 return -EAGAIN; 425 return -EAGAIN;
434 } 426 }
435 427
436 IWL_DEBUG_INFO("Starting scan...\n"); 428 IWL_DEBUG_INFO(priv, "Starting scan...\n");
437 if (priv->cfg->sku & IWL_SKU_G) 429 if (priv->cfg->sku & IWL_SKU_G)
438 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); 430 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
439 if (priv->cfg->sku & IWL_SKU_A) 431 if (priv->cfg->sku & IWL_SKU_A)
@@ -450,7 +442,7 @@ EXPORT_SYMBOL(iwl_scan_initiate);
450 442
451#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 443#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
452 444
453static void iwl_bg_scan_check(struct work_struct *data) 445void iwl_bg_scan_check(struct work_struct *data)
454{ 446{
455 struct iwl_priv *priv = 447 struct iwl_priv *priv =
456 container_of(data, struct iwl_priv, scan_check.work); 448 container_of(data, struct iwl_priv, scan_check.work);
@@ -461,7 +453,7 @@ static void iwl_bg_scan_check(struct work_struct *data)
461 mutex_lock(&priv->mutex); 453 mutex_lock(&priv->mutex);
462 if (test_bit(STATUS_SCANNING, &priv->status) || 454 if (test_bit(STATUS_SCANNING, &priv->status) ||
463 test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 455 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
464 IWL_DEBUG(IWL_DL_SCAN, "Scan completion watchdog resetting " 456 IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting "
465 "adapter (%dms)\n", 457 "adapter (%dms)\n",
466 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); 458 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
467 459
@@ -470,6 +462,8 @@ static void iwl_bg_scan_check(struct work_struct *data)
470 } 462 }
471 mutex_unlock(&priv->mutex); 463 mutex_unlock(&priv->mutex);
472} 464}
465EXPORT_SYMBOL(iwl_bg_scan_check);
466
473/** 467/**
474 * iwl_supported_rate_to_ie - fill in the supported rate in IE field 468 * iwl_supported_rate_to_ie - fill in the supported rate in IE field
475 * 469 *
@@ -527,10 +521,10 @@ static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
527 * iwl_fill_probe_req - fill in all required fields and IE for probe request 521 * iwl_fill_probe_req - fill in all required fields and IE for probe request
528 */ 522 */
529 523
530static u16 iwl_fill_probe_req(struct iwl_priv *priv, 524u16 iwl_fill_probe_req(struct iwl_priv *priv,
531 enum ieee80211_band band, 525 enum ieee80211_band band,
532 struct ieee80211_mgmt *frame, 526 struct ieee80211_mgmt *frame,
533 int left) 527 int left)
534{ 528{
535 int len = 0; 529 int len = 0;
536 u8 *pos = NULL; 530 u8 *pos = NULL;
@@ -624,6 +618,7 @@ static u16 iwl_fill_probe_req(struct iwl_priv *priv,
624 618
625 return (u16)len; 619 return (u16)len;
626} 620}
621EXPORT_SYMBOL(iwl_fill_probe_req);
627 622
628static void iwl_bg_request_scan(struct work_struct *data) 623static void iwl_bg_request_scan(struct work_struct *data)
629{ 624{
@@ -650,7 +645,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
650 mutex_lock(&priv->mutex); 645 mutex_lock(&priv->mutex);
651 646
652 if (!iwl_is_ready(priv)) { 647 if (!iwl_is_ready(priv)) {
653 IWL_WARNING("request scan called when driver not ready.\n"); 648 IWL_WARN(priv, "request scan called when driver not ready.\n");
654 goto done; 649 goto done;
655 } 650 }
656 651
@@ -662,34 +657,34 @@ static void iwl_bg_request_scan(struct work_struct *data)
662 /* This should never be called or scheduled if there is currently 657 /* This should never be called or scheduled if there is currently
663 * a scan active in the hardware. */ 658 * a scan active in the hardware. */
664 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 659 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
665 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. " 660 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
666 "Ignoring second request.\n"); 661 "Ignoring second request.\n");
667 ret = -EIO; 662 ret = -EIO;
668 goto done; 663 goto done;
669 } 664 }
670 665
671 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 666 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
672 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); 667 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
673 goto done; 668 goto done;
674 } 669 }
675 670
676 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 671 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
677 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); 672 IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
678 goto done; 673 goto done;
679 } 674 }
680 675
681 if (iwl_is_rfkill(priv)) { 676 if (iwl_is_rfkill(priv)) {
682 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); 677 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
683 goto done; 678 goto done;
684 } 679 }
685 680
686 if (!test_bit(STATUS_READY, &priv->status)) { 681 if (!test_bit(STATUS_READY, &priv->status)) {
687 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); 682 IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
688 goto done; 683 goto done;
689 } 684 }
690 685
691 if (!priv->scan_bands) { 686 if (!priv->scan_bands) {
692 IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); 687 IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
693 goto done; 688 goto done;
694 } 689 }
695 690
@@ -714,7 +709,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
714 u32 scan_suspend_time = 100; 709 u32 scan_suspend_time = 100;
715 unsigned long flags; 710 unsigned long flags;
716 711
717 IWL_DEBUG_INFO("Scanning while associated...\n"); 712 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
718 713
719 spin_lock_irqsave(&priv->lock, flags); 714 spin_lock_irqsave(&priv->lock, flags);
720 interval = priv->beacon_int; 715 interval = priv->beacon_int;
@@ -729,13 +724,13 @@ static void iwl_bg_request_scan(struct work_struct *data)
729 scan_suspend_time = (extra | 724 scan_suspend_time = (extra |
730 ((suspend_time % interval) * 1024)); 725 ((suspend_time % interval) * 1024));
731 scan->suspend_time = cpu_to_le32(scan_suspend_time); 726 scan->suspend_time = cpu_to_le32(scan_suspend_time);
732 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", 727 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
733 scan_suspend_time, interval); 728 scan_suspend_time, interval);
734 } 729 }
735 730
736 /* We should add the ability for user to lock to PASSIVE ONLY */ 731 /* We should add the ability for user to lock to PASSIVE ONLY */
737 if (priv->one_direct_scan) { 732 if (priv->one_direct_scan) {
738 IWL_DEBUG_SCAN("Start direct scan for '%s'\n", 733 IWL_DEBUG_SCAN(priv, "Start direct scan for '%s'\n",
739 print_ssid(ssid, priv->direct_ssid, 734 print_ssid(ssid, priv->direct_ssid,
740 priv->direct_ssid_len)); 735 priv->direct_ssid_len));
741 scan->direct_scan[0].id = WLAN_EID_SSID; 736 scan->direct_scan[0].id = WLAN_EID_SSID;
@@ -744,7 +739,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
744 priv->direct_ssid, priv->direct_ssid_len); 739 priv->direct_ssid, priv->direct_ssid_len);
745 n_probes++; 740 n_probes++;
746 } else { 741 } else {
747 IWL_DEBUG_SCAN("Start indirect scan.\n"); 742 IWL_DEBUG_SCAN(priv, "Start indirect scan.\n");
748 } 743 }
749 744
750 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 745 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
@@ -773,7 +768,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
773 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) 768 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
774 rx_chain = 0x6; 769 rx_chain = 0x6;
775 } else { 770 } else {
776 IWL_WARNING("Invalid scan band count\n"); 771 IWL_WARN(priv, "Invalid scan band count\n");
777 goto done; 772 goto done;
778 } 773 }
779 774
@@ -806,7 +801,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
806 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 801 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
807 802
808 if (scan->channel_count == 0) { 803 if (scan->channel_count == 0) {
809 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count); 804 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
810 goto done; 805 goto done;
811 } 806 }
812 807
@@ -839,7 +834,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
839 mutex_unlock(&priv->mutex); 834 mutex_unlock(&priv->mutex);
840} 835}
841 836
842static void iwl_bg_abort_scan(struct work_struct *work) 837void iwl_bg_abort_scan(struct work_struct *work)
843{ 838{
844 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); 839 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
845 840
@@ -853,18 +848,19 @@ static void iwl_bg_abort_scan(struct work_struct *work)
853 848
854 mutex_unlock(&priv->mutex); 849 mutex_unlock(&priv->mutex);
855} 850}
851EXPORT_SYMBOL(iwl_bg_abort_scan);
856 852
857static void iwl_bg_scan_completed(struct work_struct *work) 853void iwl_bg_scan_completed(struct work_struct *work)
858{ 854{
859 struct iwl_priv *priv = 855 struct iwl_priv *priv =
860 container_of(work, struct iwl_priv, scan_completed); 856 container_of(work, struct iwl_priv, scan_completed);
861 857
862 IWL_DEBUG_SCAN("SCAN complete scan\n"); 858 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
863 859
864 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 860 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
865 return; 861 return;
866 862
867 ieee80211_scan_completed(priv->hw); 863 ieee80211_scan_completed(priv->hw, false);
868 864
869 /* Since setting the TXPOWER may have been deferred while 865 /* Since setting the TXPOWER may have been deferred while
870 * performing the scan, fire one off */ 866 * performing the scan, fire one off */
@@ -872,7 +868,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
872 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 868 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
873 mutex_unlock(&priv->mutex); 869 mutex_unlock(&priv->mutex);
874} 870}
875 871EXPORT_SYMBOL(iwl_bg_scan_completed);
876 872
877void iwl_setup_scan_deferred_work(struct iwl_priv *priv) 873void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
878{ 874{
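
Several scan helpers above -- iwl_send_scan_abort, iwl_fill_probe_req, the dwell-time calculators and the scan work handlers -- lose their static qualifier and gain EXPORT_SYMBOL so that other iwlwifi modules can reuse them instead of carrying private copies. For readers following the scan path, the sketch below shows the bare minimum a probe-request template contains: a management header addressed to the broadcast DA and wildcard BSSID, followed by a zero-length SSID IE (direct SSIDs ride separately in the scan command, as the one_direct_scan branch above shows). This is an illustration built on generic mac80211 definitions, not the driver's exact iwl_fill_probe_req.

#include <linux/ieee80211.h>
#include <linux/string.h>

/* Hedged sketch: fill a minimal probe-request template into "frame".
 * Returns the number of bytes used, or 0 if "left" is too small. */
static u16 example_fill_probe_req(struct ieee80211_mgmt *frame, int left)
{
	static const u8 bcast[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u8 *pos;
	u16 len = 24 + 2;		/* 802.11 mgmt header + empty SSID IE */

	if (left < len)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					   IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, bcast, ETH_ALEN);	/* broadcast destination */
	memcpy(frame->bssid, bcast, ETH_ALEN);	/* wildcard BSSID */
	/* frame->sa would be the device MAC address, set by the caller */
	frame->seq_ctrl = 0;

	/* Wildcard SSID IE: element id 0, length 0 */
	pos = frame->u.probe_req.variable;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	/* Supported-rates and extended-rates IEs follow in the real code. */
	return len;
}
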
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
index 836c3c80b69..022bcf11573 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -146,7 +146,7 @@ static int iwl_get_measurement(struct iwl_priv *priv,
146 146
147 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; 147 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
148 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 148 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
149 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); 149 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
150 rc = -EIO; 150 rc = -EIO;
151 } 151 }
152 152
@@ -154,9 +154,9 @@ static int iwl_get_measurement(struct iwl_priv *priv,
154 switch (spectrum_resp_status) { 154 switch (spectrum_resp_status) {
155 case 0: /* Command will be handled */ 155 case 0: /* Command will be handled */
156 if (res->u.spectrum.id != 0xff) { 156 if (res->u.spectrum.id != 0xff) {
157 IWL_DEBUG_INFO 157 IWL_DEBUG_INFO(priv,
158 ("Replaced existing measurement: %d\n", 158 "Replaced existing measurement: %d\n",
159 res->u.spectrum.id); 159 res->u.spectrum.id);
160 priv->measurement_status &= ~MEASUREMENT_READY; 160 priv->measurement_status &= ~MEASUREMENT_READY;
161 } 161 }
162 priv->measurement_status |= MEASUREMENT_ACTIVE; 162 priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -181,7 +181,7 @@ static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); 181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
182 182
183 if (!report->state) { 183 if (!report->state) {
184 IWL_DEBUG(IWL_DL_11H, 184 IWL_DEBUG_11H(priv,
185 "Spectrum Measure Notification: Start\n"); 185 "Spectrum Measure Notification: Start\n");
186 return; 186 return;
187 } 187 }
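
The spectrum-measurement change above also shows the driver's standard synchronous host-command round trip, the same skeleton used below in iwl-sta.c for REPLY_ADD_STA and REPLY_REMOVE_STA: build an iwl_host_cmd with CMD_WANT_SKB, send it, check IWL_CMD_FAILED_MSK in the response header, then switch on the command-specific status word before freeing the response skb. A condensed sketch of that pattern follows; the command id and payload are placeholders, and the field usage simply mirrors what the surrounding hunks already do.

/* Hedged sketch of the sync command/response pattern (placeholder cmd id). */
static int example_send_cmd_and_check(struct iwl_priv *priv, u8 cmd_id,
				      void *payload, u16 len)
{
	struct iwl_rx_packet *res;
	struct iwl_host_cmd cmd = {
		.id = cmd_id,
		.len = len,
		.data = payload,
		.meta.flags = CMD_WANT_SKB,	/* keep the response skb */
	};
	int ret;

	ret = iwl_send_cmd_sync(priv, &cmd);
	if (ret)
		return ret;

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from command 0x%02x\n", cmd_id);
		ret = -EIO;
	}

	/* A real caller switches on the command-specific status in res->u
	 * here (e.g. res->u.add_sta.status) before releasing the skb. */
	dev_kfree_skb_any(cmd.meta.u.skb);
	return ret;
}
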
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index b7d7943e476..a77c1e61906 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files. 5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 * 6 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 70a8b21ca39..1fae3a6bd8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -60,7 +60,7 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
60 goto out; 60 goto out;
61 } 61 }
62 62
63 IWL_DEBUG_ASSOC_LIMIT("can not find STA %pM total %d\n", 63 IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
64 addr, priv->num_stations); 64 addr, priv->num_stations);
65 65
66 out: 66 out:
@@ -86,11 +86,13 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
86 86
87 spin_lock_irqsave(&priv->sta_lock, flags); 87 spin_lock_irqsave(&priv->sta_lock, flags);
88 88
89 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) 89 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
90 IWL_ERROR("ACTIVATE a non DRIVER active station %d\n", sta_id); 90 !(priv->stations_39[sta_id].used & IWL_STA_DRIVER_ACTIVE))
91 IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n",
92 sta_id);
91 93
92 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; 94 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
93 IWL_DEBUG_ASSOC("Added STA to Ucode: %pM\n", 95 IWL_DEBUG_ASSOC(priv, "Added STA to Ucode: %pM\n",
94 priv->stations[sta_id].sta.sta.addr); 96 priv->stations[sta_id].sta.sta.addr);
95 97
96 spin_unlock_irqrestore(&priv->sta_lock, flags); 98 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -105,13 +107,13 @@ static int iwl_add_sta_callback(struct iwl_priv *priv,
105 u8 sta_id = addsta->sta.sta_id; 107 u8 sta_id = addsta->sta.sta_id;
106 108
107 if (!skb) { 109 if (!skb) {
108 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); 110 IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
109 return 1; 111 return 1;
110 } 112 }
111 113
112 res = (struct iwl_rx_packet *)skb->data; 114 res = (struct iwl_rx_packet *)skb->data;
113 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 115 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
114 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", 116 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
115 res->hdr.flags); 117 res->hdr.flags);
116 return 1; 118 return 1;
117 } 119 }
@@ -121,7 +123,7 @@ static int iwl_add_sta_callback(struct iwl_priv *priv,
121 iwl_sta_ucode_activate(priv, sta_id); 123 iwl_sta_ucode_activate(priv, sta_id);
122 /* fall through */ 124 /* fall through */
123 default: 125 default:
124 IWL_DEBUG_HC("Received REPLY_ADD_STA:(0x%08X)\n", 126 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
125 res->u.add_sta.status); 127 res->u.add_sta.status);
126 break; 128 break;
127 } 129 }
@@ -130,7 +132,7 @@ static int iwl_add_sta_callback(struct iwl_priv *priv,
130 return 1; 132 return 1;
131} 133}
132 134
133static int iwl_send_add_sta(struct iwl_priv *priv, 135int iwl_send_add_sta(struct iwl_priv *priv,
134 struct iwl_addsta_cmd *sta, u8 flags) 136 struct iwl_addsta_cmd *sta, u8 flags)
135{ 137{
136 struct iwl_rx_packet *res = NULL; 138 struct iwl_rx_packet *res = NULL;
@@ -155,7 +157,7 @@ static int iwl_send_add_sta(struct iwl_priv *priv,
155 157
156 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; 158 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
157 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 159 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
158 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", 160 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
159 res->hdr.flags); 161 res->hdr.flags);
160 ret = -EIO; 162 ret = -EIO;
161 } 163 }
@@ -164,11 +166,11 @@ static int iwl_send_add_sta(struct iwl_priv *priv,
164 switch (res->u.add_sta.status) { 166 switch (res->u.add_sta.status) {
165 case ADD_STA_SUCCESS_MSK: 167 case ADD_STA_SUCCESS_MSK:
166 iwl_sta_ucode_activate(priv, sta->sta.sta_id); 168 iwl_sta_ucode_activate(priv, sta->sta.sta_id);
167 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n"); 169 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
168 break; 170 break;
169 default: 171 default:
170 ret = -EIO; 172 ret = -EIO;
171 IWL_WARNING("REPLY_ADD_STA failed\n"); 173 IWL_WARN(priv, "REPLY_ADD_STA failed\n");
172 break; 174 break;
173 } 175 }
174 } 176 }
@@ -178,6 +180,7 @@ static int iwl_send_add_sta(struct iwl_priv *priv,
178 180
179 return ret; 181 return ret;
180} 182}
183EXPORT_SYMBOL(iwl_send_add_sta);
181 184
182static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, 185static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
183 struct ieee80211_sta_ht_cap *sta_ht_inf) 186 struct ieee80211_sta_ht_cap *sta_ht_inf)
@@ -204,7 +207,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
204 case WLAN_HT_CAP_SM_PS_DISABLED: 207 case WLAN_HT_CAP_SM_PS_DISABLED:
205 break; 208 break;
206 default: 209 default:
207 IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode); 210 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
208 break; 211 break;
209 } 212 }
210 213
@@ -269,7 +272,7 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
269 272
270 station = &priv->stations[sta_id]; 273 station = &priv->stations[sta_id];
271 station->used = IWL_STA_DRIVER_ACTIVE; 274 station->used = IWL_STA_DRIVER_ACTIVE;
272 IWL_DEBUG_ASSOC("Add STA to driver ID %d: %pM\n", 275 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
273 sta_id, addr); 276 sta_id, addr);
274 priv->num_stations++; 277 priv->num_stations++;
275 278
@@ -301,13 +304,13 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
301 304
302 BUG_ON(sta_id == IWL_INVALID_STATION); 305 BUG_ON(sta_id == IWL_INVALID_STATION);
303 306
304 IWL_DEBUG_ASSOC("Removed STA from Ucode: %pM\n", addr); 307 IWL_DEBUG_ASSOC(priv, "Removed STA from Ucode: %pM\n", addr);
305 308
306 spin_lock_irqsave(&priv->sta_lock, flags); 309 spin_lock_irqsave(&priv->sta_lock, flags);
307 310
308 /* Ucode must be active and driver must be non active */ 311 /* Ucode must be active and driver must be non active */
309 if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE) 312 if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE)
310 IWL_ERROR("removed non active STA %d\n", sta_id); 313 IWL_ERR(priv, "removed non active STA %d\n", sta_id);
311 314
312 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; 315 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
313 316
@@ -324,13 +327,13 @@ static int iwl_remove_sta_callback(struct iwl_priv *priv,
324 const char *addr = rm_sta->addr; 327 const char *addr = rm_sta->addr;
325 328
326 if (!skb) { 329 if (!skb) {
327 IWL_ERROR("Error: Response NULL in REPLY_REMOVE_STA.\n"); 330 IWL_ERR(priv, "Error: Response NULL in REPLY_REMOVE_STA.\n");
328 return 1; 331 return 1;
329 } 332 }
330 333
331 res = (struct iwl_rx_packet *)skb->data; 334 res = (struct iwl_rx_packet *)skb->data;
332 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 335 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
333 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n", 336 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
334 res->hdr.flags); 337 res->hdr.flags);
335 return 1; 338 return 1;
336 } 339 }
@@ -340,7 +343,7 @@ static int iwl_remove_sta_callback(struct iwl_priv *priv,
340 iwl_sta_ucode_deactivate(priv, addr); 343 iwl_sta_ucode_deactivate(priv, addr);
341 break; 344 break;
342 default: 345 default:
343 IWL_ERROR("REPLY_REMOVE_STA failed\n"); 346 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
344 break; 347 break;
345 } 348 }
346 349
@@ -378,7 +381,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
378 381
379 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; 382 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
380 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 383 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
381 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n", 384 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
382 res->hdr.flags); 385 res->hdr.flags);
383 ret = -EIO; 386 ret = -EIO;
384 } 387 }
@@ -387,11 +390,11 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
387 switch (res->u.rem_sta.status) { 390 switch (res->u.rem_sta.status) {
388 case REM_STA_SUCCESS_MSK: 391 case REM_STA_SUCCESS_MSK:
389 iwl_sta_ucode_deactivate(priv, addr); 392 iwl_sta_ucode_deactivate(priv, addr);
390 IWL_DEBUG_ASSOC("REPLY_REMOVE_STA PASSED\n"); 393 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
391 break; 394 break;
392 default: 395 default:
393 ret = -EIO; 396 ret = -EIO;
394 IWL_ERROR("REPLY_REMOVE_STA failed\n"); 397 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
395 break; 398 break;
396 } 399 }
397 } 400 }
@@ -429,17 +432,17 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
429 if (unlikely(sta_id == IWL_INVALID_STATION)) 432 if (unlikely(sta_id == IWL_INVALID_STATION))
430 goto out; 433 goto out;
431 434
432 IWL_DEBUG_ASSOC("Removing STA from driver:%d %pM\n", 435 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
433 sta_id, addr); 436 sta_id, addr);
434 437
435 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 438 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
436 IWL_ERROR("Removing %pM but non DRIVER active\n", 439 IWL_ERR(priv, "Removing %pM but non DRIVER active\n",
437 addr); 440 addr);
438 goto out; 441 goto out;
439 } 442 }
440 443
441 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 444 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
442 IWL_ERROR("Removing %pM but non UCODE active\n", 445 IWL_ERR(priv, "Removing %pM but non UCODE active\n",
443 addr); 446 addr);
444 goto out; 447 goto out;
445 } 448 }
@@ -475,7 +478,7 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
475 if (iwl_is_alive(priv) && 478 if (iwl_is_alive(priv) &&
476 !test_bit(STATUS_EXIT_PENDING, &priv->status) && 479 !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
477 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL)) 480 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
478 IWL_ERROR("Couldn't clear the station table\n"); 481 IWL_ERR(priv, "Couldn't clear the station table\n");
479 482
480 priv->num_stations = 0; 483 priv->num_stations = 0;
481 memset(priv->stations, 0, sizeof(priv->stations)); 484 memset(priv->stations, 0, sizeof(priv->stations));
@@ -551,13 +554,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
551 spin_lock_irqsave(&priv->sta_lock, flags); 554 spin_lock_irqsave(&priv->sta_lock, flags);
552 555
553 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) 556 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
554 IWL_ERROR("index %d not used in uCode key table.\n", 557 IWL_ERR(priv, "index %d not used in uCode key table.\n",
555 keyconf->keyidx); 558 keyconf->keyidx);
556 559
557 priv->default_wep_key--; 560 priv->default_wep_key--;
558 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 561 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
559 ret = iwl_send_static_wepkey_cmd(priv, 1); 562 ret = iwl_send_static_wepkey_cmd(priv, 1);
560 IWL_DEBUG_WEP("Remove default WEP key: idx=%d ret=%d\n", 563 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
561 keyconf->keyidx, ret); 564 keyconf->keyidx, ret);
562 spin_unlock_irqrestore(&priv->sta_lock, flags); 565 spin_unlock_irqrestore(&priv->sta_lock, flags);
563 566
@@ -573,7 +576,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
573 576
574 if (keyconf->keylen != WEP_KEY_LEN_128 && 577 if (keyconf->keylen != WEP_KEY_LEN_128 &&
575 keyconf->keylen != WEP_KEY_LEN_64) { 578 keyconf->keylen != WEP_KEY_LEN_64) {
576 IWL_DEBUG_WEP("Bad WEP key length %d\n", keyconf->keylen); 579 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
577 return -EINVAL; 580 return -EINVAL;
578 } 581 }
579 582
@@ -585,7 +588,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
585 priv->default_wep_key++; 588 priv->default_wep_key++;
586 589
587 if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table)) 590 if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
588 IWL_ERROR("index %d already used in uCode key table.\n", 591 IWL_ERR(priv, "index %d already used in uCode key table.\n",
589 keyconf->keyidx); 592 keyconf->keyidx);
590 593
591 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; 594 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
@@ -593,7 +596,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
593 keyconf->keylen); 596 keyconf->keylen);
594 597
595 ret = iwl_send_static_wepkey_cmd(priv, 0); 598 ret = iwl_send_static_wepkey_cmd(priv, 0);
596 IWL_DEBUG_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", 599 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
597 keyconf->keylen, keyconf->keyidx, ret); 600 keyconf->keylen, keyconf->keyidx, ret);
598 spin_unlock_irqrestore(&priv->sta_lock, flags); 601 spin_unlock_irqrestore(&priv->sta_lock, flags);
599 602
@@ -641,7 +644,7 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
641 * in uCode. */ 644 * in uCode. */
642 645
643 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 646 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
644 "no space for new kew"); 647 "no space for a new key");
645 648
646 priv->stations[sta_id].sta.key.key_flags = key_flags; 649 priv->stations[sta_id].sta.key.key_flags = key_flags;
647 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 650 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
@@ -689,7 +692,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
689 * in uCode. */ 692 * in uCode. */
690 693
691 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 694 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
692 "no space for new kew"); 695 "no space for a new key");
693 696
694 priv->stations[sta_id].sta.key.key_flags = key_flags; 697 priv->stations[sta_id].sta.key.key_flags = key_flags;
695 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 698 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
@@ -725,7 +728,7 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
725 * in uCode. */ 728 * in uCode. */
726 729
727 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 730 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
728 "no space for new kew"); 731 "no space for a new key");
729 732
730 /* This copy is acutally not needed: we get the key with each TX */ 733 /* This copy is acutally not needed: we get the key with each TX */
731 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 734 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -749,7 +752,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
749 752
750 sta_id = iwl_find_station(priv, addr); 753 sta_id = iwl_find_station(priv, addr);
751 if (sta_id == IWL_INVALID_STATION) { 754 if (sta_id == IWL_INVALID_STATION) {
752 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n", 755 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
753 addr); 756 addr);
754 return; 757 return;
755 } 758 }
@@ -801,7 +804,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
801 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); 804 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
802 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; 805 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
803 806
804 IWL_DEBUG_WEP("Remove dynamic key: idx=%d sta=%d\n", 807 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
805 keyconf->keyidx, sta_id); 808 keyconf->keyidx, sta_id);
806 809
807 if (keyconf->keyidx != keyidx) { 810 if (keyconf->keyidx != keyidx) {
@@ -815,7 +818,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
815 } 818 }
816 819
817 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { 820 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
818 IWL_WARNING("Removing wrong key %d 0x%x\n", 821 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
819 keyconf->keyidx, key_flags); 822 keyconf->keyidx, key_flags);
820 spin_unlock_irqrestore(&priv->sta_lock, flags); 823 spin_unlock_irqrestore(&priv->sta_lock, flags);
821 return 0; 824 return 0;
@@ -823,7 +826,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
823 826
824 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, 827 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
825 &priv->ucode_key_table)) 828 &priv->ucode_key_table))
826 IWL_ERROR("index %d not used in uCode key table.\n", 829 IWL_ERR(priv, "index %d not used in uCode key table.\n",
827 priv->stations[sta_id].sta.key.key_offset); 830 priv->stations[sta_id].sta.key.key_offset);
828 memset(&priv->stations[sta_id].keyinfo, 0, 831 memset(&priv->stations[sta_id].keyinfo, 0,
829 sizeof(struct iwl_hw_key)); 832 sizeof(struct iwl_hw_key));
@@ -860,11 +863,12 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
860 ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id); 863 ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id);
861 break; 864 break;
862 default: 865 default:
863 IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, keyconf->alg); 866 IWL_ERR(priv,
867 "Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
864 ret = -EINVAL; 868 ret = -EINVAL;
865 } 869 }
866 870
867 IWL_DEBUG_WEP("Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n", 871 IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n",
868 keyconf->alg, keyconf->keylen, keyconf->keyidx, 872 keyconf->alg, keyconf->keylen, keyconf->keyidx,
869 sta_id, ret); 873 sta_id, ret);
870 874
@@ -877,13 +881,13 @@ static void iwl_dump_lq_cmd(struct iwl_priv *priv,
877 struct iwl_link_quality_cmd *lq) 881 struct iwl_link_quality_cmd *lq)
878{ 882{
879 int i; 883 int i;
880 IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id); 884 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
881 IWL_DEBUG_RATE("lq ant 0x%X 0x%X\n", 885 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
882 lq->general_params.single_stream_ant_msk, 886 lq->general_params.single_stream_ant_msk,
883 lq->general_params.dual_stream_ant_msk); 887 lq->general_params.dual_stream_ant_msk);
884 888
885 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 889 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
886 IWL_DEBUG_RATE("lq index %d 0x%X\n", 890 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
887 i, lq->rs_table[i].rate_n_flags); 891 i, lq->rs_table[i].rate_n_flags);
888} 892}
889#else 893#else
@@ -1060,7 +1064,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1060 if (sta_id != IWL_INVALID_STATION) 1064 if (sta_id != IWL_INVALID_STATION)
1061 return sta_id; 1065 return sta_id;
1062 1066
1063 IWL_DEBUG_DROP("Station %pM not in station map. " 1067 IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
1064 "Defaulting to broadcast...\n", 1068 "Defaulting to broadcast...\n",
1065 hdr->addr1); 1069 hdr->addr1);
1066 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 1070 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
@@ -1072,7 +1076,8 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1072 return priv->hw_params.bcast_sta_id; 1076 return priv->hw_params.bcast_sta_id;
1073 1077
1074 default: 1078 default:
1075 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); 1079 IWL_WARN(priv, "Unknown mode of operation: %d\n",
1080 priv->iw_mode);
1076 return priv->hw_params.bcast_sta_id; 1081 return priv->hw_params.bcast_sta_id;
1077 } 1082 }
1078} 1083}
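
One detail worth tracking through the iwl-sta.c key-handling hunks above: every slot in the hardware (uCode) key table is accounted for in the priv->ucode_key_table bitmap. Installing a default WEP key does test_and_set_bit() on the key index, removing one does test_and_clear_bit(), and the IWL_ERR messages converted above fire exactly when the driver's view of the table and the bitmap disagree. Allocating a slot for a dynamic key can use the same bitmap; the helper below is a hedged sketch of that allocation (the driver has its own equivalent), reusing WEP_INVALID_OFFSET from the hunks above as the "table full" marker.

#include <linux/bitops.h>

/* Hedged sketch: claim the first free uCode key-table slot, or return
 * WEP_INVALID_OFFSET when every slot is already in use. */
static u8 example_get_free_ucode_key_offset(unsigned long *ucode_key_table,
					    u8 table_size)
{
	u8 i;

	for (i = 0; i < table_size; i++)
		if (!test_and_set_bit(i, ucode_key_table))
			return i;	/* slot was free and is now claimed */

	return WEP_INVALID_OFFSET;	/* no space for a new key */
}
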
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 9bb7cefc1f3..97f6169007f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -56,6 +56,8 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
56void iwl_clear_stations_table(struct iwl_priv *priv); 56void iwl_clear_stations_table(struct iwl_priv *priv);
57int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 57int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
58int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 58int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
59int iwl_send_add_sta(struct iwl_priv *priv,
60 struct iwl_addsta_cmd *sta, u8 flags);
59u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, 61u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr,
60 int is_ap, u8 flags, 62 int is_ap, u8 flags,
61 struct ieee80211_sta_ht_cap *ht_info); 63 struct ieee80211_sta_ht_cap *ht_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b0ee86c6268..ae04c2086f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -76,116 +76,6 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
76 memset(ptr, 0, sizeof(*ptr)); 76 memset(ptr, 0, sizeof(*ptr));
77} 77}
78 78
79static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
80{
81 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
82
83 dma_addr_t addr = get_unaligned_le32(&tb->lo);
84 if (sizeof(dma_addr_t) > sizeof(u32))
85 addr |=
86 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
87
88 return addr;
89}
90
91static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
92{
93 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
94
95 return le16_to_cpu(tb->hi_n_len) >> 4;
96}
97
98static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
99 dma_addr_t addr, u16 len)
100{
101 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
102 u16 hi_n_len = len << 4;
103
104 put_unaligned_le32(addr, &tb->lo);
105 if (sizeof(dma_addr_t) > sizeof(u32))
106 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
107
108 tb->hi_n_len = cpu_to_le16(hi_n_len);
109
110 tfd->num_tbs = idx + 1;
111}
112
113static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
114{
115 return tfd->num_tbs & 0x1f;
116}
117
118/**
119 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
120 * @priv - driver private data
121 * @txq - tx queue
122 *
123 * Does NOT advance any TFD circular buffer read/write indexes
124 * Does NOT free the TFD itself (which is within circular buffer)
125 */
126static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
127{
128 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
129 struct iwl_tfd *tfd;
130 struct pci_dev *dev = priv->pci_dev;
131 int index = txq->q.read_ptr;
132 int i;
133 int num_tbs;
134
135 tfd = &tfd_tmp[index];
136
137 /* Sanity check on number of chunks */
138 num_tbs = iwl_tfd_get_num_tbs(tfd);
139
140 if (num_tbs >= IWL_NUM_OF_TBS) {
141 IWL_ERROR("Too many chunks: %i\n", num_tbs);
142 /* @todo issue fatal error, it is quite a serious situation */
143 return;
144 }
145
146 /* Unmap tx_cmd */
147 if (num_tbs)
148 pci_unmap_single(dev,
149 pci_unmap_addr(&txq->cmd[index]->meta, mapping),
150 pci_unmap_len(&txq->cmd[index]->meta, len),
151 PCI_DMA_TODEVICE);
152
153 /* Unmap chunks, if any. */
154 for (i = 1; i < num_tbs; i++) {
155 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
156 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
157
158 if (txq->txb) {
159 dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
160 txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
161 }
162 }
163}
164
165static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
166 struct iwl_tfd *tfd,
167 dma_addr_t addr, u16 len)
168{
169
170 u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
171
172 /* Each TFD can point to a maximum of 20 Tx buffers */
173 if (num_tbs >= IWL_NUM_OF_TBS) {
174 IWL_ERROR("Error can not send more than %d chunks\n",
175 IWL_NUM_OF_TBS);
176 return -EINVAL;
177 }
178
179 BUG_ON(addr & ~DMA_BIT_MASK(36));
180 if (unlikely(addr & ~IWL_TX_DMA_MASK))
181 IWL_ERROR("Unaligned address = %llx\n",
182 (unsigned long long)addr);
183
184 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
185
186 return 0;
187}
188
189/** 79/**
190 * iwl_txq_update_write_ptr - Send new write index to hardware 80 * iwl_txq_update_write_ptr - Send new write index to hardware
191 */ 81 */
@@ -206,7 +96,7 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
206 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); 96 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
207 97
208 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 98 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
209 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); 99 IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
210 iwl_set_bit(priv, CSR_GP_CNTRL, 100 iwl_set_bit(priv, CSR_GP_CNTRL,
211 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 101 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
212 return ret; 102 return ret;
@@ -241,7 +131,7 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
241 * Free all buffers. 131 * Free all buffers.
242 * 0-fill, but do not free "txq" descriptor structure. 132 * 0-fill, but do not free "txq" descriptor structure.
243 */ 133 */
244static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) 134void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
245{ 135{
246 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 136 struct iwl_tx_queue *txq = &priv->txq[txq_id];
247 struct iwl_queue *q = &txq->q; 137 struct iwl_queue *q = &txq->q;
@@ -254,7 +144,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
254 /* first, empty all BD's */ 144 /* first, empty all BD's */
255 for (; q->write_ptr != q->read_ptr; 145 for (; q->write_ptr != q->read_ptr;
256 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) 146 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
257 iwl_hw_txq_free_tfd(priv, txq); 147 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
258 148
259 len = sizeof(struct iwl_cmd) * q->n_window; 149 len = sizeof(struct iwl_cmd) * q->n_window;
260 150
@@ -264,7 +154,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
264 154
265 /* De-alloc circular buffer of TFDs */ 155 /* De-alloc circular buffer of TFDs */
266 if (txq->q.n_bd) 156 if (txq->q.n_bd)
267 pci_free_consistent(dev, sizeof(struct iwl_tfd) * 157 pci_free_consistent(dev, priv->hw_params.tfd_size *
268 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 158 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
269 159
270 /* De-alloc array of per-TFD driver data */ 160 /* De-alloc array of per-TFD driver data */
@@ -274,7 +164,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
274 /* 0-fill queue descriptor structure */ 164 /* 0-fill queue descriptor structure */
275 memset(txq, 0, sizeof(*txq)); 165 memset(txq, 0, sizeof(*txq));
276} 166}
277 167EXPORT_SYMBOL(iwl_tx_queue_free);
278 168
279/** 169/**
280 * iwl_cmd_queue_free - Deallocate DMA queue. 170 * iwl_cmd_queue_free - Deallocate DMA queue.
@@ -388,6 +278,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
388 struct iwl_tx_queue *txq, u32 id) 278 struct iwl_tx_queue *txq, u32 id)
389{ 279{
390 struct pci_dev *dev = priv->pci_dev; 280 struct pci_dev *dev = priv->pci_dev;
281 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
391 282
392 /* Driver private data, only for Tx (not command) queues, 283 /* Driver private data, only for Tx (not command) queues,
393 * not shared with device. */ 284 * not shared with device. */
@@ -395,22 +286,20 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
395 txq->txb = kmalloc(sizeof(txq->txb[0]) * 286 txq->txb = kmalloc(sizeof(txq->txb[0]) *
396 TFD_QUEUE_SIZE_MAX, GFP_KERNEL); 287 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
397 if (!txq->txb) { 288 if (!txq->txb) {
398 IWL_ERROR("kmalloc for auxiliary BD " 289 IWL_ERR(priv, "kmalloc for auxiliary BD "
399 "structures failed\n"); 290 "structures failed\n");
400 goto error; 291 goto error;
401 } 292 }
402 } else 293 } else {
403 txq->txb = NULL; 294 txq->txb = NULL;
295 }
404 296
405 /* Circular buffer of transmit frame descriptors (TFDs), 297 /* Circular buffer of transmit frame descriptors (TFDs),
406 * shared with device */ 298 * shared with device */
407 txq->tfds = pci_alloc_consistent(dev, 299 txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
408 sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
409 &txq->q.dma_addr);
410 300
411 if (!txq->tfds) { 301 if (!txq->tfds) {
412 IWL_ERROR("pci_alloc_consistent(%zd) failed\n", 302 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
413 sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
414 goto error; 303 goto error;
415 } 304 }
416 txq->q.id = id; 305 txq->q.id = id;
@@ -424,42 +313,11 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
424 return -ENOMEM; 313 return -ENOMEM;
425} 314}
426 315
427/*
428 * Tell nic where to find circular buffer of Tx Frame Descriptors for
429 * given Tx queue, and enable the DMA channel used for that queue.
430 *
431 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
432 * channels supported in hardware.
433 */
434static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
435 struct iwl_tx_queue *txq)
436{
437 int ret;
438 unsigned long flags;
439 int txq_id = txq->q.id;
440
441 spin_lock_irqsave(&priv->lock, flags);
442 ret = iwl_grab_nic_access(priv);
443 if (ret) {
444 spin_unlock_irqrestore(&priv->lock, flags);
445 return ret;
446 }
447
448 /* Circular buffer (TFD queue in DRAM) physical base address */
449 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
450 txq->q.dma_addr >> 8);
451
452 iwl_release_nic_access(priv);
453 spin_unlock_irqrestore(&priv->lock, flags);
454
455 return 0;
456}
457
458/** 316/**
459 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue 317 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
460 */ 318 */
461static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 319int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
462 int slots_num, u32 txq_id) 320 int slots_num, u32 txq_id)
463{ 321{
464 int i, len; 322 int i, len;
465 int ret; 323 int ret;
@@ -501,7 +359,7 @@ static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
501 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); 359 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
502 360
503 /* Tell device where to find queue */ 361 /* Tell device where to find queue */
504 iwl_hw_tx_queue_init(priv, txq); 362 priv->cfg->ops->lib->txq_init(priv, txq);
505 363
506 return 0; 364 return 0;
507err: 365err:
@@ -516,6 +374,8 @@ err:
516 } 374 }
517 return -ENOMEM; 375 return -ENOMEM;
518} 376}
377EXPORT_SYMBOL(iwl_tx_queue_init);
378
519/** 379/**
520 * iwl_hw_txq_ctx_free - Free TXQ Context 380 * iwl_hw_txq_ctx_free - Free TXQ Context
521 * 381 *
@@ -557,13 +417,13 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
557 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls, 417 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
558 priv->hw_params.scd_bc_tbls_size); 418 priv->hw_params.scd_bc_tbls_size);
559 if (ret) { 419 if (ret) {
560 IWL_ERROR("Scheduler BC Table allocation failed\n"); 420 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
561 goto error_bc_tbls; 421 goto error_bc_tbls;
562 } 422 }
563 /* Alloc keep-warm buffer */ 423 /* Alloc keep-warm buffer */
564 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); 424 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
565 if (ret) { 425 if (ret) {
566 IWL_ERROR("Keep Warm allocation failed\n"); 426 IWL_ERR(priv, "Keep Warm allocation failed\n");
567 goto error_kw; 427 goto error_kw;
568 } 428 }
569 spin_lock_irqsave(&priv->lock, flags); 429 spin_lock_irqsave(&priv->lock, flags);
@@ -589,7 +449,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
589 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 449 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
590 txq_id); 450 txq_id);
591 if (ret) { 451 if (ret) {
592 IWL_ERROR("Tx %d queue init failed\n", txq_id); 452 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
593 goto error; 453 goto error;
594 } 454 }
595 } 455 }
@@ -778,14 +638,14 @@ static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
778 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); 638 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
779 if (info->flags & IEEE80211_TX_CTL_AMPDU) 639 if (info->flags & IEEE80211_TX_CTL_AMPDU)
780 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; 640 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
781 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n"); 641 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
782 break; 642 break;
783 643
784 case ALG_TKIP: 644 case ALG_TKIP:
785 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; 645 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
786 ieee80211_get_tkip_key(keyconf, skb_frag, 646 ieee80211_get_tkip_key(keyconf, skb_frag,
787 IEEE80211_TKIP_P2_KEY, tx_cmd->key); 647 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
788 IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n"); 648 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
789 break; 649 break;
790 650
791 case ALG_WEP: 651 case ALG_WEP:
@@ -797,12 +657,12 @@ static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
797 657
798 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); 658 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
799 659
800 IWL_DEBUG_TX("Configuring packet for WEP encryption " 660 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
801 "with key %d\n", keyconf->keyidx); 661 "with key %d\n", keyconf->keyidx);
802 break; 662 break;
803 663
804 default: 664 default:
805 printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg); 665 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
806 break; 666 break;
807 } 667 }
808} 668}
@@ -822,7 +682,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
822{ 682{
823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 683 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
824 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 684 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
825 struct iwl_tfd *tfd;
826 struct iwl_tx_queue *txq; 685 struct iwl_tx_queue *txq;
827 struct iwl_queue *q; 686 struct iwl_queue *q;
828 struct iwl_cmd *out_cmd; 687 struct iwl_cmd *out_cmd;
@@ -844,13 +703,13 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
844 703
845 spin_lock_irqsave(&priv->lock, flags); 704 spin_lock_irqsave(&priv->lock, flags);
846 if (iwl_is_rfkill(priv)) { 705 if (iwl_is_rfkill(priv)) {
847 IWL_DEBUG_DROP("Dropping - RF KILL\n"); 706 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
848 goto drop_unlock; 707 goto drop_unlock;
849 } 708 }
850 709
851 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == 710 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
852 IWL_INVALID_RATE) { 711 IWL_INVALID_RATE) {
853 IWL_ERROR("ERROR: No TX rate available.\n"); 712 IWL_ERR(priv, "ERROR: No TX rate available.\n");
854 goto drop_unlock; 713 goto drop_unlock;
855 } 714 }
856 715
@@ -858,11 +717,11 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
858 717
859#ifdef CONFIG_IWLWIFI_DEBUG 718#ifdef CONFIG_IWLWIFI_DEBUG
860 if (ieee80211_is_auth(fc)) 719 if (ieee80211_is_auth(fc))
861 IWL_DEBUG_TX("Sending AUTH frame\n"); 720 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
862 else if (ieee80211_is_assoc_req(fc)) 721 else if (ieee80211_is_assoc_req(fc))
863 IWL_DEBUG_TX("Sending ASSOC frame\n"); 722 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
864 else if (ieee80211_is_reassoc_req(fc)) 723 else if (ieee80211_is_reassoc_req(fc))
865 IWL_DEBUG_TX("Sending REASSOC frame\n"); 724 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
866#endif 725#endif
867 726
868 /* drop all data frame if we are not associated */ 727 /* drop all data frame if we are not associated */
@@ -872,7 +731,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
872 (!iwl_is_associated(priv) || 731 (!iwl_is_associated(priv) ||
873 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) || 732 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
874 !priv->assoc_station_added)) { 733 !priv->assoc_station_added)) {
875 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); 734 IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
876 goto drop_unlock; 735 goto drop_unlock;
877 } 736 }
878 737
@@ -883,12 +742,12 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
883 /* Find (or create) index into station table for destination station */ 742 /* Find (or create) index into station table for destination station */
884 sta_id = iwl_get_sta_id(priv, hdr); 743 sta_id = iwl_get_sta_id(priv, hdr);
885 if (sta_id == IWL_INVALID_STATION) { 744 if (sta_id == IWL_INVALID_STATION) {
886 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n", 745 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
887 hdr->addr1); 746 hdr->addr1);
888 goto drop; 747 goto drop;
889 } 748 }
890 749
891 IWL_DEBUG_TX("station Id %d\n", sta_id); 750 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
892 751
893 swq_id = skb_get_queue_mapping(skb); 752 swq_id = skb_get_queue_mapping(skb);
894 txq_id = swq_id; 753 txq_id = swq_id;
@@ -898,7 +757,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
898 seq_number = priv->stations[sta_id].tid[tid].seq_number; 757 seq_number = priv->stations[sta_id].tid[tid].seq_number;
899 seq_number &= IEEE80211_SCTL_SEQ; 758 seq_number &= IEEE80211_SCTL_SEQ;
900 hdr->seq_ctrl = hdr->seq_ctrl & 759 hdr->seq_ctrl = hdr->seq_ctrl &
901 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG); 760 cpu_to_le16(IEEE80211_SCTL_FRAG);
902 hdr->seq_ctrl |= cpu_to_le16(seq_number); 761 hdr->seq_ctrl |= cpu_to_le16(seq_number);
903 seq_number += 0x10; 762 seq_number += 0x10;
904 /* aggregation is on for this <sta,tid> */ 763 /* aggregation is on for this <sta,tid> */
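The seq_ctrl handling in the hunk above relies on the 802.11 sequence-control layout: the fragment number sits in the low 4 bits (IEEE80211_SCTL_FRAG == 0x000F) and the sequence number in bits 4-15 (IEEE80211_SCTL_SEQ == 0xFFF0), which is why the driver-side counter advances by 0x10 per MPDU. A standalone sketch of that bookkeeping (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F	/* fragment number, bits 0-3  */
#define SCTL_SEQ  0xFFF0	/* sequence number, bits 4-15 */

int main(void)
{
	uint16_t seq_counter = 0x0FF0;	/* driver counter, already << 4 */
	uint16_t hdr_seq_ctrl = 0x0007;	/* incoming frame: fragment 7   */
	int i;

	for (i = 0; i < 3; i++) {
		/* keep the fragment bits, replace the sequence bits */
		hdr_seq_ctrl = (hdr_seq_ctrl & SCTL_FRAG) |
			       (seq_counter & SCTL_SEQ);
		printf("frame %d: seq_ctrl=0x%04x (seq=%u frag=%u)\n", i,
		       hdr_seq_ctrl, hdr_seq_ctrl >> 4,
		       hdr_seq_ctrl & SCTL_FRAG);
		seq_counter += 0x10;	/* next MPDU, next sequence number */
	}
	return 0;
}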
@@ -913,10 +772,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
913 772
914 spin_lock_irqsave(&priv->lock, flags); 773 spin_lock_irqsave(&priv->lock, flags);
915 774
916 /* Set up first empty TFD within this queue's circular TFD buffer */
917 tfd = &txq->tfds[q->write_ptr];
918 memset(tfd, 0, sizeof(*tfd));
919
920 /* Set up driver data for this TFD */ 775 /* Set up driver data for this TFD */
921 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 776 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
922 txq->txb[q->write_ptr].skb[0] = skb; 777 txq->txb[q->write_ptr].skb[0] = skb;
@@ -970,7 +825,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
970 /* Add buffer containing Tx command and MAC(!) header to TFD's 825 /* Add buffer containing Tx command and MAC(!) header to TFD's
971 * first entry */ 826 * first entry */
972 txcmd_phys += offsetof(struct iwl_cmd, hdr); 827 txcmd_phys += offsetof(struct iwl_cmd, hdr);
973 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 828 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
829 txcmd_phys, len, 1, 0);
974 830
975 if (info->control.hw_key) 831 if (info->control.hw_key)
976 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); 832 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
@@ -981,7 +837,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
981 if (len) { 837 if (len) {
982 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 838 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
983 len, PCI_DMA_TODEVICE); 839 len, PCI_DMA_TODEVICE);
984 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); 840 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
841 phys_addr, len,
842 0, 0);
985 } 843 }
986 844
987 /* Tell NIC about any 2-byte padding after MAC header */ 845 /* Tell NIC about any 2-byte padding after MAC header */
@@ -1063,7 +921,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1063{ 921{
1064 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 922 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1065 struct iwl_queue *q = &txq->q; 923 struct iwl_queue *q = &txq->q;
1066 struct iwl_tfd *tfd;
1067 struct iwl_cmd *out_cmd; 924 struct iwl_cmd *out_cmd;
1068 dma_addr_t phys_addr; 925 dma_addr_t phys_addr;
1069 unsigned long flags; 926 unsigned long flags;
@@ -1081,21 +938,17 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1081 !(cmd->meta.flags & CMD_SIZE_HUGE)); 938 !(cmd->meta.flags & CMD_SIZE_HUGE));
1082 939
1083 if (iwl_is_rfkill(priv)) { 940 if (iwl_is_rfkill(priv)) {
1084 IWL_DEBUG_INFO("Not sending command - RF KILL"); 941 IWL_DEBUG_INFO(priv, "Not sending command - RF KILL");
1085 return -EIO; 942 return -EIO;
1086 } 943 }
1087 944
1088 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { 945 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
1089 IWL_ERROR("No space for Tx\n"); 946 IWL_ERR(priv, "No space for Tx\n");
1090 return -ENOSPC; 947 return -ENOSPC;
1091 } 948 }
1092 949
1093 spin_lock_irqsave(&priv->hcmd_lock, flags); 950 spin_lock_irqsave(&priv->hcmd_lock, flags);
1094 951
1095 tfd = &txq->tfds[q->write_ptr];
1096 memset(tfd, 0, sizeof(*tfd));
1097
1098
1099 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE); 952 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
1100 out_cmd = txq->cmd[idx]; 953 out_cmd = txq->cmd[idx];
1101 954
@@ -1120,13 +973,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1120 pci_unmap_len_set(&out_cmd->meta, len, len); 973 pci_unmap_len_set(&out_cmd->meta, len, len);
1121 phys_addr += offsetof(struct iwl_cmd, hdr); 974 phys_addr += offsetof(struct iwl_cmd, hdr);
1122 975
1123 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); 976 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
977 phys_addr, fix_size, 1,
978 U32_PAD(cmd->len));
1124 979
1125#ifdef CONFIG_IWLWIFI_DEBUG 980#ifdef CONFIG_IWLWIFI_DEBUG
1126 switch (out_cmd->hdr.cmd) { 981 switch (out_cmd->hdr.cmd) {
1127 case REPLY_TX_LINK_QUALITY_CMD: 982 case REPLY_TX_LINK_QUALITY_CMD:
1128 case SENSITIVITY_CMD: 983 case SENSITIVITY_CMD:
1129 IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " 984 IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
1130 "%d bytes at %d[%d]:%d\n", 985 "%d bytes at %d[%d]:%d\n",
1131 get_cmd_string(out_cmd->hdr.cmd), 986 get_cmd_string(out_cmd->hdr.cmd),
1132 out_cmd->hdr.cmd, 987 out_cmd->hdr.cmd,
@@ -1134,7 +989,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1134 q->write_ptr, idx, IWL_CMD_QUEUE_NUM); 989 q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1135 break; 990 break;
1136 default: 991 default:
1137 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " 992 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
1138 "%d bytes at %d[%d]:%d\n", 993 "%d bytes at %d[%d]:%d\n",
1139 get_cmd_string(out_cmd->hdr.cmd), 994 get_cmd_string(out_cmd->hdr.cmd),
1140 out_cmd->hdr.cmd, 995 out_cmd->hdr.cmd,
@@ -1144,8 +999,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1144#endif 999#endif
1145 txq->need_update = 1; 1000 txq->need_update = 1;
1146 1001
1147 /* Set up entry in queue's byte count circular buffer */ 1002 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
1148 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); 1003 /* Set up entry in queue's byte count circular buffer */
1004 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
1149 1005
1150 /* Increment and update queue's write index */ 1006 /* Increment and update queue's write index */
1151 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1007 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -1163,7 +1019,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1163 int nfreed = 0; 1019 int nfreed = 0;
1164 1020
1165 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1021 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1166 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " 1022 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1167 "is out of range [0-%d] %d %d.\n", txq_id, 1023 "is out of range [0-%d] %d %d.\n", txq_id,
1168 index, q->n_bd, q->write_ptr, q->read_ptr); 1024 index, q->n_bd, q->write_ptr, q->read_ptr);
1169 return 0; 1025 return 0;
@@ -1180,7 +1036,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1180 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) 1036 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1181 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); 1037 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1182 1038
1183 iwl_hw_txq_free_tfd(priv, txq); 1039 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1184 nfreed++; 1040 nfreed++;
1185 } 1041 }
1186 return nfreed; 1042 return nfreed;
@@ -1203,7 +1059,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1203 int nfreed = 0; 1059 int nfreed = 0;
1204 1060
1205 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { 1061 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
1206 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " 1062 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1207 "is out of range [0-%d] %d %d.\n", txq_id, 1063 "is out of range [0-%d] %d %d.\n", txq_id,
1208 idx, q->n_bd, q->write_ptr, q->read_ptr); 1064 idx, q->n_bd, q->write_ptr, q->read_ptr);
1209 return; 1065 return;
@@ -1218,7 +1074,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1218 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1074 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1219 1075
1220 if (nfreed++ > 0) { 1076 if (nfreed++ > 0) {
1221 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx, 1077 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
1222 q->write_ptr, q->read_ptr); 1078 q->write_ptr, q->read_ptr);
1223 queue_work(priv->workqueue, &priv->restart); 1079 queue_work(priv->workqueue, &priv->restart);
1224 } 1080 }
@@ -1306,7 +1162,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1306 else 1162 else
1307 return -EINVAL; 1163 return -EINVAL;
1308 1164
1309 IWL_WARNING("%s on ra = %pM tid = %d\n", 1165 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
1310 __func__, ra, tid); 1166 __func__, ra, tid);
1311 1167
1312 sta_id = iwl_find_station(priv, ra); 1168 sta_id = iwl_find_station(priv, ra);
@@ -1314,7 +1170,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1314 return -ENXIO; 1170 return -ENXIO;
1315 1171
1316 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 1172 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1317 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); 1173 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
1318 return -ENXIO; 1174 return -ENXIO;
1319 } 1175 }
1320 1176
@@ -1334,11 +1190,11 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1334 return ret; 1190 return ret;
1335 1191
1336 if (tid_data->tfds_in_queue == 0) { 1192 if (tid_data->tfds_in_queue == 0) {
1337 printk(KERN_ERR "HW queue is empty\n"); 1193 IWL_ERR(priv, "HW queue is empty\n");
1338 tid_data->agg.state = IWL_AGG_ON; 1194 tid_data->agg.state = IWL_AGG_ON;
1339 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid); 1195 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
1340 } else { 1196 } else {
1341 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n", 1197 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
1342 tid_data->tfds_in_queue); 1198 tid_data->tfds_in_queue);
1343 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; 1199 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1344 } 1200 }
@@ -1354,7 +1210,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1354 unsigned long flags; 1210 unsigned long flags;
1355 1211
1356 if (!ra) { 1212 if (!ra) {
1357 IWL_ERROR("ra = NULL\n"); 1213 IWL_ERR(priv, "ra = NULL\n");
1358 return -EINVAL; 1214 return -EINVAL;
1359 } 1215 }
1360 1216
@@ -1369,7 +1225,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1369 return -ENXIO; 1225 return -ENXIO;
1370 1226
1371 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) 1227 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1372 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n"); 1228 IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
1373 1229
1374 tid_data = &priv->stations[sta_id].tid[tid]; 1230 tid_data = &priv->stations[sta_id].tid[tid];
1375 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 1231 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
@@ -1379,13 +1235,13 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1379 1235
1380 /* The queue is not empty */ 1236 /* The queue is not empty */
1381 if (write_ptr != read_ptr) { 1237 if (write_ptr != read_ptr) {
1382 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n"); 1238 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1383 priv->stations[sta_id].tid[tid].agg.state = 1239 priv->stations[sta_id].tid[tid].agg.state =
1384 IWL_EMPTYING_HW_QUEUE_DELBA; 1240 IWL_EMPTYING_HW_QUEUE_DELBA;
1385 return 0; 1241 return 0;
1386 } 1242 }
1387 1243
1388 IWL_DEBUG_HT("HW queue is empty\n"); 1244 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1389 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 1245 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1390 1246
1391 spin_lock_irqsave(&priv->lock, flags); 1247 spin_lock_irqsave(&priv->lock, flags);
@@ -1416,7 +1272,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1416 (q->read_ptr == q->write_ptr)) { 1272 (q->read_ptr == q->write_ptr)) {
1417 u16 ssn = SEQ_TO_SN(tid_data->seq_number); 1273 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1418 int tx_fifo = default_tid_to_tx_fifo[tid]; 1274 int tx_fifo = default_tid_to_tx_fifo[tid];
1419 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n"); 1275 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1420 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, 1276 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1421 ssn, tx_fifo); 1277 ssn, tx_fifo);
1422 tid_data->agg.state = IWL_AGG_OFF; 1278 tid_data->agg.state = IWL_AGG_OFF;
@@ -1426,7 +1282,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1426 case IWL_EMPTYING_HW_QUEUE_ADDBA: 1282 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1427 /* We are reclaiming the last packet of the queue */ 1283 /* We are reclaiming the last packet of the queue */
1428 if (tid_data->tfds_in_queue == 0) { 1284 if (tid_data->tfds_in_queue == 0) {
1429 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n"); 1285 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1430 tid_data->agg.state = IWL_AGG_ON; 1286 tid_data->agg.state = IWL_AGG_ON;
1431 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid); 1287 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
1432 } 1288 }
@@ -1455,13 +1311,13 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1455 struct ieee80211_tx_info *info; 1311 struct ieee80211_tx_info *info;
1456 1312
1457 if (unlikely(!agg->wait_for_ba)) { 1313 if (unlikely(!agg->wait_for_ba)) {
1458 IWL_ERROR("Received BA when not expected\n"); 1314 IWL_ERR(priv, "Received BA when not expected\n");
1459 return -EINVAL; 1315 return -EINVAL;
1460 } 1316 }
1461 1317
1462 /* Mark that the expected block-ack response arrived */ 1318 /* Mark that the expected block-ack response arrived */
1463 agg->wait_for_ba = 0; 1319 agg->wait_for_ba = 0;
1464 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); 1320 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1465 1321
1466 /* Calculate shift to align block-ack bits with our Tx window bits */ 1322 /* Calculate shift to align block-ack bits with our Tx window bits */
1467 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); 1323 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
@@ -1472,7 +1328,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1472 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; 1328 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1473 1329
1474 if (agg->frame_count > (64 - sh)) { 1330 if (agg->frame_count > (64 - sh)) {
1475 IWL_DEBUG_TX_REPLY("more frames than bitmap size"); 1331 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1476 return -1; 1332 return -1;
1477 } 1333 }
1478 1334
@@ -1485,7 +1341,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1485 for (i = 0; i < agg->frame_count ; i++) { 1341 for (i = 0; i < agg->frame_count ; i++) {
1486 ack = bitmap & (1ULL << i); 1342 ack = bitmap & (1ULL << i);
1487 successes += !!ack; 1343 successes += !!ack;
1488 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", 1344 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1489 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, 1345 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1490 agg->start_idx + i); 1346 agg->start_idx + i);
1491 } 1347 }
@@ -1498,7 +1354,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1498 info->status.ampdu_ack_len = agg->frame_count; 1354 info->status.ampdu_ack_len = agg->frame_count;
1499 iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info); 1355 iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1500 1356
1501 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap); 1357 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
1502 1358
1503 return 0; 1359 return 0;
1504} 1360}
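iwl_tx_status_reply_compressed_ba above lines the 64-bit block-ack bitmap up with the driver's aggregation window by shifting it so bit 0 matches the first frame of the window, then counts the acknowledged frames. A simplified user-space sketch of that alignment; the modulo-256 wrap of the index difference is an assumption about the part of the function elided from this hunk:

#include <stdint.h>
#include <stdio.h>

/* Count ACKed frames once the BA bitmap is aligned with our Tx window. */
static int count_acked(uint64_t ba_bitmap, int ba_start_idx,
		       int win_start_idx, int frame_count)
{
	/* shift so that bit 0 of the bitmap maps to win_start_idx */
	int sh = (win_start_idx - ba_start_idx) & 0xff;	/* assumed wrap */
	uint64_t bitmap = ba_bitmap >> sh;
	int i, successes = 0;

	if (frame_count > 64 - sh)
		return -1;	/* more frames than the bitmap can describe */

	for (i = 0; i < frame_count; i++)
		successes += !!(bitmap & (1ULL << i));

	return successes;
}

int main(void)
{
	/* window starts at index 12, BA reports from index 10, 5 frames sent */
	uint64_t ba = 0x7cULL;	/* bits 2-6 set: all 5 window frames ACKed */

	printf("acked: %d of 5\n", count_acked(ba, 10, 12, 5));
	return 0;
}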
@@ -1528,7 +1384,8 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1528 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 1384 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1529 1385
1530 if (scd_flow >= priv->hw_params.max_txq_num) { 1386 if (scd_flow >= priv->hw_params.max_txq_num) {
1531 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n"); 1387 IWL_ERR(priv,
1388 "BUG_ON scd_flow is bigger than number of queues\n");
1532 return; 1389 return;
1533 } 1390 }
1534 1391
@@ -1542,19 +1399,19 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1542 1399
1543 /* TODO: Need to get this copy more safely - now good for debug */ 1400 /* TODO: Need to get this copy more safely - now good for debug */
1544 1401
1545 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, " 1402 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1546 "sta_id = %d\n", 1403 "sta_id = %d\n",
1547 agg->wait_for_ba, 1404 agg->wait_for_ba,
1548 (u8 *) &ba_resp->sta_addr_lo32, 1405 (u8 *) &ba_resp->sta_addr_lo32,
1549 ba_resp->sta_id); 1406 ba_resp->sta_id);
1550 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " 1407 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1551 "%d, scd_ssn = %d\n", 1408 "%d, scd_ssn = %d\n",
1552 ba_resp->tid, 1409 ba_resp->tid,
1553 ba_resp->seq_ctl, 1410 ba_resp->seq_ctl,
1554 (unsigned long long)le64_to_cpu(ba_resp->bitmap), 1411 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1555 ba_resp->scd_flow, 1412 ba_resp->scd_flow,
1556 ba_resp->scd_ssn); 1413 ba_resp->scd_ssn);
1557 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n", 1414 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
1558 agg->start_idx, 1415 agg->start_idx,
1559 (unsigned long long)agg->bitmap); 1416 (unsigned long long)agg->bitmap);
1560 1417
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 93be74a1f13..0cd8cb96a5e 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -46,40 +46,25 @@
46 46
47#include <asm/div64.h> 47#include <asm/div64.h>
48 48
49#include "iwl-3945-core.h" 49#define DRV_NAME "iwl3945"
50
51#include "iwl-fh.h"
52#include "iwl-3945-fh.h"
53#include "iwl-commands.h"
54#include "iwl-sta.h"
50#include "iwl-3945.h" 55#include "iwl-3945.h"
51#include "iwl-helpers.h" 56#include "iwl-helpers.h"
52 57#include "iwl-core.h"
53#ifdef CONFIG_IWL3945_DEBUG 58#include "iwl-dev.h"
54u32 iwl3945_debug_level;
55#endif
56
57static int iwl3945_tx_queue_update_write_ptr(struct iwl3945_priv *priv,
58 struct iwl3945_tx_queue *txq);
59
60/******************************************************************************
61 *
62 * module boilerplate
63 *
64 ******************************************************************************/
65
66/* module parameters */
67static int iwl3945_param_disable_hw_scan; /* def: 0 = use 3945's h/w scan */
68static u32 iwl3945_param_debug; /* def: 0 = minimal debug log messages */
69static int iwl3945_param_disable; /* def: 0 = enable radio */
70static int iwl3945_param_antenna; /* def: 0 = both antennas (use diversity) */
71int iwl3945_param_hwcrypto; /* def: 0 = use software encryption */
72int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */
73 59
74/* 60/*
75 * module name, copyright, version, etc. 61 * module name, copyright, version, etc.
76 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
77 */ 62 */
78 63
79#define DRV_DESCRIPTION \ 64#define DRV_DESCRIPTION \
80"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" 65"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
81 66
82#ifdef CONFIG_IWL3945_DEBUG 67#ifdef CONFIG_IWLWIFI_DEBUG
83#define VD "d" 68#define VD "d"
84#else 69#else
85#define VD 70#define VD
@@ -91,10 +76,10 @@ int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */
91#define VS 76#define VS
92#endif 77#endif
93 78
94#define IWLWIFI_VERSION "1.2.26k" VD VS 79#define IWL39_VERSION "1.2.26k" VD VS
95#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation" 80#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
96#define DRV_AUTHOR "<ilw@linux.intel.com>" 81#define DRV_AUTHOR "<ilw@linux.intel.com>"
97#define DRV_VERSION IWLWIFI_VERSION 82#define DRV_VERSION IWL39_VERSION
98 83
99 84
100MODULE_DESCRIPTION(DRV_DESCRIPTION); 85MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -102,235 +87,13 @@ MODULE_VERSION(DRV_VERSION);
102MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
103MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
104 89
105static const struct ieee80211_supported_band *iwl3945_get_band( 90 /* module parameters */
106 struct iwl3945_priv *priv, enum ieee80211_band band) 91struct iwl_mod_params iwl3945_mod_params = {
107{ 92 .num_of_queues = IWL39_MAX_NUM_QUEUES,
108 return priv->hw->wiphy->bands[band]; 93 .sw_crypto = 1,
109} 94 .restart_fw = 1,
110 95 /* the rest are 0 by default */
111/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 96};
112 * DMA services
113 *
114 * Theory of operation
115 *
116 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
117 * of buffer descriptors, each of which points to one or more data buffers for
118 * the device to read from or fill. Driver and device exchange status of each
119 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
120 * entries in each circular buffer, to protect against confusing empty and full
121 * queue states.
122 *
123 * The device reads or writes the data in the queues via the device's several
124 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
125 *
126 * For the Tx queue, there are low mark and high mark limits. If, after queuing
127 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
128 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
129 * the Tx queue is resumed.
130 *
131 * The 3945 operates with six queues: One receive queue, one transmit queue
132 * (#4) for sending commands to the device firmware, and four transmit queues
133 * (#0-3) for data tx via EDCA. An additional 2 HCCA queues are unused.
134 ***************************************************/
135
136int iwl3945_queue_space(const struct iwl3945_queue *q)
137{
138 int s = q->read_ptr - q->write_ptr;
139
140 if (q->read_ptr > q->write_ptr)
141 s -= q->n_bd;
142
143 if (s <= 0)
144 s += q->n_window;
145 /* keep some reserve to not confuse empty and full situations */
146 s -= 2;
147 if (s < 0)
148 s = 0;
149 return s;
150}
151
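iwl3945_queue_space above derives the number of free TFD slots from the read/write pointers, keeping two slots in reserve so a full ring can never look identical to an empty one. The same arithmetic as a standalone sketch (not kernel code; simplified to a single ring size, whereas the real function wraps on n_bd but windows on n_window):

#include <stdio.h>

#define N_BD 256	/* ring size, power of two */

/* Free slots between write_ptr (producer) and read_ptr (consumer),
 * keeping two in reserve so "full" never looks like "empty". */
static int queue_space(int read_ptr, int write_ptr)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)
		s -= N_BD;
	if (s <= 0)
		s += N_BD;
	s -= 2;			/* the reserve */
	return s < 0 ? 0 : s;
}

int main(void)
{
	printf("empty ring:   %d free\n", queue_space(0, 0));	 /* 254 */
	printf("245 in use:   %d free\n", queue_space(5, 250));	 /* 9   */
	printf("wrapped ring: %d free\n", queue_space(200, 50)); /* 148 */
	return 0;
}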
152int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i)
153{
154 return q->write_ptr > q->read_ptr ?
155 (i >= q->read_ptr && i < q->write_ptr) :
156 !(i < q->read_ptr && i >= q->write_ptr);
157}
158
159
160static inline u8 get_cmd_index(struct iwl3945_queue *q, u32 index, int is_huge)
161{
162 /* This is for scan command, the big buffer at end of command array */
163 if (is_huge)
164 return q->n_window; /* must be power of 2 */
165
166 /* Otherwise, use normal size buffers */
167 return index & (q->n_window - 1);
168}
169
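get_cmd_index above reserves one oversized slot at the end of the command array for the scan command: normal commands are indexed with a power-of-two mask, while a "huge" command always maps to slot n_window. A tiny sketch of the mapping (not kernel code):

#include <stdio.h>

#define N_WINDOW 8	/* normal command slots; must be a power of two */

/* Slot selection: huge (scan) commands get the extra slot past the window. */
static int cmd_index(int write_ptr, int is_huge)
{
	if (is_huge)
		return N_WINDOW;		/* single oversized slot */
	return write_ptr & (N_WINDOW - 1);	/* ring index, wraps at 8 */
}

int main(void)
{
	printf("write_ptr 13 -> slot %d\n", cmd_index(13, 0));	/* 5 */
	printf("scan cmd     -> slot %d\n", cmd_index(13, 1));	/* 8 */
	return 0;
}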
170/**
171 * iwl3945_queue_init - Initialize queue's high/low-water and read/write indexes
172 */
173static int iwl3945_queue_init(struct iwl3945_priv *priv, struct iwl3945_queue *q,
174 int count, int slots_num, u32 id)
175{
176 q->n_bd = count;
177 q->n_window = slots_num;
178 q->id = id;
179
180 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
181 * and iwl_queue_dec_wrap are broken. */
182 BUG_ON(!is_power_of_2(count));
183
184 /* slots_num must be power-of-two size, otherwise
185 * get_cmd_index is broken. */
186 BUG_ON(!is_power_of_2(slots_num));
187
188 q->low_mark = q->n_window / 4;
189 if (q->low_mark < 4)
190 q->low_mark = 4;
191
192 q->high_mark = q->n_window / 8;
193 if (q->high_mark < 2)
194 q->high_mark = 2;
195
196 q->write_ptr = q->read_ptr = 0;
197
198 return 0;
199}
200
201/**
202 * iwl3945_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
203 */
204static int iwl3945_tx_queue_alloc(struct iwl3945_priv *priv,
205 struct iwl3945_tx_queue *txq, u32 id)
206{
207 struct pci_dev *dev = priv->pci_dev;
208
209 /* Driver private data, only for Tx (not command) queues,
210 * not shared with device. */
211 if (id != IWL_CMD_QUEUE_NUM) {
212 txq->txb = kmalloc(sizeof(txq->txb[0]) *
213 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
214 if (!txq->txb) {
215 IWL_ERROR("kmalloc for auxiliary BD "
216 "structures failed\n");
217 goto error;
218 }
219 } else
220 txq->txb = NULL;
221
222 /* Circular buffer of transmit frame descriptors (TFDs),
223 * shared with device */
224 txq->bd = pci_alloc_consistent(dev,
225 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
226 &txq->q.dma_addr);
227
228 if (!txq->bd) {
229 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
230 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
231 goto error;
232 }
233 txq->q.id = id;
234
235 return 0;
236
237 error:
238 kfree(txq->txb);
239 txq->txb = NULL;
240
241 return -ENOMEM;
242}
243
244/**
245 * iwl3945_tx_queue_init - Allocate and initialize one tx/cmd queue
246 */
247int iwl3945_tx_queue_init(struct iwl3945_priv *priv,
248 struct iwl3945_tx_queue *txq, int slots_num, u32 txq_id)
249{
250 struct pci_dev *dev = priv->pci_dev;
251 int len;
252 int rc = 0;
253
254 /*
255 * Alloc buffer array for commands (Tx or other types of commands).
256 * For the command queue (#4), allocate command space + one big
257 * command for scan, since scan command is very huge; the system will
258 * not have two scans at the same time, so only one is needed.
259 * For data Tx queues (all other queues), no super-size command
260 * space is needed.
261 */
262 len = sizeof(struct iwl3945_cmd) * slots_num;
263 if (txq_id == IWL_CMD_QUEUE_NUM)
264 len += IWL_MAX_SCAN_SIZE;
265 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
266 if (!txq->cmd)
267 return -ENOMEM;
268
269 /* Alloc driver data array and TFD circular buffer */
270 rc = iwl3945_tx_queue_alloc(priv, txq, txq_id);
271 if (rc) {
272 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
273
274 return -ENOMEM;
275 }
276 txq->need_update = 0;
277
278 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
279 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
280 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
281
282 /* Initialize queue high/low-water, head/tail indexes */
283 iwl3945_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
284
285 /* Tell device where to find queue, enable DMA channel. */
286 iwl3945_hw_tx_queue_init(priv, txq);
287
288 return 0;
289}
290
291/**
292 * iwl3945_tx_queue_free - Deallocate DMA queue.
293 * @txq: Transmit queue to deallocate.
294 *
295 * Empty queue by removing and destroying all BD's.
296 * Free all buffers.
297 * 0-fill, but do not free "txq" descriptor structure.
298 */
299void iwl3945_tx_queue_free(struct iwl3945_priv *priv, struct iwl3945_tx_queue *txq)
300{
301 struct iwl3945_queue *q = &txq->q;
302 struct pci_dev *dev = priv->pci_dev;
303 int len;
304
305 if (q->n_bd == 0)
306 return;
307
308 /* first, empty all BD's */
309 for (; q->write_ptr != q->read_ptr;
310 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
311 iwl3945_hw_txq_free_tfd(priv, txq);
312
313 len = sizeof(struct iwl3945_cmd) * q->n_window;
314 if (q->id == IWL_CMD_QUEUE_NUM)
315 len += IWL_MAX_SCAN_SIZE;
316
317 /* De-alloc array of command/tx buffers */
318 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
319
320 /* De-alloc circular buffer of TFDs */
321 if (txq->q.n_bd)
322 pci_free_consistent(dev, sizeof(struct iwl3945_tfd_frame) *
323 txq->q.n_bd, txq->bd, txq->q.dma_addr);
324
325 /* De-alloc array of per-TFD driver data */
326 kfree(txq->txb);
327 txq->txb = NULL;
328
329 /* 0-fill queue descriptor structure */
330 memset(txq, 0, sizeof(*txq));
331}
332
333const u8 iwl3945_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
334 97
335/*************** STATION TABLE MANAGEMENT **** 98/*************** STATION TABLE MANAGEMENT ****
336 * mac80211 should be examined to determine if sta_info is duplicating 99 * mac80211 should be examined to determine if sta_info is duplicating
@@ -344,7 +107,7 @@ const u8 iwl3945_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
344 * 107 *
345 * NOTE: This does not remove station from device's station table. 108 * NOTE: This does not remove station from device's station table.
346 */ 109 */
347static u8 iwl3945_remove_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap) 110static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
348{ 111{
349 int index = IWL_INVALID_STATION; 112 int index = IWL_INVALID_STATION;
350 int i; 113 int i;
@@ -355,11 +118,11 @@ static u8 iwl3945_remove_station(struct iwl3945_priv *priv, const u8 *addr, int
355 if (is_ap) 118 if (is_ap)
356 index = IWL_AP_ID; 119 index = IWL_AP_ID;
357 else if (is_broadcast_ether_addr(addr)) 120 else if (is_broadcast_ether_addr(addr))
358 index = priv->hw_setting.bcast_sta_id; 121 index = priv->hw_params.bcast_sta_id;
359 else 122 else
360 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) 123 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
361 if (priv->stations[i].used && 124 if (priv->stations_39[i].used &&
362 !compare_ether_addr(priv->stations[i].sta.sta.addr, 125 !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
363 addr)) { 126 addr)) {
364 index = i; 127 index = i;
365 break; 128 break;
@@ -368,8 +131,8 @@ static u8 iwl3945_remove_station(struct iwl3945_priv *priv, const u8 *addr, int
368 if (unlikely(index == IWL_INVALID_STATION)) 131 if (unlikely(index == IWL_INVALID_STATION))
369 goto out; 132 goto out;
370 133
371 if (priv->stations[index].used) { 134 if (priv->stations_39[index].used) {
372 priv->stations[index].used = 0; 135 priv->stations_39[index].used = 0;
373 priv->num_stations--; 136 priv->num_stations--;
374 } 137 }
375 138
@@ -386,14 +149,14 @@ out:
386 * 149 *
387 * NOTE: This does not clear or otherwise alter the device's station table. 150 * NOTE: This does not clear or otherwise alter the device's station table.
388 */ 151 */
389static void iwl3945_clear_stations_table(struct iwl3945_priv *priv) 152static void iwl3945_clear_stations_table(struct iwl_priv *priv)
390{ 153{
391 unsigned long flags; 154 unsigned long flags;
392 155
393 spin_lock_irqsave(&priv->sta_lock, flags); 156 spin_lock_irqsave(&priv->sta_lock, flags);
394 157
395 priv->num_stations = 0; 158 priv->num_stations = 0;
396 memset(priv->stations, 0, sizeof(priv->stations)); 159 memset(priv->stations_39, 0, sizeof(priv->stations_39));
397 160
398 spin_unlock_irqrestore(&priv->sta_lock, flags); 161 spin_unlock_irqrestore(&priv->sta_lock, flags);
399} 162}
@@ -401,7 +164,7 @@ static void iwl3945_clear_stations_table(struct iwl3945_priv *priv)
401/** 164/**
402 * iwl3945_add_station - Add station to station tables in driver and device 165 * iwl3945_add_station - Add station to station tables in driver and device
403 */ 166 */
404u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8 flags) 167u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
405{ 168{
406 int i; 169 int i;
407 int index = IWL_INVALID_STATION; 170 int index = IWL_INVALID_STATION;
@@ -413,16 +176,16 @@ u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8
413 if (is_ap) 176 if (is_ap)
414 index = IWL_AP_ID; 177 index = IWL_AP_ID;
415 else if (is_broadcast_ether_addr(addr)) 178 else if (is_broadcast_ether_addr(addr))
416 index = priv->hw_setting.bcast_sta_id; 179 index = priv->hw_params.bcast_sta_id;
417 else 180 else
418 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { 181 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
419 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 182 if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
420 addr)) { 183 addr)) {
421 index = i; 184 index = i;
422 break; 185 break;
423 } 186 }
424 187
425 if (!priv->stations[i].used && 188 if (!priv->stations_39[i].used &&
426 index == IWL_INVALID_STATION) 189 index == IWL_INVALID_STATION)
427 index = i; 190 index = i;
428 } 191 }
@@ -434,14 +197,14 @@ u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8
434 return index; 197 return index;
435 } 198 }
436 199
437 if (priv->stations[index].used && 200 if (priv->stations_39[index].used &&
438 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) { 201 !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
439 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 202 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
440 return index; 203 return index;
441 } 204 }
442 205
443 IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr); 206 IWL_DEBUG_ASSOC(priv, "Add STA ID %d: %pM\n", index, addr);
444 station = &priv->stations[index]; 207 station = &priv->stations_39[index];
445 station->used = 1; 208 station->used = 1;
446 priv->num_stations++; 209 priv->num_stations++;
447 210
@@ -460,531 +223,35 @@ u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8
460 /* Turn on both antennas for the station... */ 223 /* Turn on both antennas for the station... */
461 station->sta.rate_n_flags = 224 station->sta.rate_n_flags =
462 iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK); 225 iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);
463 station->current_rate.rate_n_flags =
464 le16_to_cpu(station->sta.rate_n_flags);
465 226
466 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 227 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
467 228
468 /* Add station to device's station table */ 229 /* Add station to device's station table */
469 iwl3945_send_add_station(priv, &station->sta, flags); 230 iwl_send_add_sta(priv,
231 (struct iwl_addsta_cmd *)&station->sta, flags);
470 return index; 232 return index;
471 233
472} 234}
473 235
474/*************** DRIVER STATUS FUNCTIONS *****/ 236static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
475
476static inline int iwl3945_is_ready(struct iwl3945_priv *priv)
477{
478 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
479 * set but EXIT_PENDING is not */
480 return test_bit(STATUS_READY, &priv->status) &&
481 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
482 !test_bit(STATUS_EXIT_PENDING, &priv->status);
483}
484
485static inline int iwl3945_is_alive(struct iwl3945_priv *priv)
486{
487 return test_bit(STATUS_ALIVE, &priv->status);
488}
489
490static inline int iwl3945_is_init(struct iwl3945_priv *priv)
491{
492 return test_bit(STATUS_INIT, &priv->status);
493}
494
495static inline int iwl3945_is_rfkill_sw(struct iwl3945_priv *priv)
496{
497 return test_bit(STATUS_RF_KILL_SW, &priv->status);
498}
499
500static inline int iwl3945_is_rfkill_hw(struct iwl3945_priv *priv)
501{
502 return test_bit(STATUS_RF_KILL_HW, &priv->status);
503}
504
505static inline int iwl3945_is_rfkill(struct iwl3945_priv *priv)
506{
507 return iwl3945_is_rfkill_hw(priv) ||
508 iwl3945_is_rfkill_sw(priv);
509}
510
511static inline int iwl3945_is_ready_rf(struct iwl3945_priv *priv)
512{
513
514 if (iwl3945_is_rfkill(priv))
515 return 0;
516
517 return iwl3945_is_ready(priv);
518}
519
520/*************** HOST COMMAND QUEUE FUNCTIONS *****/
521
522#define IWL_CMD(x) case x: return #x
523
524static const char *get_cmd_string(u8 cmd)
525{
526 switch (cmd) {
527 IWL_CMD(REPLY_ALIVE);
528 IWL_CMD(REPLY_ERROR);
529 IWL_CMD(REPLY_RXON);
530 IWL_CMD(REPLY_RXON_ASSOC);
531 IWL_CMD(REPLY_QOS_PARAM);
532 IWL_CMD(REPLY_RXON_TIMING);
533 IWL_CMD(REPLY_ADD_STA);
534 IWL_CMD(REPLY_REMOVE_STA);
535 IWL_CMD(REPLY_REMOVE_ALL_STA);
536 IWL_CMD(REPLY_3945_RX);
537 IWL_CMD(REPLY_TX);
538 IWL_CMD(REPLY_RATE_SCALE);
539 IWL_CMD(REPLY_LEDS_CMD);
540 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
541 IWL_CMD(RADAR_NOTIFICATION);
542 IWL_CMD(REPLY_QUIET_CMD);
543 IWL_CMD(REPLY_CHANNEL_SWITCH);
544 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
545 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
546 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
547 IWL_CMD(POWER_TABLE_CMD);
548 IWL_CMD(PM_SLEEP_NOTIFICATION);
549 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
550 IWL_CMD(REPLY_SCAN_CMD);
551 IWL_CMD(REPLY_SCAN_ABORT_CMD);
552 IWL_CMD(SCAN_START_NOTIFICATION);
553 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
554 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
555 IWL_CMD(BEACON_NOTIFICATION);
556 IWL_CMD(REPLY_TX_BEACON);
557 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
558 IWL_CMD(QUIET_NOTIFICATION);
559 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
560 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
561 IWL_CMD(REPLY_BT_CONFIG);
562 IWL_CMD(REPLY_STATISTICS_CMD);
563 IWL_CMD(STATISTICS_NOTIFICATION);
564 IWL_CMD(REPLY_CARD_STATE_CMD);
565 IWL_CMD(CARD_STATE_NOTIFICATION);
566 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
567 default:
568 return "UNKNOWN";
569
570 }
571}
572
573#define HOST_COMPLETE_TIMEOUT (HZ / 2)
574
575/**
576 * iwl3945_enqueue_hcmd - enqueue a uCode command
577 * @priv: device private data pointer
578 * @cmd: a pointer to the ucode command structure
579 *
580 * The function returns < 0 values to indicate that the operation
581 * failed. On success, it returns the index (> 0) of the command in the
582 * command queue.
583 */
584static int iwl3945_enqueue_hcmd(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
585{
586 struct iwl3945_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
587 struct iwl3945_queue *q = &txq->q;
588 struct iwl3945_tfd_frame *tfd;
589 u32 *control_flags;
590 struct iwl3945_cmd *out_cmd;
591 u32 idx;
592 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
593 dma_addr_t phys_addr;
594 int pad;
595 u16 count;
596 int ret;
597 unsigned long flags;
598
599 /* If any of the command structures end up being larger than
600 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
601 * we will need to increase the size of the TFD entries */
602 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
603 !(cmd->meta.flags & CMD_SIZE_HUGE));
604
605
606 if (iwl3945_is_rfkill(priv)) {
607 IWL_DEBUG_INFO("Not sending command - RF KILL");
608 return -EIO;
609 }
610
611 if (iwl3945_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
612 IWL_ERROR("No space for Tx\n");
613 return -ENOSPC;
614 }
615
616 spin_lock_irqsave(&priv->hcmd_lock, flags);
617
618 tfd = &txq->bd[q->write_ptr];
619 memset(tfd, 0, sizeof(*tfd));
620
621 control_flags = (u32 *) tfd;
622
623 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
624 out_cmd = &txq->cmd[idx];
625
626 out_cmd->hdr.cmd = cmd->id;
627 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
628 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
629
630 /* At this point, the out_cmd now has all of the incoming cmd
631 * information */
632
633 out_cmd->hdr.flags = 0;
634 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
635 INDEX_TO_SEQ(q->write_ptr));
636 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
637 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
638
639 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
640 offsetof(struct iwl3945_cmd, hdr);
641 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
642
643 pad = U32_PAD(cmd->len);
644 count = TFD_CTL_COUNT_GET(*control_flags);
645 *control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad);
646
647 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
648 "%d bytes at %d[%d]:%d\n",
649 get_cmd_string(out_cmd->hdr.cmd),
650 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
651 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
652
653 txq->need_update = 1;
654
655 /* Increment and update queue's write index */
656 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
657 ret = iwl3945_tx_queue_update_write_ptr(priv, txq);
658
659 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
660 return ret ? ret : idx;
661}
662
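iwl3945_enqueue_hcmd above stamps each host command with a sequence field that encodes the originating queue and the slot index, plus a flag for the oversized scan buffer, so the firmware response can later be routed back to the right entry. A sketch of that encoding, assuming the usual iwlwifi layout of index in bits 0-7, queue in bits 8-12, and the "huge" flag at bit 14 (the exact bit positions are an assumption here):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: index in bits 0-7, queue in bits 8-12, huge flag bit 14. */
#define INDEX_TO_SEQ(i)	((uint16_t)((i) & 0xff))
#define QUEUE_TO_SEQ(q)	((uint16_t)(((q) & 0x1f) << 8))
#define SEQ_HUGE_FRAME	((uint16_t)0x4000)

int main(void)
{
	int cmd_queue = 4, write_ptr = 37, is_huge = 1;
	uint16_t seq = QUEUE_TO_SEQ(cmd_queue) | INDEX_TO_SEQ(write_ptr);

	if (is_huge)
		seq |= SEQ_HUGE_FRAME;

	printf("sequence = 0x%04x (queue %u, index %u, huge %u)\n", seq,
	       (seq >> 8) & 0x1f, seq & 0xff, !!(seq & SEQ_HUGE_FRAME));
	return 0;
}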
663static int iwl3945_send_cmd_async(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
664{
665 int ret;
666
667 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
668
669 /* An asynchronous command can not expect an SKB to be set. */
670 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
671
672 /* An asynchronous command MUST have a callback. */
673 BUG_ON(!cmd->meta.u.callback);
674
675 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
676 return -EBUSY;
677
678 ret = iwl3945_enqueue_hcmd(priv, cmd);
679 if (ret < 0) {
680 IWL_ERROR("Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
681 get_cmd_string(cmd->id), ret);
682 return ret;
683 }
684 return 0;
685}
686
687static int iwl3945_send_cmd_sync(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
688{
689 int cmd_idx;
690 int ret;
691
692 BUG_ON(cmd->meta.flags & CMD_ASYNC);
693
694 /* A synchronous command can not have a callback set. */
695 BUG_ON(cmd->meta.u.callback != NULL);
696
697 if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
698 IWL_ERROR("Error sending %s: Already sending a host command\n",
699 get_cmd_string(cmd->id));
700 ret = -EBUSY;
701 goto out;
702 }
703
704 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
705
706 if (cmd->meta.flags & CMD_WANT_SKB)
707 cmd->meta.source = &cmd->meta;
708
709 cmd_idx = iwl3945_enqueue_hcmd(priv, cmd);
710 if (cmd_idx < 0) {
711 ret = cmd_idx;
712 IWL_ERROR("Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
713 get_cmd_string(cmd->id), ret);
714 goto out;
715 }
716
717 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
718 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
719 HOST_COMPLETE_TIMEOUT);
720 if (!ret) {
721 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
722 IWL_ERROR("Error sending %s: time out after %dms.\n",
723 get_cmd_string(cmd->id),
724 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
725
726 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
727 ret = -ETIMEDOUT;
728 goto cancel;
729 }
730 }
731
732 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
733 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
734 get_cmd_string(cmd->id));
735 ret = -ECANCELED;
736 goto fail;
737 }
738 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
739 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
740 get_cmd_string(cmd->id));
741 ret = -EIO;
742 goto fail;
743 }
744 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
745 IWL_ERROR("Error: Response NULL in '%s'\n",
746 get_cmd_string(cmd->id));
747 ret = -EIO;
748 goto cancel;
749 }
750
751 ret = 0;
752 goto out;
753
754cancel:
755 if (cmd->meta.flags & CMD_WANT_SKB) {
756 struct iwl3945_cmd *qcmd;
757
758		/* Cancel the CMD_WANT_SKB flag for the cmd in the
759		 * TX cmd queue. Otherwise, if the response comes
760		 * in later, it could set an invalid
761		 * address (cmd->meta.source). */
762 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
763 qcmd->meta.flags &= ~CMD_WANT_SKB;
764 }
765fail:
766 if (cmd->meta.u.skb) {
767 dev_kfree_skb_any(cmd->meta.u.skb);
768 cmd->meta.u.skb = NULL;
769 }
770out:
771 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
772 return ret;
773}
774
775int iwl3945_send_cmd(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
776{
777 if (cmd->meta.flags & CMD_ASYNC)
778 return iwl3945_send_cmd_async(priv, cmd);
779
780 return iwl3945_send_cmd_sync(priv, cmd);
781}
782
783int iwl3945_send_cmd_pdu(struct iwl3945_priv *priv, u8 id, u16 len, const void *data)
784{
785 struct iwl3945_host_cmd cmd = {
786 .id = id,
787 .len = len,
788 .data = data,
789 };
790
791 return iwl3945_send_cmd_sync(priv, &cmd);
792}
793
794static int __must_check iwl3945_send_cmd_u32(struct iwl3945_priv *priv, u8 id, u32 val)
795{
796 struct iwl3945_host_cmd cmd = {
797 .id = id,
798 .len = sizeof(val),
799 .data = &val,
800 };
801
802 return iwl3945_send_cmd_sync(priv, &cmd);
803}
804
805int iwl3945_send_statistics_request(struct iwl3945_priv *priv)
806{
807 return iwl3945_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
808}
809
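/*
 * Illustrative sketch (not part of this patch): the helpers above split
 * into a fire-and-forget asynchronous path and a blocking synchronous
 * path.  A caller that also needs the uCode's reply sets CMD_WANT_SKB on
 * a synchronous command and owns the returned skb, roughly:
 *
 *	struct iwl3945_host_cmd cmd = {
 *		.id = REPLY_SCAN_ABORT_CMD,	(any reply-carrying command)
 *		.meta.flags = CMD_WANT_SKB,
 *	};
 *	struct iwl3945_rx_packet *res;
 *
 *	if (!iwl3945_send_cmd_sync(priv, &cmd)) {
 *		res = (struct iwl3945_rx_packet *)cmd.meta.u.skb->data;
 *		(check res->hdr.flags / res->u as needed)
 *		dev_kfree_skb_any(cmd.meta.u.skb);
 *	}
 *
 * This mirrors what iwl3945_send_scan_abort() and iwl3945_send_rxon_assoc()
 * below do; asynchronous commands instead take CMD_ASYNC plus a callback
 * and may not use CMD_WANT_SKB.
 */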
810/**
811 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
812 * @band: 2.4 or 5 GHz band
813 * @channel: Any channel valid for the requested band
814 *
815 * In addition to setting the staging RXON, priv->band is also set.
816 *
817 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
818 * in the staging RXON flag structure based on the band
819 */
820static int iwl3945_set_rxon_channel(struct iwl3945_priv *priv,
821 enum ieee80211_band band,
822 u16 channel)
823{
824 if (!iwl3945_get_channel_info(priv, band, channel)) {
825 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
826 channel, band);
827 return -EINVAL;
828 }
829
830 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
831 (priv->band == band))
832 return 0;
833
834 priv->staging_rxon.channel = cpu_to_le16(channel);
835 if (band == IEEE80211_BAND_5GHZ)
836 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
837 else
838 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
839
840 priv->band = band;
841
842 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
843
844 return 0;
845}
846
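/*
 * Illustrative usage (a sketch, not part of this patch): since the helper
 * above only edits the staging RXON, a hypothetical caller still has to
 * commit the change before it reaches the hardware, e.g.
 *
 *	if (!iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6))
 *		iwl3945_commit_rxon(priv);
 *
 * (channel 6 is an arbitrary example; iwl3945_commit_rxon() is defined
 * further down in this file).
 */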
847/**
848 * iwl3945_check_rxon_cmd - validate RXON structure is valid
849 *
850 * NOTE: This is really only useful during development and can eventually
851 * be #ifdef'd out once the driver is stable and folks aren't actively
852 * making changes
853 */
854static int iwl3945_check_rxon_cmd(struct iwl3945_rxon_cmd *rxon)
855{
856 int error = 0;
857 int counter = 1;
858
859 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
860 error |= le32_to_cpu(rxon->flags &
861 (RXON_FLG_TGJ_NARROW_BAND_MSK |
862 RXON_FLG_RADAR_DETECT_MSK));
863 if (error)
864 IWL_WARNING("check 24G fields %d | %d\n",
865 counter++, error);
866 } else {
867 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
868 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
869 if (error)
870 IWL_WARNING("check 52 fields %d | %d\n",
871 counter++, error);
872 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
873 if (error)
874 IWL_WARNING("check 52 CCK %d | %d\n",
875 counter++, error);
876 }
877 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
878 if (error)
879 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
880
881 /* make sure basic rates 6Mbps and 1Mbps are supported */
882 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
883 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
884 if (error)
885 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
886
887 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
888 if (error)
889 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
890
891 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
892 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
893 if (error)
894 IWL_WARNING("check CCK and short slot %d | %d\n",
895 counter++, error);
896
897 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
898 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
899 if (error)
900 IWL_WARNING("check CCK & auto detect %d | %d\n",
901 counter++, error);
902
903 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
904 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
905 if (error)
906 IWL_WARNING("check TGG and auto detect %d | %d\n",
907 counter++, error);
908
909 if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
910 error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
911 RXON_FLG_ANT_A_MSK)) == 0);
912 if (error)
913 IWL_WARNING("check antenna %d %d\n", counter++, error);
914
915 if (error)
916 IWL_WARNING("Tuning to channel %d\n",
917 le16_to_cpu(rxon->channel));
918
919 if (error) {
920 IWL_ERROR("Not a valid iwl3945_rxon_assoc_cmd field values\n");
921 return -1;
922 }
923 return 0;
924}
925
926/**
927 * iwl3945_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
928 * @priv: staging_rxon is compared to active_rxon
929 *
930 * If the RXON structure is changing enough to require a new tune,
931 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
932 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
933 */
934static int iwl3945_full_rxon_required(struct iwl3945_priv *priv)
935{
936
937 /* These items are only settable from the full RXON command */
938 if (!(iwl3945_is_associated(priv)) ||
939 compare_ether_addr(priv->staging_rxon.bssid_addr,
940 priv->active_rxon.bssid_addr) ||
941 compare_ether_addr(priv->staging_rxon.node_addr,
942 priv->active_rxon.node_addr) ||
943 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
944 priv->active_rxon.wlap_bssid_addr) ||
945 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
946 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
947 (priv->staging_rxon.air_propagation !=
948 priv->active_rxon.air_propagation) ||
949 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
950 return 1;
951
952 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
953 * be updated with the RXON_ASSOC command -- however only some
954 * flag transitions are allowed using RXON_ASSOC */
955
956 /* Check if we are not switching bands */
957 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
958 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
959 return 1;
960
961 /* Check if we are switching association toggle */
962 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
963 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
964 return 1;
965
966 return 0;
967}
968
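/*
 * Worked example (a sketch, not part of this patch): while associated,
 * changing only cck_basic_rates, ofdm_basic_rates or filter bits other
 * than RXON_FILTER_ASSOC_MSK leaves iwl3945_full_rxon_required() at 0,
 * so iwl3945_commit_rxon() can take the lightweight RXON_ASSOC path;
 * changing the channel, a BSSID/node address, the 2.4/5 GHz band bit or
 * toggling the ASSOC filter bit makes it return 1 and forces a full
 * RXON command (and therefore a re-tune).
 */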
969static int iwl3945_send_rxon_assoc(struct iwl3945_priv *priv)
970{ 237{
971 int rc = 0; 238 int rc = 0;
972 struct iwl3945_rx_packet *res = NULL; 239 struct iwl_rx_packet *res = NULL;
973 struct iwl3945_rxon_assoc_cmd rxon_assoc; 240 struct iwl3945_rxon_assoc_cmd rxon_assoc;
974 struct iwl3945_host_cmd cmd = { 241 struct iwl_host_cmd cmd = {
975 .id = REPLY_RXON_ASSOC, 242 .id = REPLY_RXON_ASSOC,
976 .len = sizeof(rxon_assoc), 243 .len = sizeof(rxon_assoc),
977 .meta.flags = CMD_WANT_SKB, 244 .meta.flags = CMD_WANT_SKB,
978 .data = &rxon_assoc, 245 .data = &rxon_assoc,
979 }; 246 };
980 const struct iwl3945_rxon_cmd *rxon1 = &priv->staging_rxon; 247 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
981 const struct iwl3945_rxon_cmd *rxon2 = &priv->active_rxon; 248 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
982 249
983 if ((rxon1->flags == rxon2->flags) && 250 if ((rxon1->flags == rxon2->flags) &&
984 (rxon1->filter_flags == rxon2->filter_flags) && 251 (rxon1->filter_flags == rxon2->filter_flags) &&
985 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && 252 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
986 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { 253 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
987 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n"); 254 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
988 return 0; 255 return 0;
989 } 256 }
990 257
@@ -994,13 +261,13 @@ static int iwl3945_send_rxon_assoc(struct iwl3945_priv *priv)
994 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; 261 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
995 rxon_assoc.reserved = 0; 262 rxon_assoc.reserved = 0;
996 263
997 rc = iwl3945_send_cmd_sync(priv, &cmd); 264 rc = iwl_send_cmd_sync(priv, &cmd);
998 if (rc) 265 if (rc)
999 return rc; 266 return rc;
1000 267
1001 res = (struct iwl3945_rx_packet *)cmd.meta.u.skb->data; 268 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1002 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 269 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1003 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n"); 270 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1004 rc = -EIO; 271 rc = -EIO;
1005 } 272 }
1006 273
@@ -1011,6 +278,43 @@ static int iwl3945_send_rxon_assoc(struct iwl3945_priv *priv)
1011} 278}
1012 279
1013/** 280/**
281 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
282 * @priv: eeprom and antenna fields are used to determine antenna flags
283 *
284 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
285 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
286 *
287 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
288 * IWL_ANTENNA_MAIN - Force MAIN antenna
289 * IWL_ANTENNA_AUX - Force AUX antenna
290 */
291__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
292{
293 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
294
295 switch (iwl3945_mod_params.antenna) {
296 case IWL_ANTENNA_DIVERSITY:
297 return 0;
298
299 case IWL_ANTENNA_MAIN:
300 if (eeprom->antenna_switch_type)
301 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
302 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
303
304 case IWL_ANTENNA_AUX:
305 if (eeprom->antenna_switch_type)
306 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
307 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
308 }
309
310 /* bad antenna selector value */
311 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
312 iwl3945_mod_params.antenna);
313
314 return 0; /* "diversity" is default if error */
315}
316
317/**
1014 * iwl3945_commit_rxon - commit staging_rxon to hardware 318 * iwl3945_commit_rxon - commit staging_rxon to hardware
1015 * 319 *
1016 * The RXON command in staging_rxon is committed to the hardware and 320 * The RXON command in staging_rxon is committed to the hardware and
@@ -1018,41 +322,42 @@ static int iwl3945_send_rxon_assoc(struct iwl3945_priv *priv)
1018 * function correctly transitions out of the RXON_ASSOC_MSK state if 322 * function correctly transitions out of the RXON_ASSOC_MSK state if
1019 * a HW tune is required based on the RXON structure changes. 323 * a HW tune is required based on the RXON structure changes.
1020 */ 324 */
1021static int iwl3945_commit_rxon(struct iwl3945_priv *priv) 325static int iwl3945_commit_rxon(struct iwl_priv *priv)
1022{ 326{
1023 /* cast away the const for active_rxon in this function */ 327 /* cast away the const for active_rxon in this function */
1024 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 328 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
329 struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon;
1025 int rc = 0; 330 int rc = 0;
1026 331
1027 if (!iwl3945_is_alive(priv)) 332 if (!iwl_is_alive(priv))
1028 return -1; 333 return -1;
1029 334
1030 /* always get timestamp with Rx frame */ 335 /* always get timestamp with Rx frame */
1031 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; 336 staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
1032 337
1033 /* select antenna */ 338 /* select antenna */
1034 priv->staging_rxon.flags &= 339 staging_rxon->flags &=
1035 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); 340 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1036 priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv); 341 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1037 342
1038 rc = iwl3945_check_rxon_cmd(&priv->staging_rxon); 343 rc = iwl_check_rxon_cmd(priv);
1039 if (rc) { 344 if (rc) {
1040 IWL_ERROR("Invalid RXON configuration. Not committing.\n"); 345 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1041 return -EINVAL; 346 return -EINVAL;
1042 } 347 }
1043 348
1044 /* If we don't need to send a full RXON, we can use 349 /* If we don't need to send a full RXON, we can use
1045 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter 350 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1046 * and other flags for the current radio configuration. */ 351 * and other flags for the current radio configuration. */
1047 if (!iwl3945_full_rxon_required(priv)) { 352 if (!iwl_full_rxon_required(priv)) {
1048 rc = iwl3945_send_rxon_assoc(priv); 353 rc = iwl3945_send_rxon_assoc(priv);
1049 if (rc) { 354 if (rc) {
1050 IWL_ERROR("Error setting RXON_ASSOC " 355 IWL_ERR(priv, "Error setting RXON_ASSOC "
1051 "configuration (%d).\n", rc); 356 "configuration (%d).\n", rc);
1052 return rc; 357 return rc;
1053 } 358 }
1054 359
1055 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 360 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1056 361
1057 return 0; 362 return 0;
1058 } 363 }
@@ -1061,12 +366,18 @@ static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
1061 * an RXON_ASSOC and the new config wants the associated mask enabled, 366 * an RXON_ASSOC and the new config wants the associated mask enabled,
1062 * we must clear the associated from the active configuration 367 * we must clear the associated from the active configuration
1063 * before we apply the new config */ 368 * before we apply the new config */
1064 if (iwl3945_is_associated(priv) && 369 if (iwl_is_associated(priv) &&
1065 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { 370 (staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK)) {
1066 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); 371 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1067 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 372 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1068 373
1069 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON, 374 /*
375 * reserved4 and 5 could have been filled by the iwlcore code.
376 * Let's clear them before pushing to the 3945.
377 */
378 active_rxon->reserved4 = 0;
379 active_rxon->reserved5 = 0;
380 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1070 sizeof(struct iwl3945_rxon_cmd), 381 sizeof(struct iwl3945_rxon_cmd),
1071 &priv->active_rxon); 382 &priv->active_rxon);
1072 383
@@ -1074,231 +385,78 @@ static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
1074 * active_rxon back to what it was previously */ 385 * active_rxon back to what it was previously */
1075 if (rc) { 386 if (rc) {
1076 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; 387 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1077 IWL_ERROR("Error clearing ASSOC_MSK on current " 388 IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
1078 "configuration (%d).\n", rc); 389 "configuration (%d).\n", rc);
1079 return rc; 390 return rc;
1080 } 391 }
1081 } 392 }
1082 393
1083 IWL_DEBUG_INFO("Sending RXON\n" 394 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1084 "* with%s RXON_FILTER_ASSOC_MSK\n" 395 "* with%s RXON_FILTER_ASSOC_MSK\n"
1085 "* channel = %d\n" 396 "* channel = %d\n"
1086 "* bssid = %pM\n", 397 "* bssid = %pM\n",
1087 ((priv->staging_rxon.filter_flags & 398 ((priv->staging_rxon.filter_flags &
1088 RXON_FILTER_ASSOC_MSK) ? "" : "out"), 399 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1089 le16_to_cpu(priv->staging_rxon.channel), 400 le16_to_cpu(staging_rxon->channel),
1090 priv->staging_rxon.bssid_addr); 401 staging_rxon->bssid_addr);
402
403 /*
404 * reserved4 and 5 could have been filled by the iwlcore code.
405 * Let's clear them before pushing to the 3945.
406 */
407 staging_rxon->reserved4 = 0;
408 staging_rxon->reserved5 = 0;
1091 409
1092 /* Apply the new configuration */ 410 /* Apply the new configuration */
1093 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON, 411 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1094 sizeof(struct iwl3945_rxon_cmd), &priv->staging_rxon); 412 sizeof(struct iwl3945_rxon_cmd),
413 staging_rxon);
1095 if (rc) { 414 if (rc) {
1096 IWL_ERROR("Error setting new configuration (%d).\n", rc); 415 IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
1097 return rc; 416 return rc;
1098 } 417 }
1099 418
1100 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 419 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1101 420
1102 iwl3945_clear_stations_table(priv); 421 iwl3945_clear_stations_table(priv);
1103 422
1104 /* If we issue a new RXON command which required a tune then we must 423 /* If we issue a new RXON command which required a tune then we must
1105 * send a new TXPOWER command or we won't be able to Tx any frames */ 424 * send a new TXPOWER command or we won't be able to Tx any frames */
1106 rc = iwl3945_hw_reg_send_txpower(priv); 425 rc = priv->cfg->ops->lib->send_tx_power(priv);
1107 if (rc) { 426 if (rc) {
1108 IWL_ERROR("Error setting Tx power (%d).\n", rc); 427 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1109 return rc; 428 return rc;
1110 } 429 }
1111 430
1112 /* Add the broadcast address so we can send broadcast frames */ 431 /* Add the broadcast address so we can send broadcast frames */
1113 if (iwl3945_add_station(priv, iwl3945_broadcast_addr, 0, 0) == 432 if (iwl3945_add_station(priv, iwl_bcast_addr, 0, 0) ==
1114 IWL_INVALID_STATION) { 433 IWL_INVALID_STATION) {
1115 IWL_ERROR("Error adding BROADCAST address for transmit.\n"); 434 IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
1116 return -EIO; 435 return -EIO;
1117 } 436 }
1118 437
1119 /* If we have set the ASSOC_MSK and we are in BSS mode then 438 /* If we have set the ASSOC_MSK and we are in BSS mode then
1120 * add the IWL_AP_ID to the station rate table */ 439 * add the IWL_AP_ID to the station rate table */
1121 if (iwl3945_is_associated(priv) && 440 if (iwl_is_associated(priv) &&
1122 (priv->iw_mode == NL80211_IFTYPE_STATION)) 441 (priv->iw_mode == NL80211_IFTYPE_STATION))
1123 if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr, 1, 0) 442 if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr,
443 1, 0)
1124 == IWL_INVALID_STATION) { 444 == IWL_INVALID_STATION) {
1125 IWL_ERROR("Error adding AP address for transmit.\n"); 445 IWL_ERR(priv, "Error adding AP address for transmit\n");
1126 return -EIO; 446 return -EIO;
1127 } 447 }
1128 448
1129 /* Init the hardware's rate fallback order based on the band */ 449 /* Init the hardware's rate fallback order based on the band */
1130 rc = iwl3945_init_hw_rate_table(priv); 450 rc = iwl3945_init_hw_rate_table(priv);
1131 if (rc) { 451 if (rc) {
1132 IWL_ERROR("Error setting HW rate table: %02X\n", rc); 452 IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
1133 return -EIO; 453 return -EIO;
1134 } 454 }
1135 455
1136 return 0; 456 return 0;
1137} 457}
1138 458
1139static int iwl3945_send_bt_config(struct iwl3945_priv *priv) 459static int iwl3945_update_sta_key_info(struct iwl_priv *priv,
1140{
1141 struct iwl3945_bt_cmd bt_cmd = {
1142 .flags = 3,
1143 .lead_time = 0xAA,
1144 .max_kill = 1,
1145 .kill_ack_mask = 0,
1146 .kill_cts_mask = 0,
1147 };
1148
1149 return iwl3945_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1150 sizeof(struct iwl3945_bt_cmd), &bt_cmd);
1151}
1152
1153static int iwl3945_send_scan_abort(struct iwl3945_priv *priv)
1154{
1155 int rc = 0;
1156 struct iwl3945_rx_packet *res;
1157 struct iwl3945_host_cmd cmd = {
1158 .id = REPLY_SCAN_ABORT_CMD,
1159 .meta.flags = CMD_WANT_SKB,
1160 };
1161
1162 /* If there isn't a scan actively going on in the hardware
1163 * then we are in between scan bands and not actually
1164 * actively scanning, so don't send the abort command */
1165 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1166 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1167 return 0;
1168 }
1169
1170 rc = iwl3945_send_cmd_sync(priv, &cmd);
1171 if (rc) {
1172 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1173 return rc;
1174 }
1175
1176 res = (struct iwl3945_rx_packet *)cmd.meta.u.skb->data;
1177 if (res->u.status != CAN_ABORT_STATUS) {
1178 /* The scan abort will return 1 for success or
1179 * 2 for "failure". A failure condition can be
1180 * due to simply not being in an active scan which
1181	 * can occur if we send the scan abort before
1182	 * the microcode has notified us that a scan is
1183 * completed. */
1184 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1185 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1186 clear_bit(STATUS_SCAN_HW, &priv->status);
1187 }
1188
1189 dev_kfree_skb_any(cmd.meta.u.skb);
1190
1191 return rc;
1192}
1193
1194static int iwl3945_card_state_sync_callback(struct iwl3945_priv *priv,
1195 struct iwl3945_cmd *cmd,
1196 struct sk_buff *skb)
1197{
1198 return 1;
1199}
1200
1201/*
1202 * CARD_STATE_CMD
1203 *
1204 * Use: Sets the device's internal card state to enable, disable, or halt
1205 *
1206 * When in the 'enable' state the card operates as normal.
1207 * When in the 'disable' state, the card enters into a low power mode.
1208 * When in the 'halt' state, the card is shut down and must be fully
1209 * restarted to come back on.
1210 */
1211static int iwl3945_send_card_state(struct iwl3945_priv *priv, u32 flags, u8 meta_flag)
1212{
1213 struct iwl3945_host_cmd cmd = {
1214 .id = REPLY_CARD_STATE_CMD,
1215 .len = sizeof(u32),
1216 .data = &flags,
1217 .meta.flags = meta_flag,
1218 };
1219
1220 if (meta_flag & CMD_ASYNC)
1221 cmd.meta.u.callback = iwl3945_card_state_sync_callback;
1222
1223 return iwl3945_send_cmd(priv, &cmd);
1224}
1225
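/*
 * Illustrative usage (a sketch, not part of this patch; the flag value is
 * a placeholder): an RF-kill or suspend path could push a new card state
 * without blocking, e.g.
 *
 *	iwl3945_send_card_state(priv, flags_disable, CMD_ASYNC);
 *
 * With CMD_ASYNC the helper attaches iwl3945_card_state_sync_callback(),
 * which just returns 1 so the reply skb is not kept; passing 0 instead
 * sends the command synchronously and waits for the uCode acknowledgement.
 */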
1226static int iwl3945_add_sta_sync_callback(struct iwl3945_priv *priv,
1227 struct iwl3945_cmd *cmd, struct sk_buff *skb)
1228{
1229 struct iwl3945_rx_packet *res = NULL;
1230
1231 if (!skb) {
1232 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1233 return 1;
1234 }
1235
1236 res = (struct iwl3945_rx_packet *)skb->data;
1237 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1238 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1239 res->hdr.flags);
1240 return 1;
1241 }
1242
1243 switch (res->u.add_sta.status) {
1244 case ADD_STA_SUCCESS_MSK:
1245 break;
1246 default:
1247 break;
1248 }
1249
1250 /* We didn't cache the SKB; let the caller free it */
1251 return 1;
1252}
1253
1254int iwl3945_send_add_station(struct iwl3945_priv *priv,
1255 struct iwl3945_addsta_cmd *sta, u8 flags)
1256{
1257 struct iwl3945_rx_packet *res = NULL;
1258 int rc = 0;
1259 struct iwl3945_host_cmd cmd = {
1260 .id = REPLY_ADD_STA,
1261 .len = sizeof(struct iwl3945_addsta_cmd),
1262 .meta.flags = flags,
1263 .data = sta,
1264 };
1265
1266 if (flags & CMD_ASYNC)
1267 cmd.meta.u.callback = iwl3945_add_sta_sync_callback;
1268 else
1269 cmd.meta.flags |= CMD_WANT_SKB;
1270
1271 rc = iwl3945_send_cmd(priv, &cmd);
1272
1273 if (rc || (flags & CMD_ASYNC))
1274 return rc;
1275
1276 res = (struct iwl3945_rx_packet *)cmd.meta.u.skb->data;
1277 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1278 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1279 res->hdr.flags);
1280 rc = -EIO;
1281 }
1282
1283 if (rc == 0) {
1284 switch (res->u.add_sta.status) {
1285 case ADD_STA_SUCCESS_MSK:
1286 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1287 break;
1288 default:
1289 rc = -EIO;
1290 IWL_WARNING("REPLY_ADD_STA failed\n");
1291 break;
1292 }
1293 }
1294
1295 priv->alloc_rxb_skb--;
1296 dev_kfree_skb_any(cmd.meta.u.skb);
1297
1298 return rc;
1299}
1300
1301static int iwl3945_update_sta_key_info(struct iwl3945_priv *priv,
1302 struct ieee80211_key_conf *keyconf, 460 struct ieee80211_key_conf *keyconf,
1303 u8 sta_id) 461 u8 sta_id)
1304{ 462{
@@ -1318,46 +476,49 @@ static int iwl3945_update_sta_key_info(struct iwl3945_priv *priv,
1318 return -EINVAL; 476 return -EINVAL;
1319 } 477 }
1320 spin_lock_irqsave(&priv->sta_lock, flags); 478 spin_lock_irqsave(&priv->sta_lock, flags);
1321 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 479 priv->stations_39[sta_id].keyinfo.alg = keyconf->alg;
1322 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; 480 priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen;
1323 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 481 memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key,
1324 keyconf->keylen); 482 keyconf->keylen);
1325 483
1326 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 484 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
1327 keyconf->keylen); 485 keyconf->keylen);
1328 priv->stations[sta_id].sta.key.key_flags = key_flags; 486 priv->stations_39[sta_id].sta.key.key_flags = key_flags;
1329 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 487 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1330 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 488 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1331 489
1332 spin_unlock_irqrestore(&priv->sta_lock, flags); 490 spin_unlock_irqrestore(&priv->sta_lock, flags);
1333 491
1334 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 492 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
1335 iwl3945_send_add_station(priv, &priv->stations[sta_id].sta, 0); 493 iwl_send_add_sta(priv,
494 (struct iwl_addsta_cmd *)&priv->stations_39[sta_id].sta, 0);
1336 return 0; 495 return 0;
1337} 496}
1338 497
1339static int iwl3945_clear_sta_key_info(struct iwl3945_priv *priv, u8 sta_id) 498static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1340{ 499{
1341 unsigned long flags; 500 unsigned long flags;
1342 501
1343 spin_lock_irqsave(&priv->sta_lock, flags); 502 spin_lock_irqsave(&priv->sta_lock, flags);
1344 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key)); 503 memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
1345 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl3945_keyinfo)); 504 memset(&priv->stations_39[sta_id].sta.key, 0,
1346 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 505 sizeof(struct iwl4965_keyinfo));
1347 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 506 priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1348 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 507 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
508 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1349 spin_unlock_irqrestore(&priv->sta_lock, flags); 509 spin_unlock_irqrestore(&priv->sta_lock, flags);
1350 510
1351 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); 511 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
1352 iwl3945_send_add_station(priv, &priv->stations[sta_id].sta, 0); 512 iwl_send_add_sta(priv,
513 (struct iwl_addsta_cmd *)&priv->stations_39[sta_id].sta, 0);
1353 return 0; 514 return 0;
1354} 515}
1355 516
1356static void iwl3945_clear_free_frames(struct iwl3945_priv *priv) 517static void iwl3945_clear_free_frames(struct iwl_priv *priv)
1357{ 518{
1358 struct list_head *element; 519 struct list_head *element;
1359 520
1360 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", 521 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
1361 priv->frames_count); 522 priv->frames_count);
1362 523
1363 while (!list_empty(&priv->free_frames)) { 524 while (!list_empty(&priv->free_frames)) {
@@ -1368,20 +529,20 @@ static void iwl3945_clear_free_frames(struct iwl3945_priv *priv)
1368 } 529 }
1369 530
1370 if (priv->frames_count) { 531 if (priv->frames_count) {
1371 IWL_WARNING("%d frames still in use. Did we lose one?\n", 532 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
1372 priv->frames_count); 533 priv->frames_count);
1373 priv->frames_count = 0; 534 priv->frames_count = 0;
1374 } 535 }
1375} 536}
1376 537
1377static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl3945_priv *priv) 538static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
1378{ 539{
1379 struct iwl3945_frame *frame; 540 struct iwl3945_frame *frame;
1380 struct list_head *element; 541 struct list_head *element;
1381 if (list_empty(&priv->free_frames)) { 542 if (list_empty(&priv->free_frames)) {
1382 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 543 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1383 if (!frame) { 544 if (!frame) {
1384 IWL_ERROR("Could not allocate frame!\n"); 545 IWL_ERR(priv, "Could not allocate frame!\n");
1385 return NULL; 546 return NULL;
1386 } 547 }
1387 548
@@ -1394,18 +555,18 @@ static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl3945_priv *priv)
1394 return list_entry(element, struct iwl3945_frame, list); 555 return list_entry(element, struct iwl3945_frame, list);
1395} 556}
1396 557
1397static void iwl3945_free_frame(struct iwl3945_priv *priv, struct iwl3945_frame *frame) 558static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
1398{ 559{
1399 memset(frame, 0, sizeof(*frame)); 560 memset(frame, 0, sizeof(*frame));
1400 list_add(&frame->list, &priv->free_frames); 561 list_add(&frame->list, &priv->free_frames);
1401} 562}
1402 563
1403unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv, 564unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
1404 struct ieee80211_hdr *hdr, 565 struct ieee80211_hdr *hdr,
1405 int left) 566 int left)
1406{ 567{
1407 568
1408 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon || 569 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1409 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && 570 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
1410 (priv->iw_mode != NL80211_IFTYPE_AP))) 571 (priv->iw_mode != NL80211_IFTYPE_AP)))
1411 return 0; 572 return 0;
@@ -1418,31 +579,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
1418 return priv->ibss_beacon->len; 579 return priv->ibss_beacon->len;
1419} 580}
1420 581
1421static u8 iwl3945_rate_get_lowest_plcp(struct iwl3945_priv *priv) 582static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
1422{
1423 u8 i;
1424 int rate_mask;
1425
1426 /* Set rate mask*/
1427 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
1428 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
1429 else
1430 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
1431
1432 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1433 i = iwl3945_rates[i].next_ieee) {
1434 if (rate_mask & (1 << i))
1435 return iwl3945_rates[i].plcp;
1436 }
1437
1438 /* No valid rate was found. Assign the lowest one */
1439 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
1440 return IWL_RATE_1M_PLCP;
1441 else
1442 return IWL_RATE_6M_PLCP;
1443}
1444
1445static int iwl3945_send_beacon_cmd(struct iwl3945_priv *priv)
1446{ 583{
1447 struct iwl3945_frame *frame; 584 struct iwl3945_frame *frame;
1448 unsigned int frame_size; 585 unsigned int frame_size;
@@ -1452,16 +589,16 @@ static int iwl3945_send_beacon_cmd(struct iwl3945_priv *priv)
1452 frame = iwl3945_get_free_frame(priv); 589 frame = iwl3945_get_free_frame(priv);
1453 590
1454 if (!frame) { 591 if (!frame) {
1455 IWL_ERROR("Could not obtain free frame buffer for beacon " 592 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
1456 "command.\n"); 593 "command.\n");
1457 return -ENOMEM; 594 return -ENOMEM;
1458 } 595 }
1459 596
1460 rate = iwl3945_rate_get_lowest_plcp(priv); 597 rate = iwl_rate_get_lowest_plcp(priv);
1461 598
1462 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); 599 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
1463 600
1464 rc = iwl3945_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 601 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1465 &frame->u.cmd[0]); 602 &frame->u.cmd[0]);
1466 603
1467 iwl3945_free_frame(priv, frame); 604 iwl3945_free_frame(priv, frame);
@@ -1469,307 +606,27 @@ static int iwl3945_send_beacon_cmd(struct iwl3945_priv *priv)
1469 return rc; 606 return rc;
1470} 607}
1471 608
1472/****************************************************************************** 609static void iwl3945_unset_hw_params(struct iwl_priv *priv)
1473 *
1474 * EEPROM related functions
1475 *
1476 ******************************************************************************/
1477
1478static void get_eeprom_mac(struct iwl3945_priv *priv, u8 *mac)
1479{ 610{
1480 memcpy(mac, priv->eeprom.mac_address, 6); 611 if (priv->shared_virt)
1481}
1482
1483/*
1484 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
1485 * embedded controller) as EEPROM reader; each read is a series of pulses
1486 * to/from the EEPROM chip, not a single event, so even reads could conflict
1487 * if they weren't arbitrated by some ownership mechanism. Here, the driver
1488 * simply claims ownership, which should be safe when this function is called
1489 * (i.e. before loading uCode!).
1490 */
1491static inline int iwl3945_eeprom_acquire_semaphore(struct iwl3945_priv *priv)
1492{
1493 _iwl3945_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
1494 return 0;
1495}
1496
1497/**
1498 * iwl3945_eeprom_init - read EEPROM contents
1499 *
1500 * Load the EEPROM contents from adapter into priv->eeprom
1501 *
1502 * NOTE: This routine uses the non-debug IO access functions.
1503 */
1504int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1505{
1506 u16 *e = (u16 *)&priv->eeprom;
1507 u32 gp = iwl3945_read32(priv, CSR_EEPROM_GP);
1508 int sz = sizeof(priv->eeprom);
1509 int ret;
1510 u16 addr;
1511
1512	/* The EEPROM structure has several padding buffers within it,
1513	 * and adding new EEPROM maps is subject to programmer errors
1514	 * that may be very difficult to identify without explicitly
1515	 * checking the resulting size of the eeprom map. */
1516 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1517
1518 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1519 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
1520 return -ENOENT;
1521 }
1522
1523 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1524 ret = iwl3945_eeprom_acquire_semaphore(priv);
1525 if (ret < 0) {
1526 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1527 return -ENOENT;
1528 }
1529
1530 /* eeprom is an array of 16bit values */
1531 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1532 u32 r;
1533
1534 _iwl3945_write32(priv, CSR_EEPROM_REG,
1535 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
1536 _iwl3945_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1537 ret = iwl3945_poll_direct_bit(priv, CSR_EEPROM_REG,
1538 CSR_EEPROM_REG_READ_VALID_MSK,
1539 IWL_EEPROM_ACCESS_TIMEOUT);
1540 if (ret < 0) {
1541 IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
1542 return ret;
1543 }
1544
1545 r = _iwl3945_read_direct32(priv, CSR_EEPROM_REG);
1546 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1547 }
1548
1549 return 0;
1550}
1551
1552static void iwl3945_unset_hw_setting(struct iwl3945_priv *priv)
1553{
1554 if (priv->hw_setting.shared_virt)
1555 pci_free_consistent(priv->pci_dev, 612 pci_free_consistent(priv->pci_dev,
1556 sizeof(struct iwl3945_shared), 613 sizeof(struct iwl3945_shared),
1557 priv->hw_setting.shared_virt, 614 priv->shared_virt,
1558 priv->hw_setting.shared_phys); 615 priv->shared_phys);
1559}
1560
1561/**
1562 * iwl3945_supported_rate_to_ie - fill in the supported rate in IE field
1563 *
1564 * return : bitmask with a bit set for each supported rate inserted in the IE
1565 */
1566static u16 iwl3945_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1567 u16 basic_rate, int *left)
1568{
1569 u16 ret_rates = 0, bit;
1570 int i;
1571 u8 *cnt = ie;
1572 u8 *rates = ie + 1;
1573
1574 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1575 if (bit & supported_rate) {
1576 ret_rates |= bit;
1577 rates[*cnt] = iwl3945_rates[i].ieee |
1578 ((bit & basic_rate) ? 0x80 : 0x00);
1579 (*cnt)++;
1580 (*left)--;
1581 if ((*left <= 0) ||
1582 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
1583 break;
1584 }
1585 }
1586
1587 return ret_rates;
1588}
1589
1590/**
1591 * iwl3945_fill_probe_req - fill in all required fields and IE for probe request
1592 */
1593static u16 iwl3945_fill_probe_req(struct iwl3945_priv *priv,
1594 struct ieee80211_mgmt *frame,
1595 int left)
1596{
1597 int len = 0;
1598 u8 *pos = NULL;
1599 u16 active_rates, ret_rates, cck_rates;
1600
1601 /* Make sure there is enough space for the probe request,
1602 * two mandatory IEs and the data */
1603 left -= 24;
1604 if (left < 0)
1605 return 0;
1606 len += 24;
1607
1608 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1609 memcpy(frame->da, iwl3945_broadcast_addr, ETH_ALEN);
1610 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1611 memcpy(frame->bssid, iwl3945_broadcast_addr, ETH_ALEN);
1612 frame->seq_ctrl = 0;
1613
1614 /* fill in our indirect SSID IE */
1615 /* ...next IE... */
1616
1617 left -= 2;
1618 if (left < 0)
1619 return 0;
1620 len += 2;
1621 pos = &(frame->u.probe_req.variable[0]);
1622 *pos++ = WLAN_EID_SSID;
1623 *pos++ = 0;
1624
1625 /* fill in supported rate */
1626 /* ...next IE... */
1627 left -= 2;
1628 if (left < 0)
1629 return 0;
1630
1631 /* ... fill it in... */
1632 *pos++ = WLAN_EID_SUPP_RATES;
1633 *pos = 0;
1634
1635 priv->active_rate = priv->rates_mask;
1636 active_rates = priv->active_rate;
1637 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1638
1639 cck_rates = IWL_CCK_RATES_MASK & active_rates;
1640 ret_rates = iwl3945_supported_rate_to_ie(pos, cck_rates,
1641 priv->active_rate_basic, &left);
1642 active_rates &= ~ret_rates;
1643
1644 ret_rates = iwl3945_supported_rate_to_ie(pos, active_rates,
1645 priv->active_rate_basic, &left);
1646 active_rates &= ~ret_rates;
1647
1648 len += 2 + *pos;
1649 pos += (*pos) + 1;
1650 if (active_rates == 0)
1651 goto fill_end;
1652
1653 /* fill in supported extended rate */
1654 /* ...next IE... */
1655 left -= 2;
1656 if (left < 0)
1657 return 0;
1658 /* ... fill it in... */
1659 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1660 *pos = 0;
1661 iwl3945_supported_rate_to_ie(pos, active_rates,
1662 priv->active_rate_basic, &left);
1663 if (*pos > 0)
1664 len += 2 + *pos;
1665
1666 fill_end:
1667 return (u16)len;
1668} 616}
1669 617
1670/* 618/*
1671 * QoS support 619 * QoS support
1672*/ 620*/
1673static int iwl3945_send_qos_params_command(struct iwl3945_priv *priv, 621static int iwl3945_send_qos_params_command(struct iwl_priv *priv,
1674 struct iwl3945_qosparam_cmd *qos) 622 struct iwl_qosparam_cmd *qos)
1675{ 623{
1676 624
1677 return iwl3945_send_cmd_pdu(priv, REPLY_QOS_PARAM, 625 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1678 sizeof(struct iwl3945_qosparam_cmd), qos); 626 sizeof(struct iwl_qosparam_cmd), qos);
1679}
1680
1681static void iwl3945_reset_qos(struct iwl3945_priv *priv)
1682{
1683 u16 cw_min = 15;
1684 u16 cw_max = 1023;
1685 u8 aifs = 2;
1686 u8 is_legacy = 0;
1687 unsigned long flags;
1688 int i;
1689
1690 spin_lock_irqsave(&priv->lock, flags);
1691 priv->qos_data.qos_active = 0;
1692
1693 /* QoS always active in AP and ADHOC mode
1694 * In STA mode wait for association
1695 */
1696 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
1697 priv->iw_mode == NL80211_IFTYPE_AP)
1698 priv->qos_data.qos_active = 1;
1699 else
1700 priv->qos_data.qos_active = 0;
1701
1702
1703 /* check for legacy mode */
1704 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
1705 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
1706 (priv->iw_mode == NL80211_IFTYPE_STATION &&
1707 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
1708 cw_min = 31;
1709 is_legacy = 1;
1710 }
1711
1712 if (priv->qos_data.qos_active)
1713 aifs = 3;
1714
1715 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1716 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1717 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1718 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1719 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1720
1721 if (priv->qos_data.qos_active) {
1722 i = 1;
1723 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1724 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1725 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1726 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1727 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1728
1729 i = 2;
1730 priv->qos_data.def_qos_parm.ac[i].cw_min =
1731 cpu_to_le16((cw_min + 1) / 2 - 1);
1732 priv->qos_data.def_qos_parm.ac[i].cw_max =
1733 cpu_to_le16(cw_max);
1734 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1735 if (is_legacy)
1736 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1737 cpu_to_le16(6016);
1738 else
1739 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1740 cpu_to_le16(3008);
1741 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1742
1743 i = 3;
1744 priv->qos_data.def_qos_parm.ac[i].cw_min =
1745 cpu_to_le16((cw_min + 1) / 4 - 1);
1746 priv->qos_data.def_qos_parm.ac[i].cw_max =
1747 cpu_to_le16((cw_max + 1) / 2 - 1);
1748 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1749 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1750 if (is_legacy)
1751 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1752 cpu_to_le16(3264);
1753 else
1754 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1755 cpu_to_le16(1504);
1756 } else {
1757 for (i = 1; i < 4; i++) {
1758 priv->qos_data.def_qos_parm.ac[i].cw_min =
1759 cpu_to_le16(cw_min);
1760 priv->qos_data.def_qos_parm.ac[i].cw_max =
1761 cpu_to_le16(cw_max);
1762 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
1763 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1764 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1765 }
1766 }
1767 IWL_DEBUG_QOS("set QoS to default \n");
1768
1769 spin_unlock_irqrestore(&priv->lock, flags);
1770} 627}
1771 628
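/*
 * Worked example (a sketch, not part of this patch): with the non-legacy
 * defaults above (cw_min = 15, cw_max = 1023), ac[2] ends up with
 * cw_min = (15 + 1) / 2 - 1 = 7 and edca_txop = 3008, and ac[3] with
 * cw_min = (15 + 1) / 4 - 1 = 3, cw_max = (1023 + 1) / 2 - 1 = 511 and
 * edca_txop = 1504.  In the legacy case cw_min starts at 31 and the two
 * TXOP values grow to 6016 and 3264 respectively.
 */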
1772static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force) 629static void iwl3945_activate_qos(struct iwl_priv *priv, u8 force)
1773{ 630{
1774 unsigned long flags; 631 unsigned long flags;
1775 632
@@ -1790,8 +647,8 @@ static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force)
1790 647
1791 spin_unlock_irqrestore(&priv->lock, flags); 648 spin_unlock_irqrestore(&priv->lock, flags);
1792 649
1793 if (force || iwl3945_is_associated(priv)) { 650 if (force || iwl_is_associated(priv)) {
1794 IWL_DEBUG_QOS("send QoS cmd with QoS active %d \n", 651 IWL_DEBUG_QOS(priv, "send QoS cmd with QoS active %d \n",
1795 priv->qos_data.qos_active); 652 priv->qos_data.qos_active);
1796 653
1797 iwl3945_send_qos_params_command(priv, 654 iwl3945_send_qos_params_command(priv,
@@ -1799,236 +656,9 @@ static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force)
1799 } 656 }
1800} 657}
1801 658
1802/*
1803 * Power management (not Tx power!) functions
1804 */
1805#define MSEC_TO_USEC 1024
1806
1807#define NOSLP __constant_cpu_to_le32(0)
1808#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK
1809#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
1810#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
1811 __constant_cpu_to_le32(X1), \
1812 __constant_cpu_to_le32(X2), \
1813 __constant_cpu_to_le32(X3), \
1814 __constant_cpu_to_le32(X4)}
1815
1816
1817/* default power management (not Tx power) table values */
1818/* for TIM 0-10 */
1819static struct iwl3945_power_vec_entry range_0[IWL_POWER_AC] = {
1820 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1821 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1822 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1823 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1824 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1825 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1826};
1827
1828/* for TIM > 10 */
1829static struct iwl3945_power_vec_entry range_1[IWL_POWER_AC] = {
1830 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1831 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
1832 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1833 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
1834 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1835 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
1836 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1837 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1838 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
1839 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1840};
1841
1842int iwl3945_power_init_handle(struct iwl3945_priv *priv)
1843{
1844 int rc = 0, i;
1845 struct iwl3945_power_mgr *pow_data;
1846 int size = sizeof(struct iwl3945_power_vec_entry) * IWL_POWER_AC;
1847 u16 pci_pm;
1848
1849 IWL_DEBUG_POWER("Initialize power \n");
1850
1851 pow_data = &(priv->power_data);
1852
1853 memset(pow_data, 0, sizeof(*pow_data));
1854
1855 pow_data->active_index = IWL_POWER_RANGE_0;
1856 pow_data->dtim_val = 0xffff;
1857
1858 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1859 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1860
1861 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1862 if (rc != 0)
1863 return 0;
1864 else {
1865 struct iwl3945_powertable_cmd *cmd;
1866
1867 IWL_DEBUG_POWER("adjust power command flags\n");
1868
1869 for (i = 0; i < IWL_POWER_AC; i++) {
1870 cmd = &pow_data->pwr_range_0[i].cmd;
1871
1872 if (pci_pm & 0x1)
1873 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1874 else
1875 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1876 }
1877 }
1878 return rc;
1879}
1880
1881static int iwl3945_update_power_cmd(struct iwl3945_priv *priv,
1882 struct iwl3945_powertable_cmd *cmd, u32 mode)
1883{
1884 int rc = 0, i;
1885 u8 skip;
1886 u32 max_sleep = 0;
1887 struct iwl3945_power_vec_entry *range;
1888 u8 period = 0;
1889 struct iwl3945_power_mgr *pow_data;
1890
1891 if (mode > IWL_POWER_INDEX_5) {
1892 IWL_DEBUG_POWER("Error invalid power mode \n");
1893 return -1;
1894 }
1895 pow_data = &(priv->power_data);
1896
1897 if (pow_data->active_index == IWL_POWER_RANGE_0)
1898 range = &pow_data->pwr_range_0[0];
1899 else
1900 range = &pow_data->pwr_range_1[1];
1901
1902 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl3945_powertable_cmd));
1903
1904#ifdef IWL_MAC80211_DISABLE
1905 if (priv->assoc_network != NULL) {
1906 unsigned long flags;
1907
1908 period = priv->assoc_network->tim.tim_period;
1909 }
1910#endif /*IWL_MAC80211_DISABLE */
1911 skip = range[mode].no_dtim;
1912
1913 if (period == 0) {
1914 period = 1;
1915 skip = 0;
1916 }
1917
1918 if (skip == 0) {
1919 max_sleep = period;
1920 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1921 } else {
1922 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1923 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1924 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1925 }
1926
1927 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1928 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1929 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1930 }
1931
1932 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1933 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1934 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1935 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1936 le32_to_cpu(cmd->sleep_interval[0]),
1937 le32_to_cpu(cmd->sleep_interval[1]),
1938 le32_to_cpu(cmd->sleep_interval[2]),
1939 le32_to_cpu(cmd->sleep_interval[3]),
1940 le32_to_cpu(cmd->sleep_interval[4]));
1941
1942 return rc;
1943}
1944
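/*
 * Worked example (a sketch, not part of this patch): with "skip over
 * DTIM" enabled the clamp above keeps every entry a whole multiple of
 * the DTIM period.  For a period of 3 and a largest table entry of 10,
 * max_sleep = (10 / 3) * 3 = 9, so any sleep_interval[] value above 9 is
 * pulled down to 9; with skip == 0 the entries are simply capped at one
 * DTIM period.
 */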
1945static int iwl3945_send_power_mode(struct iwl3945_priv *priv, u32 mode)
1946{
1947 u32 uninitialized_var(final_mode);
1948 int rc;
1949 struct iwl3945_powertable_cmd cmd;
1950
1951 /* If on battery, set to 3,
1952 * if plugged into AC power, set to CAM ("continuously aware mode"),
1953 * else user level */
1954 switch (mode) {
1955 case IWL_POWER_BATTERY:
1956 final_mode = IWL_POWER_INDEX_3;
1957 break;
1958 case IWL_POWER_AC:
1959 final_mode = IWL_POWER_MODE_CAM;
1960 break;
1961 default:
1962 final_mode = mode;
1963 break;
1964 }
1965
1966 iwl3945_update_power_cmd(priv, &cmd, final_mode);
1967
1968 rc = iwl3945_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
1969
1970 if (final_mode == IWL_POWER_MODE_CAM)
1971 clear_bit(STATUS_POWER_PMI, &priv->status);
1972 else
1973 set_bit(STATUS_POWER_PMI, &priv->status);
1974
1975 return rc;
1976}
1977
1978/**
1979 * iwl3945_scan_cancel - Cancel any currently executing HW scan
1980 *
1981 * NOTE: priv->mutex is not required before calling this function
1982 */
1983static int iwl3945_scan_cancel(struct iwl3945_priv *priv)
1984{
1985 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1986 clear_bit(STATUS_SCANNING, &priv->status);
1987 return 0;
1988 }
1989
1990 if (test_bit(STATUS_SCANNING, &priv->status)) {
1991 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1992 IWL_DEBUG_SCAN("Queuing scan abort.\n");
1993 set_bit(STATUS_SCAN_ABORTING, &priv->status);
1994 queue_work(priv->workqueue, &priv->abort_scan);
1995
1996 } else
1997 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
1998
1999 return test_bit(STATUS_SCANNING, &priv->status);
2000 }
2001
2002 return 0;
2003}
2004
2005/**
2006 * iwl3945_scan_cancel_timeout - Cancel any currently executing HW scan
2007 * @ms: amount of time to wait (in milliseconds) for scan to abort
2008 *
2009 * NOTE: priv->mutex must be held before calling this function
2010 */
2011static int iwl3945_scan_cancel_timeout(struct iwl3945_priv *priv, unsigned long ms)
2012{
2013 unsigned long now = jiffies;
2014 int ret;
2015
2016 ret = iwl3945_scan_cancel(priv);
2017 if (ret && ms) {
2018 mutex_unlock(&priv->mutex);
2019 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2020 test_bit(STATUS_SCANNING, &priv->status))
2021 msleep(1);
2022 mutex_lock(&priv->mutex);
2023
2024 return test_bit(STATUS_SCANNING, &priv->status);
2025 }
2026
2027 return ret;
2028}
2029 659
2030#define MAX_UCODE_BEACON_INTERVAL 1024 660#define MAX_UCODE_BEACON_INTERVAL 1024
2031#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) 661#define INTEL_CONN_LISTEN_INTERVAL cpu_to_le16(0xA)
2032 662
2033static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val) 663static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
2034{ 664{
@@ -2043,7 +673,7 @@ static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
2043 return cpu_to_le16(new_val); 673 return cpu_to_le16(new_val);
2044} 674}
2045 675
2046static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv) 676static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
2047{ 677{
2048 u64 interval_tm_unit; 678 u64 interval_tm_unit;
2049 u64 tsf, result; 679 u64 tsf, result;
@@ -2054,13 +684,10 @@ static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
2054 conf = ieee80211_get_hw_conf(priv->hw); 684 conf = ieee80211_get_hw_conf(priv->hw);
2055 685
2056 spin_lock_irqsave(&priv->lock, flags); 686 spin_lock_irqsave(&priv->lock, flags);
2057 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); 687 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
2058 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2059
2060 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; 688 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2061 689
2062 tsf = priv->timestamp1; 690 tsf = priv->timestamp;
2063 tsf = ((tsf << 32) | priv->timestamp0);
2064 691
2065 beacon_int = priv->beacon_int; 692 beacon_int = priv->beacon_int;
2066 spin_unlock_irqrestore(&priv->lock, flags); 693 spin_unlock_irqrestore(&priv->lock, flags);
@@ -2092,32 +719,32 @@ static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
2092 priv->rxon_timing.beacon_init_val = 719 priv->rxon_timing.beacon_init_val =
2093 cpu_to_le32((u32) ((u64) interval_tm_unit - result)); 720 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2094 721
2095 IWL_DEBUG_ASSOC 722 IWL_DEBUG_ASSOC(priv,
2096 ("beacon interval %d beacon timer %d beacon tim %d\n", 723 "beacon interval %d beacon timer %d beacon tim %d\n",
2097 le16_to_cpu(priv->rxon_timing.beacon_interval), 724 le16_to_cpu(priv->rxon_timing.beacon_interval),
2098 le32_to_cpu(priv->rxon_timing.beacon_init_val), 725 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2099 le16_to_cpu(priv->rxon_timing.atim_window)); 726 le16_to_cpu(priv->rxon_timing.atim_window));
2100} 727}
2101 728
2102static int iwl3945_scan_initiate(struct iwl3945_priv *priv) 729static int iwl3945_scan_initiate(struct iwl_priv *priv)
2103{ 730{
2104 if (!iwl3945_is_ready_rf(priv)) { 731 if (!iwl_is_ready_rf(priv)) {
2105 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); 732 IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
2106 return -EIO; 733 return -EIO;
2107 } 734 }
2108 735
2109 if (test_bit(STATUS_SCANNING, &priv->status)) { 736 if (test_bit(STATUS_SCANNING, &priv->status)) {
2110 IWL_DEBUG_SCAN("Scan already in progress.\n"); 737 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
2111 return -EAGAIN; 738 return -EAGAIN;
2112 } 739 }
2113 740
2114 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 741 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2115 IWL_DEBUG_SCAN("Scan request while abort pending. " 742 IWL_DEBUG_SCAN(priv, "Scan request while abort pending. "
2116 "Queuing.\n"); 743 "Queuing.\n");
2117 return -EAGAIN; 744 return -EAGAIN;
2118 } 745 }
2119 746
2120 IWL_DEBUG_INFO("Starting scan...\n"); 747 IWL_DEBUG_INFO(priv, "Starting scan...\n");
2121 if (priv->cfg->sku & IWL_SKU_G) 748 if (priv->cfg->sku & IWL_SKU_G)
2122 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); 749 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
2123 if (priv->cfg->sku & IWL_SKU_A) 750 if (priv->cfg->sku & IWL_SKU_A)
@@ -2131,144 +758,34 @@ static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
2131 return 0; 758 return 0;
2132} 759}
2133 760
2134static int iwl3945_set_rxon_hwcrypto(struct iwl3945_priv *priv, int hw_decrypt) 761static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
2135{
2136 struct iwl3945_rxon_cmd *rxon = &priv->staging_rxon;
2137
2138 if (hw_decrypt)
2139 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2140 else
2141 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2142
2143 return 0;
2144}
2145
2146static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv,
2147 enum ieee80211_band band)
2148{
2149 if (band == IEEE80211_BAND_5GHZ) {
2150 priv->staging_rxon.flags &=
2151 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2152 | RXON_FLG_CCK_MSK);
2153 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2154 } else {
2155 /* Copied from iwl3945_bg_post_associate() */
2156 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2157 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2158 else
2159 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2160
2161 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2162 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2163
2164 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2165 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2166 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2167 }
2168}
2169
2170/*
2171 * initialize rxon structure with default values from eeprom
2172 */
2173static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv,
2174 int mode)
2175{
2176 const struct iwl3945_channel_info *ch_info;
2177
2178 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2179
2180 switch (mode) {
2181 case NL80211_IFTYPE_AP:
2182 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2183 break;
2184
2185 case NL80211_IFTYPE_STATION:
2186 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2187 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2188 break;
2189
2190 case NL80211_IFTYPE_ADHOC:
2191 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2192 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2193 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2194 RXON_FILTER_ACCEPT_GRP_MSK;
2195 break;
2196
2197 case NL80211_IFTYPE_MONITOR:
2198 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2199 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2200 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2201 break;
2202 default:
2203 IWL_ERROR("Unsupported interface type %d\n", mode);
2204 break;
2205 }
2206
2207#if 0
2208 /* TODO: Figure out when short_preamble would be set and cache from
2209 * that */
2210 if (!hw_to_local(priv->hw)->short_preamble)
2211 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2212 else
2213 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2214#endif
2215
2216 ch_info = iwl3945_get_channel_info(priv, priv->band,
2217 le16_to_cpu(priv->active_rxon.channel));
2218
2219 if (!ch_info)
2220 ch_info = &priv->channel_info[0];
2221
2222 /*
2223 * in some case A channels are all non IBSS
2224 * in this case force B/G channel
2225 */
2226 if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
2227 ch_info = &priv->channel_info[0];
2228
2229 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2230 if (is_channel_a_band(ch_info))
2231 priv->band = IEEE80211_BAND_5GHZ;
2232 else
2233 priv->band = IEEE80211_BAND_2GHZ;
2234
2235 iwl3945_set_flags_for_phymode(priv, priv->band);
2236
2237 priv->staging_rxon.ofdm_basic_rates =
2238 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2239 priv->staging_rxon.cck_basic_rates =
2240 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2241}
2242
2243static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2244{ 762{
2245 if (mode == NL80211_IFTYPE_ADHOC) { 763 if (mode == NL80211_IFTYPE_ADHOC) {
2246 const struct iwl3945_channel_info *ch_info; 764 const struct iwl_channel_info *ch_info;
2247 765
2248 ch_info = iwl3945_get_channel_info(priv, 766 ch_info = iwl_get_channel_info(priv,
2249 priv->band, 767 priv->band,
2250 le16_to_cpu(priv->staging_rxon.channel)); 768 le16_to_cpu(priv->staging_rxon.channel));
2251 769
2252 if (!ch_info || !is_channel_ibss(ch_info)) { 770 if (!ch_info || !is_channel_ibss(ch_info)) {
2253 IWL_ERROR("channel %d not IBSS channel\n", 771 IWL_ERR(priv, "channel %d not IBSS channel\n",
2254 le16_to_cpu(priv->staging_rxon.channel)); 772 le16_to_cpu(priv->staging_rxon.channel));
2255 return -EINVAL; 773 return -EINVAL;
2256 } 774 }
2257 } 775 }
2258 776
2259 iwl3945_connection_init_rx_config(priv, mode); 777 iwl_connection_init_rx_config(priv, mode);
2260 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2261 778
2262 iwl3945_clear_stations_table(priv); 779 iwl3945_clear_stations_table(priv);
2263 780
2264 /* don't commit rxon if rf-kill is on*/ 781 /* don't commit rxon if rf-kill is on*/
2265 if (!iwl3945_is_ready_rf(priv)) 782 if (!iwl_is_ready_rf(priv))
2266 return -EAGAIN; 783 return -EAGAIN;
2267 784
2268 cancel_delayed_work(&priv->scan_check); 785 cancel_delayed_work(&priv->scan_check);
2269 if (iwl3945_scan_cancel_timeout(priv, 100)) { 786 if (iwl_scan_cancel_timeout(priv, 100)) {
2270 IWL_WARNING("Aborted scan still in progress after 100ms\n"); 787 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
2271 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); 788 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
2272 return -EAGAIN; 789 return -EAGAIN;
2273 } 790 }
2274 791
@@ -2277,49 +794,50 @@ static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2277 return 0; 794 return 0;
2278} 795}
2279 796
2280static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv, 797static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2281 struct ieee80211_tx_info *info, 798 struct ieee80211_tx_info *info,
2282 struct iwl3945_cmd *cmd, 799 struct iwl_cmd *cmd,
2283 struct sk_buff *skb_frag, 800 struct sk_buff *skb_frag,
2284 int last_frag) 801 int last_frag)
2285{ 802{
803 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
2286 struct iwl3945_hw_key *keyinfo = 804 struct iwl3945_hw_key *keyinfo =
2287 &priv->stations[info->control.hw_key->hw_key_idx].keyinfo; 805 &priv->stations_39[info->control.hw_key->hw_key_idx].keyinfo;
2288 806
2289 switch (keyinfo->alg) { 807 switch (keyinfo->alg) {
2290 case ALG_CCMP: 808 case ALG_CCMP:
2291 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; 809 tx->sec_ctl = TX_CMD_SEC_CCM;
2292 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); 810 memcpy(tx->key, keyinfo->key, keyinfo->keylen);
2293 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n"); 811 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
2294 break; 812 break;
2295 813
2296 case ALG_TKIP: 814 case ALG_TKIP:
2297#if 0 815#if 0
2298 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; 816 tx->sec_ctl = TX_CMD_SEC_TKIP;
2299 817
2300 if (last_frag) 818 if (last_frag)
2301 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, 819 memcpy(tx->tkip_mic.byte, skb_frag->tail - 8,
2302 8); 820 8);
2303 else 821 else
2304 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8); 822 memset(tx->tkip_mic.byte, 0, 8);
2305#endif 823#endif
2306 break; 824 break;
2307 825
2308 case ALG_WEP: 826 case ALG_WEP:
2309 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | 827 tx->sec_ctl = TX_CMD_SEC_WEP |
2310 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 828 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2311 829
2312 if (keyinfo->keylen == 13) 830 if (keyinfo->keylen == 13)
2313 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; 831 tx->sec_ctl |= TX_CMD_SEC_KEY128;
2314 832
2315 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); 833 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);
2316 834
2317 IWL_DEBUG_TX("Configuring packet for WEP encryption " 835 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
2318 "with key %d\n", info->control.hw_key->hw_key_idx); 836 "with key %d\n", info->control.hw_key->hw_key_idx);
2319 break; 837 break;
2320 838
2321 default: 839 default:
2322 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg); 840 IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg);
2323 break; 841 break;
2324 } 842 }
2325} 843}
@@ -2327,17 +845,17 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2327/* 845/*
2328 * handle build REPLY_TX command notification. 846 * handle build REPLY_TX command notification.
2329 */ 847 */
2330static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv, 848static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
2331 struct iwl3945_cmd *cmd, 849 struct iwl_cmd *cmd,
2332 struct ieee80211_tx_info *info, 850 struct ieee80211_tx_info *info,
2333 struct ieee80211_hdr *hdr, 851 struct ieee80211_hdr *hdr, u8 std_id)
2334 int is_unicast, u8 std_id)
2335{ 852{
853 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
854 __le32 tx_flags = tx->tx_flags;
2336 __le16 fc = hdr->frame_control; 855 __le16 fc = hdr->frame_control;
2337 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2338 u8 rc_flags = info->control.rates[0].flags; 856 u8 rc_flags = info->control.rates[0].flags;
2339 857
2340 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 858 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2341 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 859 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
2342 tx_flags |= TX_CMD_FLG_ACK_MSK; 860 tx_flags |= TX_CMD_FLG_ACK_MSK;
2343 if (ieee80211_is_mgmt(fc)) 861 if (ieee80211_is_mgmt(fc))
@@ -2350,13 +868,13 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2350 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 868 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2351 } 869 }
2352 870
2353 cmd->cmd.tx.sta_id = std_id; 871 tx->sta_id = std_id;
2354 if (ieee80211_has_morefrags(fc)) 872 if (ieee80211_has_morefrags(fc))
2355 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 873 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2356 874
2357 if (ieee80211_is_data_qos(fc)) { 875 if (ieee80211_is_data_qos(fc)) {
2358 u8 *qc = ieee80211_get_qos_ctl(hdr); 876 u8 *qc = ieee80211_get_qos_ctl(hdr);
2359 cmd->cmd.tx.tid_tspec = qc[0] & 0xf; 877 tx->tid_tspec = qc[0] & 0xf;
2360 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 878 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2361 } else { 879 } else {
2362 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 880 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
@@ -2376,25 +894,25 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2376 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 894 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2377 if (ieee80211_is_mgmt(fc)) { 895 if (ieee80211_is_mgmt(fc)) {
2378 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 896 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
2379 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3); 897 tx->timeout.pm_frame_timeout = cpu_to_le16(3);
2380 else 898 else
2381 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2); 899 tx->timeout.pm_frame_timeout = cpu_to_le16(2);
2382 } else { 900 } else {
2383 cmd->cmd.tx.timeout.pm_frame_timeout = 0; 901 tx->timeout.pm_frame_timeout = 0;
2384#ifdef CONFIG_IWL3945_LEDS 902#ifdef CONFIG_IWL3945_LEDS
2385 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len); 903 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
2386#endif 904#endif
2387 } 905 }
2388 906
2389 cmd->cmd.tx.driver_txop = 0; 907 tx->driver_txop = 0;
2390 cmd->cmd.tx.tx_flags = tx_flags; 908 tx->tx_flags = tx_flags;
2391 cmd->cmd.tx.next_frame_len = 0; 909 tx->next_frame_len = 0;
2392} 910}
2393 911
2394/** 912/**
2395 * iwl3945_get_sta_id - Find station's index within station table 913 * iwl3945_get_sta_id - Find station's index within station table
2396 */ 914 */
2397static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *hdr) 915static int iwl3945_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2398{ 916{
2399 int sta_id; 917 int sta_id;
2400 u16 fc = le16_to_cpu(hdr->frame_control); 918 u16 fc = le16_to_cpu(hdr->frame_control);
@@ -2402,7 +920,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2402 /* If this frame is broadcast or management, use broadcast station id */ 920 /* If this frame is broadcast or management, use broadcast station id */
2403 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || 921 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2404 is_multicast_ether_addr(hdr->addr1)) 922 is_multicast_ether_addr(hdr->addr1))
2405 return priv->hw_setting.bcast_sta_id; 923 return priv->hw_params.bcast_sta_id;
2406 924
2407 switch (priv->iw_mode) { 925 switch (priv->iw_mode) {
2408 926
@@ -2416,7 +934,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2416 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 934 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
2417 if (sta_id != IWL_INVALID_STATION) 935 if (sta_id != IWL_INVALID_STATION)
2418 return sta_id; 936 return sta_id;
2419 return priv->hw_setting.bcast_sta_id; 937 return priv->hw_params.bcast_sta_id;
2420 938
2421 /* If this frame is going out to an IBSS network, find the station, 939 /* If this frame is going out to an IBSS network, find the station,
2422 * or create a new station table entry */ 940 * or create a new station table entry */
@@ -2431,38 +949,38 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2431 if (sta_id != IWL_INVALID_STATION) 949 if (sta_id != IWL_INVALID_STATION)
2432 return sta_id; 950 return sta_id;
2433 951
2434 IWL_DEBUG_DROP("Station %pM not in station map. " 952 IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
2435 "Defaulting to broadcast...\n", 953 "Defaulting to broadcast...\n",
2436 hdr->addr1); 954 hdr->addr1);
2437 iwl3945_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 955 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2438 return priv->hw_setting.bcast_sta_id; 956 return priv->hw_params.bcast_sta_id;
2439 } 957 }
2440 /* If we are in monitor mode, use BCAST. This is required for 958 /* If we are in monitor mode, use BCAST. This is required for
2441 * packet injection. */ 959 * packet injection. */
2442 case NL80211_IFTYPE_MONITOR: 960 case NL80211_IFTYPE_MONITOR:
2443 return priv->hw_setting.bcast_sta_id; 961 return priv->hw_params.bcast_sta_id;
2444 962
2445 default: 963 default:
2446 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); 964 IWL_WARN(priv, "Unknown mode of operation: %d\n",
2447 return priv->hw_setting.bcast_sta_id; 965 priv->iw_mode);
966 return priv->hw_params.bcast_sta_id;
2448 } 967 }
2449} 968}
2450 969
2451/* 970/*
2452 * start REPLY_TX command process 971 * start REPLY_TX command process
2453 */ 972 */
2454static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb) 973static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
2455{ 974{
2456 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 975 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2457 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 976 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2458 struct iwl3945_tfd_frame *tfd; 977 struct iwl3945_tx_cmd *tx;
2459 u32 *control_flags; 978 struct iwl_tx_queue *txq = NULL;
2460 int txq_id = skb_get_queue_mapping(skb); 979 struct iwl_queue *q = NULL;
2461 struct iwl3945_tx_queue *txq = NULL; 980 struct iwl_cmd *out_cmd = NULL;
2462 struct iwl3945_queue *q = NULL;
2463 dma_addr_t phys_addr; 981 dma_addr_t phys_addr;
2464 dma_addr_t txcmd_phys; 982 dma_addr_t txcmd_phys;
2465 struct iwl3945_cmd *out_cmd = NULL; 983 int txq_id = skb_get_queue_mapping(skb);
2466 u16 len, idx, len_org, hdr_len; 984 u16 len, idx, len_org, hdr_len;
2467 u8 id; 985 u8 id;
2468 u8 unicast; 986 u8 unicast;
@@ -2476,13 +994,13 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2476 int rc; 994 int rc;
2477 995
2478 spin_lock_irqsave(&priv->lock, flags); 996 spin_lock_irqsave(&priv->lock, flags);
2479 if (iwl3945_is_rfkill(priv)) { 997 if (iwl_is_rfkill(priv)) {
2480 IWL_DEBUG_DROP("Dropping - RF KILL\n"); 998 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
2481 goto drop_unlock; 999 goto drop_unlock;
2482 } 1000 }
2483 1001
2484 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { 1002 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
2485 IWL_ERROR("ERROR: No TX rate available.\n"); 1003 IWL_ERR(priv, "ERROR: No TX rate available.\n");
2486 goto drop_unlock; 1004 goto drop_unlock;
2487 } 1005 }
2488 1006
@@ -2491,21 +1009,21 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2491 1009
2492 fc = hdr->frame_control; 1010 fc = hdr->frame_control;
2493 1011
2494#ifdef CONFIG_IWL3945_DEBUG 1012#ifdef CONFIG_IWLWIFI_DEBUG
2495 if (ieee80211_is_auth(fc)) 1013 if (ieee80211_is_auth(fc))
2496 IWL_DEBUG_TX("Sending AUTH frame\n"); 1014 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
2497 else if (ieee80211_is_assoc_req(fc)) 1015 else if (ieee80211_is_assoc_req(fc))
2498 IWL_DEBUG_TX("Sending ASSOC frame\n"); 1016 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
2499 else if (ieee80211_is_reassoc_req(fc)) 1017 else if (ieee80211_is_reassoc_req(fc))
2500 IWL_DEBUG_TX("Sending REASSOC frame\n"); 1018 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
2501#endif 1019#endif
2502 1020
2503 /* drop all data frame if we are not associated */ 1021 /* drop all data frame if we are not associated */
2504 if (ieee80211_is_data(fc) && 1022 if (ieee80211_is_data(fc) &&
2505 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */ 1023 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
2506 (!iwl3945_is_associated(priv) || 1024 (!iwl_is_associated(priv) ||
2507 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) { 1025 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
2508 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n"); 1026 IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
2509 goto drop_unlock; 1027 goto drop_unlock;
2510 } 1028 }
2511 1029
@@ -2516,21 +1034,21 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2516 /* Find (or create) index into station table for destination station */ 1034 /* Find (or create) index into station table for destination station */
2517 sta_id = iwl3945_get_sta_id(priv, hdr); 1035 sta_id = iwl3945_get_sta_id(priv, hdr);
2518 if (sta_id == IWL_INVALID_STATION) { 1036 if (sta_id == IWL_INVALID_STATION) {
2519 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n", 1037 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
2520 hdr->addr1); 1038 hdr->addr1);
2521 goto drop; 1039 goto drop;
2522 } 1040 }
2523 1041
2524 IWL_DEBUG_RATE("station Id %d\n", sta_id); 1042 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
2525 1043
2526 if (ieee80211_is_data_qos(fc)) { 1044 if (ieee80211_is_data_qos(fc)) {
2527 qc = ieee80211_get_qos_ctl(hdr); 1045 qc = ieee80211_get_qos_ctl(hdr);
2528 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 1046 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
2529 seq_number = priv->stations[sta_id].tid[tid].seq_number & 1047 seq_number = priv->stations_39[sta_id].tid[tid].seq_number &
2530 IEEE80211_SCTL_SEQ; 1048 IEEE80211_SCTL_SEQ;
2531 hdr->seq_ctrl = cpu_to_le16(seq_number) | 1049 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2532 (hdr->seq_ctrl & 1050 (hdr->seq_ctrl &
2533 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); 1051 cpu_to_le16(IEEE80211_SCTL_FRAG));
2534 seq_number += 0x10; 1052 seq_number += 0x10;
2535 } 1053 }
2536 1054
@@ -2540,20 +1058,17 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2540 1058
2541 spin_lock_irqsave(&priv->lock, flags); 1059 spin_lock_irqsave(&priv->lock, flags);
2542 1060
2543 /* Set up first empty TFD within this queue's circular TFD buffer */
2544 tfd = &txq->bd[q->write_ptr];
2545 memset(tfd, 0, sizeof(*tfd));
2546 control_flags = (u32 *) tfd;
2547 idx = get_cmd_index(q, q->write_ptr, 0); 1061 idx = get_cmd_index(q, q->write_ptr, 0);
2548 1062
2549 /* Set up driver data for this TFD */ 1063 /* Set up driver data for this TFD */
2550 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info)); 1064 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
2551 txq->txb[q->write_ptr].skb[0] = skb; 1065 txq->txb[q->write_ptr].skb[0] = skb;
2552 1066
2553 /* Init first empty entry in queue's array of Tx/cmd buffers */ 1067 /* Init first empty entry in queue's array of Tx/cmd buffers */
2554 out_cmd = &txq->cmd[idx]; 1068 out_cmd = txq->cmd[idx];
1069 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
2555 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 1070 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2556 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx)); 1071 memset(tx, 0, sizeof(*tx));
2557 1072
2558 /* 1073 /*
2559 * Set up the Tx-command (not MAC!) header. 1074 * Set up the Tx-command (not MAC!) header.
@@ -2566,7 +1081,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2566 INDEX_TO_SEQ(q->write_ptr))); 1081 INDEX_TO_SEQ(q->write_ptr)));
2567 1082
2568 /* Copy MAC header from skb into command buffer */ 1083 /* Copy MAC header from skb into command buffer */
2569 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len); 1084 memcpy(tx->hdr, hdr, hdr_len);
2570 1085
2571 /* 1086 /*
2572 * Use the first empty entry in this queue's command buffer array 1087 * Use the first empty entry in this queue's command buffer array
@@ -2577,8 +1092,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2577 * of the MAC header (device reads on dword boundaries). 1092 * of the MAC header (device reads on dword boundaries).
2578 * We'll tell device about this padding later. 1093 * We'll tell device about this padding later.
2579 */ 1094 */
2580 len = priv->hw_setting.tx_cmd_len + 1095 len = sizeof(struct iwl3945_tx_cmd) +
2581 sizeof(struct iwl3945_cmd_header) + hdr_len; 1096 sizeof(struct iwl_cmd_header) + hdr_len;
2582 1097
2583 len_org = len; 1098 len_org = len;
2584 len = (len + 3) & ~3; 1099 len = (len + 3) & ~3;
@@ -2590,12 +1105,19 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2590 1105
2591 /* Physical address of this Tx command's header (not MAC header!), 1106 /* Physical address of this Tx command's header (not MAC header!),
2592 * within command buffer array. */ 1107 * within command buffer array. */
2593 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl3945_cmd) * idx + 1108 txcmd_phys = pci_map_single(priv->pci_dev,
2594 offsetof(struct iwl3945_cmd, hdr); 1109 out_cmd, sizeof(struct iwl_cmd),
1110 PCI_DMA_TODEVICE);
1111 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
1112 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
1113 /* Add buffer containing Tx command and MAC(!) header to TFD's
1114 * first entry */
1115 txcmd_phys += offsetof(struct iwl_cmd, hdr);
2595 1116
2596 /* Add buffer containing Tx command and MAC(!) header to TFD's 1117 /* Add buffer containing Tx command and MAC(!) header to TFD's
2597 * first entry */ 1118 * first entry */
2598 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 1119 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1120 txcmd_phys, len, 1, 0);
2599 1121
2600 if (info->control.hw_key) 1122 if (info->control.hw_key)
2601 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0); 1123 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
@@ -2606,60 +1128,52 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2606 if (len) { 1128 if (len) {
2607 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 1129 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2608 len, PCI_DMA_TODEVICE); 1130 len, PCI_DMA_TODEVICE);
2609 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); 1131 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1132 phys_addr, len,
1133 0, U32_PAD(len));
2610 } 1134 }
2611 1135
2612 if (!len)
2613 /* If there is no payload, then we use only one Tx buffer */
2614 *control_flags = TFD_CTL_COUNT_SET(1);
2615 else
2616 /* Else use 2 buffers.
2617 * Tell 3945 about any padding after MAC header */
2618 *control_flags = TFD_CTL_COUNT_SET(2) |
2619 TFD_CTL_PAD_SET(U32_PAD(len));
2620
2621 /* Total # bytes to be transmitted */ 1136 /* Total # bytes to be transmitted */
2622 len = (u16)skb->len; 1137 len = (u16)skb->len;
2623 out_cmd->cmd.tx.len = cpu_to_le16(len); 1138 tx->len = cpu_to_le16(len);
2624 1139
2625 /* TODO need this for burst mode later on */ 1140 /* TODO need this for burst mode later on */
2626 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, unicast, sta_id); 1141 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
2627 1142
2628 /* set is_hcca to 0; it probably will never be implemented */ 1143 /* set is_hcca to 0; it probably will never be implemented */
2629 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); 1144 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
2630 1145
2631 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 1146 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2632 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 1147 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
2633 1148
2634 if (!ieee80211_has_morefrags(hdr->frame_control)) { 1149 if (!ieee80211_has_morefrags(hdr->frame_control)) {
2635 txq->need_update = 1; 1150 txq->need_update = 1;
2636 if (qc) 1151 if (qc)
2637 priv->stations[sta_id].tid[tid].seq_number = seq_number; 1152 priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
2638 } else { 1153 } else {
2639 wait_write_ptr = 1; 1154 wait_write_ptr = 1;
2640 txq->need_update = 0; 1155 txq->need_update = 0;
2641 } 1156 }
2642 1157
2643 iwl3945_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, 1158 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
2644 sizeof(out_cmd->cmd.tx));
2645 1159
2646 iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, 1160 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
2647 ieee80211_hdrlen(fc)); 1161 ieee80211_hdrlen(fc));
2648 1162
2649 /* Tell device the write index *just past* this latest filled TFD */ 1163 /* Tell device the write index *just past* this latest filled TFD */
2650 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1164 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
2651 rc = iwl3945_tx_queue_update_write_ptr(priv, txq); 1165 rc = iwl_txq_update_write_ptr(priv, txq);
2652 spin_unlock_irqrestore(&priv->lock, flags); 1166 spin_unlock_irqrestore(&priv->lock, flags);
2653 1167
2654 if (rc) 1168 if (rc)
2655 return rc; 1169 return rc;
2656 1170
2657 if ((iwl3945_queue_space(q) < q->high_mark) 1171 if ((iwl_queue_space(q) < q->high_mark)
2658 && priv->mac80211_registered) { 1172 && priv->mac80211_registered) {
2659 if (wait_write_ptr) { 1173 if (wait_write_ptr) {
2660 spin_lock_irqsave(&priv->lock, flags); 1174 spin_lock_irqsave(&priv->lock, flags);
2661 txq->need_update = 1; 1175 txq->need_update = 1;
2662 iwl3945_tx_queue_update_write_ptr(priv, txq); 1176 iwl_txq_update_write_ptr(priv, txq);
2663 spin_unlock_irqrestore(&priv->lock, flags); 1177 spin_unlock_irqrestore(&priv->lock, flags);
2664 } 1178 }
2665 1179
@@ -2674,86 +1188,32 @@ drop:
2674 return -1; 1188 return -1;
2675} 1189}
2676 1190
2677static void iwl3945_set_rate(struct iwl3945_priv *priv) 1191static void iwl3945_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2678{
2679 const struct ieee80211_supported_band *sband = NULL;
2680 struct ieee80211_rate *rate;
2681 int i;
2682
2683 sband = iwl3945_get_band(priv, priv->band);
2684 if (!sband) {
2685 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
2686 return;
2687 }
2688
2689 priv->active_rate = 0;
2690 priv->active_rate_basic = 0;
2691
2692 IWL_DEBUG_RATE("Setting rates for %s GHz\n",
2693 sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
2694
2695 for (i = 0; i < sband->n_bitrates; i++) {
2696 rate = &sband->bitrates[i];
2697 if ((rate->hw_value < IWL_RATE_COUNT) &&
2698 !(rate->flags & IEEE80211_CHAN_DISABLED)) {
2699 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
2700 rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
2701 priv->active_rate |= (1 << rate->hw_value);
2702 }
2703 }
2704
2705 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2706 priv->active_rate, priv->active_rate_basic);
2707
2708 /*
2709 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
2710 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
2711 * OFDM
2712 */
2713 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
2714 priv->staging_rxon.cck_basic_rates =
2715 ((priv->active_rate_basic &
2716 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2717 else
2718 priv->staging_rxon.cck_basic_rates =
2719 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2720
2721 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
2722 priv->staging_rxon.ofdm_basic_rates =
2723 ((priv->active_rate_basic &
2724 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2725 IWL_FIRST_OFDM_RATE) & 0xFF;
2726 else
2727 priv->staging_rxon.ofdm_basic_rates =
2728 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2729}
2730
2731static void iwl3945_radio_kill_sw(struct iwl3945_priv *priv, int disable_radio)
2732{ 1192{
2733 unsigned long flags; 1193 unsigned long flags;
2734 1194
2735 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status)) 1195 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2736 return; 1196 return;
2737 1197
2738 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n", 1198 IWL_DEBUG_RF_KILL(priv, "Manual SW RF KILL set to: RADIO %s\n",
2739 disable_radio ? "OFF" : "ON"); 1199 disable_radio ? "OFF" : "ON");
2740 1200
2741 if (disable_radio) { 1201 if (disable_radio) {
2742 iwl3945_scan_cancel(priv); 1202 iwl_scan_cancel(priv);
2743 /* FIXME: This is a workaround for AP */ 1203 /* FIXME: This is a workaround for AP */
2744 if (priv->iw_mode != NL80211_IFTYPE_AP) { 1204 if (priv->iw_mode != NL80211_IFTYPE_AP) {
2745 spin_lock_irqsave(&priv->lock, flags); 1205 spin_lock_irqsave(&priv->lock, flags);
2746 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET, 1206 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2747 CSR_UCODE_SW_BIT_RFKILL); 1207 CSR_UCODE_SW_BIT_RFKILL);
2748 spin_unlock_irqrestore(&priv->lock, flags); 1208 spin_unlock_irqrestore(&priv->lock, flags);
2749 iwl3945_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); 1209 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
2750 set_bit(STATUS_RF_KILL_SW, &priv->status); 1210 set_bit(STATUS_RF_KILL_SW, &priv->status);
2751 } 1211 }
2752 return; 1212 return;
2753 } 1213 }
2754 1214
2755 spin_lock_irqsave(&priv->lock, flags); 1215 spin_lock_irqsave(&priv->lock, flags);
2756 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1216 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2757 1217
2758 clear_bit(STATUS_RF_KILL_SW, &priv->status); 1218 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2759 spin_unlock_irqrestore(&priv->lock, flags); 1219 spin_unlock_irqrestore(&priv->lock, flags);
@@ -2762,13 +1222,13 @@ static void iwl3945_radio_kill_sw(struct iwl3945_priv *priv, int disable_radio)
2762 msleep(10); 1222 msleep(10);
2763 1223
2764 spin_lock_irqsave(&priv->lock, flags); 1224 spin_lock_irqsave(&priv->lock, flags);
2765 iwl3945_read32(priv, CSR_UCODE_DRV_GP1); 1225 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2766 if (!iwl3945_grab_nic_access(priv)) 1226 if (!iwl_grab_nic_access(priv))
2767 iwl3945_release_nic_access(priv); 1227 iwl_release_nic_access(priv);
2768 spin_unlock_irqrestore(&priv->lock, flags); 1228 spin_unlock_irqrestore(&priv->lock, flags);
2769 1229
2770 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { 1230 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2771 IWL_DEBUG_RF_KILL("Can not turn radio back on - " 1231 IWL_DEBUG_RF_KILL(priv, "Can not turn radio back on - "
2772 "disabled by HW switch\n"); 1232 "disabled by HW switch\n");
2773 return; 1233 return;
2774 } 1234 }
@@ -2778,38 +1238,6 @@ static void iwl3945_radio_kill_sw(struct iwl3945_priv *priv, int disable_radio)
2778 return; 1238 return;
2779} 1239}
2780 1240
2781void iwl3945_set_decrypted_flag(struct iwl3945_priv *priv, struct sk_buff *skb,
2782 u32 decrypt_res, struct ieee80211_rx_status *stats)
2783{
2784 u16 fc =
2785 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2786
2787 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2788 return;
2789
2790 if (!(fc & IEEE80211_FCTL_PROTECTED))
2791 return;
2792
2793 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2794 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2795 case RX_RES_STATUS_SEC_TYPE_TKIP:
2796 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2797 RX_RES_STATUS_BAD_ICV_MIC)
2798 stats->flag |= RX_FLAG_MMIC_ERROR;
2799 case RX_RES_STATUS_SEC_TYPE_WEP:
2800 case RX_RES_STATUS_SEC_TYPE_CCMP:
2801 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2802 RX_RES_STATUS_DECRYPT_OK) {
2803 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2804 stats->flag |= RX_FLAG_DECRYPTED;
2805 }
2806 break;
2807
2808 default:
2809 break;
2810 }
2811}
2812
2813#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 1241#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
2814 1242
2815#include "iwl-spectrum.h" 1243#include "iwl-spectrum.h"
@@ -2863,13 +1291,13 @@ static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
2863 return cpu_to_le32(res); 1291 return cpu_to_le32(res);
2864} 1292}
2865 1293
2866static int iwl3945_get_measurement(struct iwl3945_priv *priv, 1294static int iwl3945_get_measurement(struct iwl_priv *priv,
2867 struct ieee80211_measurement_params *params, 1295 struct ieee80211_measurement_params *params,
2868 u8 type) 1296 u8 type)
2869{ 1297{
2870 struct iwl3945_spectrum_cmd spectrum; 1298 struct iwl_spectrum_cmd spectrum;
2871 struct iwl3945_rx_packet *res; 1299 struct iwl_rx_packet *res;
2872 struct iwl3945_host_cmd cmd = { 1300 struct iwl_host_cmd cmd = {
2873 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 1301 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2874 .data = (void *)&spectrum, 1302 .data = (void *)&spectrum,
2875 .meta.flags = CMD_WANT_SKB, 1303 .meta.flags = CMD_WANT_SKB,
@@ -2879,7 +1307,7 @@ static int iwl3945_get_measurement(struct iwl3945_priv *priv,
2879 int spectrum_resp_status; 1307 int spectrum_resp_status;
2880 int duration = le16_to_cpu(params->duration); 1308 int duration = le16_to_cpu(params->duration);
2881 1309
2882 if (iwl3945_is_associated(priv)) 1310 if (iwl_is_associated(priv))
2883 add_time = 1311 add_time =
2884 iwl3945_usecs_to_beacons( 1312 iwl3945_usecs_to_beacons(
2885 le64_to_cpu(params->start_time) - priv->last_tsf, 1313 le64_to_cpu(params->start_time) - priv->last_tsf,
@@ -2894,7 +1322,7 @@ static int iwl3945_get_measurement(struct iwl3945_priv *priv,
2894 cmd.len = sizeof(spectrum); 1322 cmd.len = sizeof(spectrum);
2895 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 1323 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2896 1324
2897 if (iwl3945_is_associated(priv)) 1325 if (iwl_is_associated(priv))
2898 spectrum.start_time = 1326 spectrum.start_time =
2899 iwl3945_add_beacon_time(priv->last_beacon_time, 1327 iwl3945_add_beacon_time(priv->last_beacon_time,
2900 add_time, 1328 add_time,
@@ -2909,13 +1337,13 @@ static int iwl3945_get_measurement(struct iwl3945_priv *priv,
2909 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 1337 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2910 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 1338 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2911 1339
2912 rc = iwl3945_send_cmd_sync(priv, &cmd); 1340 rc = iwl_send_cmd_sync(priv, &cmd);
2913 if (rc) 1341 if (rc)
2914 return rc; 1342 return rc;
2915 1343
2916 res = (struct iwl3945_rx_packet *)cmd.meta.u.skb->data; 1344 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
2917 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 1345 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2918 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); 1346 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
2919 rc = -EIO; 1347 rc = -EIO;
2920 } 1348 }
2921 1349
@@ -2923,7 +1351,7 @@ static int iwl3945_get_measurement(struct iwl3945_priv *priv,
2923 switch (spectrum_resp_status) { 1351 switch (spectrum_resp_status) {
2924 case 0: /* Command will be handled */ 1352 case 0: /* Command will be handled */
2925 if (res->u.spectrum.id != 0xff) { 1353 if (res->u.spectrum.id != 0xff) {
2926 IWL_DEBUG_INFO("Replaced existing measurement: %d\n", 1354 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
2927 res->u.spectrum.id); 1355 res->u.spectrum.id);
2928 priv->measurement_status &= ~MEASUREMENT_READY; 1356 priv->measurement_status &= ~MEASUREMENT_READY;
2929 } 1357 }
@@ -2942,30 +1370,29 @@ static int iwl3945_get_measurement(struct iwl3945_priv *priv,
2942} 1370}
2943#endif 1371#endif
2944 1372
2945static void iwl3945_rx_reply_alive(struct iwl3945_priv *priv, 1373static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
2946 struct iwl3945_rx_mem_buffer *rxb) 1374 struct iwl_rx_mem_buffer *rxb)
2947{ 1375{
2948 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1376 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2949 struct iwl3945_alive_resp *palive; 1377 struct iwl_alive_resp *palive;
2950 struct delayed_work *pwork; 1378 struct delayed_work *pwork;
2951 1379
2952 palive = &pkt->u.alive_frame; 1380 palive = &pkt->u.alive_frame;
2953 1381
2954 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " 1382 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
2955 "0x%01X 0x%01X\n", 1383 "0x%01X 0x%01X\n",
2956 palive->is_valid, palive->ver_type, 1384 palive->is_valid, palive->ver_type,
2957 palive->ver_subtype); 1385 palive->ver_subtype);
2958 1386
2959 if (palive->ver_subtype == INITIALIZE_SUBTYPE) { 1387 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
2960 IWL_DEBUG_INFO("Initialization Alive received.\n"); 1388 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2961 memcpy(&priv->card_alive_init, 1389 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
2962 &pkt->u.alive_frame, 1390 sizeof(struct iwl_alive_resp));
2963 sizeof(struct iwl3945_init_alive_resp));
2964 pwork = &priv->init_alive_start; 1391 pwork = &priv->init_alive_start;
2965 } else { 1392 } else {
2966 IWL_DEBUG_INFO("Runtime Alive received.\n"); 1393 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2967 memcpy(&priv->card_alive, &pkt->u.alive_frame, 1394 memcpy(&priv->card_alive, &pkt->u.alive_frame,
2968 sizeof(struct iwl3945_alive_resp)); 1395 sizeof(struct iwl_alive_resp));
2969 pwork = &priv->alive_start; 1396 pwork = &priv->alive_start;
2970 iwl3945_disable_events(priv); 1397 iwl3945_disable_events(priv);
2971 } 1398 }
@@ -2976,24 +1403,26 @@ static void iwl3945_rx_reply_alive(struct iwl3945_priv *priv,
2976 queue_delayed_work(priv->workqueue, pwork, 1403 queue_delayed_work(priv->workqueue, pwork,
2977 msecs_to_jiffies(5)); 1404 msecs_to_jiffies(5));
2978 else 1405 else
2979 IWL_WARNING("uCode did not respond OK.\n"); 1406 IWL_WARN(priv, "uCode did not respond OK.\n");
2980} 1407}
2981 1408
2982static void iwl3945_rx_reply_add_sta(struct iwl3945_priv *priv, 1409static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
2983 struct iwl3945_rx_mem_buffer *rxb) 1410 struct iwl_rx_mem_buffer *rxb)
2984{ 1411{
2985 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1412#ifdef CONFIG_IWLWIFI_DEBUG
1413 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
1414#endif
2986 1415
2987 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 1416 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
2988 return; 1417 return;
2989} 1418}
2990 1419
2991static void iwl3945_rx_reply_error(struct iwl3945_priv *priv, 1420static void iwl3945_rx_reply_error(struct iwl_priv *priv,
2992 struct iwl3945_rx_mem_buffer *rxb) 1421 struct iwl_rx_mem_buffer *rxb)
2993{ 1422{
2994 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1423 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2995 1424
2996 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " 1425 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
2997 "seq 0x%04X ser 0x%08X\n", 1426 "seq 0x%04X ser 0x%08X\n",
2998 le32_to_cpu(pkt->u.err_resp.error_type), 1427 le32_to_cpu(pkt->u.err_resp.error_type),
2999 get_cmd_string(pkt->u.err_resp.cmd_id), 1428 get_cmd_string(pkt->u.err_resp.cmd_id),
@@ -3002,28 +1431,15 @@ static void iwl3945_rx_reply_error(struct iwl3945_priv *priv,
3002 le32_to_cpu(pkt->u.err_resp.error_info)); 1431 le32_to_cpu(pkt->u.err_resp.error_info));
3003} 1432}
3004 1433
3005#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 1434static void iwl3945_rx_spectrum_measure_notif(struct iwl_priv *priv,
3006 1435 struct iwl_rx_mem_buffer *rxb)
3007static void iwl3945_rx_csa(struct iwl3945_priv *priv, struct iwl3945_rx_mem_buffer *rxb)
3008{
3009 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data;
3010 struct iwl3945_rxon_cmd *rxon = (void *)&priv->active_rxon;
3011 struct iwl3945_csa_notification *csa = &(pkt->u.csa_notif);
3012 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3013 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3014 rxon->channel = csa->channel;
3015 priv->staging_rxon.channel = csa->channel;
3016}
3017
3018static void iwl3945_rx_spectrum_measure_notif(struct iwl3945_priv *priv,
3019 struct iwl3945_rx_mem_buffer *rxb)
3020{ 1436{
3021#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 1437#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3022 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1438 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3023 struct iwl3945_spectrum_notification *report = &(pkt->u.spectrum_notif); 1439 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
3024 1440
3025 if (!report->state) { 1441 if (!report->state) {
3026 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, 1442 IWL_DEBUG(priv, IWL_DL_11H | IWL_DL_INFO,
3027 "Spectrum Measure Notification: Start\n"); 1443 "Spectrum Measure Notification: Start\n");
3028 return; 1444 return;
3029 } 1445 }
@@ -3033,38 +1449,39 @@ static void iwl3945_rx_spectrum_measure_notif(struct iwl3945_priv *priv,
3033#endif 1449#endif
3034} 1450}
3035 1451
3036static void iwl3945_rx_pm_sleep_notif(struct iwl3945_priv *priv, 1452static void iwl3945_rx_pm_sleep_notif(struct iwl_priv *priv,
3037 struct iwl3945_rx_mem_buffer *rxb) 1453 struct iwl_rx_mem_buffer *rxb)
3038{ 1454{
3039#ifdef CONFIG_IWL3945_DEBUG 1455#ifdef CONFIG_IWLWIFI_DEBUG
3040 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1456 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3041 struct iwl3945_sleep_notification *sleep = &(pkt->u.sleep_notif); 1457 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
3042 IWL_DEBUG_RX("sleep mode: %d, src: %d\n", 1458 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
3043 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 1459 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3044#endif 1460#endif
3045} 1461}
3046 1462
3047static void iwl3945_rx_pm_debug_statistics_notif(struct iwl3945_priv *priv, 1463static void iwl3945_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3048 struct iwl3945_rx_mem_buffer *rxb) 1464 struct iwl_rx_mem_buffer *rxb)
3049{ 1465{
3050 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1466 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3051 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " 1467 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
3052 "notification for %s:\n", 1468 "notification for %s:\n",
3053 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); 1469 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3054 iwl3945_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); 1470 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw,
1471 le32_to_cpu(pkt->len));
3055} 1472}
3056 1473
3057static void iwl3945_bg_beacon_update(struct work_struct *work) 1474static void iwl3945_bg_beacon_update(struct work_struct *work)
3058{ 1475{
3059 struct iwl3945_priv *priv = 1476 struct iwl_priv *priv =
3060 container_of(work, struct iwl3945_priv, beacon_update); 1477 container_of(work, struct iwl_priv, beacon_update);
3061 struct sk_buff *beacon; 1478 struct sk_buff *beacon;
3062 1479
3063 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 1480 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3064 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 1481 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
3065 1482
3066 if (!beacon) { 1483 if (!beacon) {
3067 IWL_ERROR("update beacon failed\n"); 1484 IWL_ERR(priv, "update beacon failed\n");
3068 return; 1485 return;
3069 } 1486 }
3070 1487
@@ -3079,15 +1496,15 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
3079 iwl3945_send_beacon_cmd(priv); 1496 iwl3945_send_beacon_cmd(priv);
3080} 1497}
3081 1498
3082static void iwl3945_rx_beacon_notif(struct iwl3945_priv *priv, 1499static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
3083 struct iwl3945_rx_mem_buffer *rxb) 1500 struct iwl_rx_mem_buffer *rxb)
3084{ 1501{
3085#ifdef CONFIG_IWL3945_DEBUG 1502#ifdef CONFIG_IWLWIFI_DEBUG
3086 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1503 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3087 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 1504 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
3088 u8 rate = beacon->beacon_notify_hdr.rate; 1505 u8 rate = beacon->beacon_notify_hdr.rate;
3089 1506
3090 IWL_DEBUG_RX("beacon status %x retries %d iss %d " 1507 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
3091 "tsf %d %d rate %d\n", 1508 "tsf %d %d rate %d\n",
3092 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, 1509 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3093 beacon->beacon_notify_hdr.failure_frame, 1510 beacon->beacon_notify_hdr.failure_frame,
@@ -3102,27 +1519,27 @@ static void iwl3945_rx_beacon_notif(struct iwl3945_priv *priv,
3102} 1519}
3103 1520
3104/* Service response to REPLY_SCAN_CMD (0x80) */ 1521/* Service response to REPLY_SCAN_CMD (0x80) */
3105static void iwl3945_rx_reply_scan(struct iwl3945_priv *priv, 1522static void iwl3945_rx_reply_scan(struct iwl_priv *priv,
3106 struct iwl3945_rx_mem_buffer *rxb) 1523 struct iwl_rx_mem_buffer *rxb)
3107{ 1524{
3108#ifdef CONFIG_IWL3945_DEBUG 1525#ifdef CONFIG_IWLWIFI_DEBUG
3109 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1526 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3110 struct iwl3945_scanreq_notification *notif = 1527 struct iwl_scanreq_notification *notif =
3111 (struct iwl3945_scanreq_notification *)pkt->u.raw; 1528 (struct iwl_scanreq_notification *)pkt->u.raw;
3112 1529
3113 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); 1530 IWL_DEBUG_RX(priv, "Scan request status = 0x%x\n", notif->status);
3114#endif 1531#endif
3115} 1532}
3116 1533
3117/* Service SCAN_START_NOTIFICATION (0x82) */ 1534/* Service SCAN_START_NOTIFICATION (0x82) */
3118static void iwl3945_rx_scan_start_notif(struct iwl3945_priv *priv, 1535static void iwl3945_rx_scan_start_notif(struct iwl_priv *priv,
3119 struct iwl3945_rx_mem_buffer *rxb) 1536 struct iwl_rx_mem_buffer *rxb)
3120{ 1537{
3121 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1538 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3122 struct iwl3945_scanstart_notification *notif = 1539 struct iwl_scanstart_notification *notif =
3123 (struct iwl3945_scanstart_notification *)pkt->u.raw; 1540 (struct iwl_scanstart_notification *)pkt->u.raw;
3124 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); 1541 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3125 IWL_DEBUG_SCAN("Scan start: " 1542 IWL_DEBUG_SCAN(priv, "Scan start: "
3126 "%d [802.11%s] " 1543 "%d [802.11%s] "
3127 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", 1544 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3128 notif->channel, 1545 notif->channel,
@@ -3132,14 +1549,16 @@ static void iwl3945_rx_scan_start_notif(struct iwl3945_priv *priv,
3132} 1549}
3133 1550
3134/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ 1551/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3135static void iwl3945_rx_scan_results_notif(struct iwl3945_priv *priv, 1552static void iwl3945_rx_scan_results_notif(struct iwl_priv *priv,
3136 struct iwl3945_rx_mem_buffer *rxb) 1553 struct iwl_rx_mem_buffer *rxb)
3137{ 1554{
3138 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1555#ifdef CONFIG_IWLWIFI_DEBUG
3139 struct iwl3945_scanresults_notification *notif = 1556 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3140 (struct iwl3945_scanresults_notification *)pkt->u.raw; 1557 struct iwl_scanresults_notification *notif =
1558 (struct iwl_scanresults_notification *)pkt->u.raw;
1559#endif
3141 1560
3142 IWL_DEBUG_SCAN("Scan ch.res: " 1561 IWL_DEBUG_SCAN(priv, "Scan ch.res: "
3143 "%d [802.11%s] " 1562 "%d [802.11%s] "
3144 "(TSF: 0x%08X:%08X) - %d " 1563 "(TSF: 0x%08X:%08X) - %d "
3145 "elapsed=%lu usec (%dms since last)\n", 1564 "elapsed=%lu usec (%dms since last)\n",
@@ -3157,13 +1576,15 @@ static void iwl3945_rx_scan_results_notif(struct iwl3945_priv *priv,
3157} 1576}
3158 1577
3159/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ 1578/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3160static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv, 1579static void iwl3945_rx_scan_complete_notif(struct iwl_priv *priv,
3161 struct iwl3945_rx_mem_buffer *rxb) 1580 struct iwl_rx_mem_buffer *rxb)
3162{ 1581{
3163 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1582#ifdef CONFIG_IWLWIFI_DEBUG
3164 struct iwl3945_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 1583 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
1584 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
1585#endif
3165 1586
3166 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 1587 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3167 scan_notif->scanned_channels, 1588 scan_notif->scanned_channels,
3168 scan_notif->tsf_low, 1589 scan_notif->tsf_low,
3169 scan_notif->tsf_high, scan_notif->status); 1590 scan_notif->tsf_high, scan_notif->status);
@@ -3174,7 +1595,7 @@ static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv,
3174 /* The scan completion notification came in, so kill that timer... */ 1595 /* The scan completion notification came in, so kill that timer... */
3175 cancel_delayed_work(&priv->scan_check); 1596 cancel_delayed_work(&priv->scan_check);
3176 1597
3177 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", 1598 IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
3178 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 1599 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
3179 "2.4" : "5.2", 1600 "2.4" : "5.2",
3180 jiffies_to_msecs(elapsed_jiffies 1601 jiffies_to_msecs(elapsed_jiffies
@@ -3192,7 +1613,7 @@ static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv,
3192 * then we reset the scan state machine and terminate, 1613 * then we reset the scan state machine and terminate,
3193 * re-queuing another scan if one has been requested */ 1614 * re-queuing another scan if one has been requested */
3194 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 1615 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3195 IWL_DEBUG_INFO("Aborted scan completed.\n"); 1616 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
3196 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 1617 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3197 } else { 1618 } else {
3198 /* If there are more bands on this scan pass reschedule */ 1619 /* If there are more bands on this scan pass reschedule */
@@ -3202,11 +1623,11 @@ static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv,
3202 1623
3203 priv->last_scan_jiffies = jiffies; 1624 priv->last_scan_jiffies = jiffies;
3204 priv->next_scan_jiffies = 0; 1625 priv->next_scan_jiffies = 0;
3205 IWL_DEBUG_INFO("Setting scan to off\n"); 1626 IWL_DEBUG_INFO(priv, "Setting scan to off\n");
3206 1627
3207 clear_bit(STATUS_SCANNING, &priv->status); 1628 clear_bit(STATUS_SCANNING, &priv->status);
3208 1629
3209 IWL_DEBUG_INFO("Scan took %dms\n", 1630 IWL_DEBUG_INFO(priv, "Scan took %dms\n",
3210 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); 1631 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
3211 1632
3212 queue_work(priv->workqueue, &priv->scan_completed); 1633 queue_work(priv->workqueue, &priv->scan_completed);
@@ -3220,18 +1641,18 @@ reschedule:
3220 1641
3221/* Handle notification from uCode that card's power state is changing 1642/* Handle notification from uCode that card's power state is changing
3222 * due to software, hardware, or critical temperature RFKILL */ 1643 * due to software, hardware, or critical temperature RFKILL */
3223static void iwl3945_rx_card_state_notif(struct iwl3945_priv *priv, 1644static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
3224 struct iwl3945_rx_mem_buffer *rxb) 1645 struct iwl_rx_mem_buffer *rxb)
3225{ 1646{
3226 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 1647 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3227 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 1648 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3228 unsigned long status = priv->status; 1649 unsigned long status = priv->status;
3229 1650
3230 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", 1651 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
3231 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 1652 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
3232 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 1653 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
3233 1654
3234 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET, 1655 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3235 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 1656 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3236 1657
3237 if (flags & HW_CARD_DISABLED) 1658 if (flags & HW_CARD_DISABLED)
@@ -3245,7 +1666,7 @@ static void iwl3945_rx_card_state_notif(struct iwl3945_priv *priv,
3245 else 1666 else
3246 clear_bit(STATUS_RF_KILL_SW, &priv->status); 1667 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3247 1668
3248 iwl3945_scan_cancel(priv); 1669 iwl_scan_cancel(priv);
3249 1670
3250 if ((test_bit(STATUS_RF_KILL_HW, &status) != 1671 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
3251 test_bit(STATUS_RF_KILL_HW, &priv->status)) || 1672 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
@@ -3265,12 +1686,12 @@ static void iwl3945_rx_card_state_notif(struct iwl3945_priv *priv,
3265 * This function chains into the hardware specific files for them to setup 1686 * This function chains into the hardware specific files for them to setup
3266 * any hardware specific handlers as well. 1687 * any hardware specific handlers as well.
3267 */ 1688 */
3268static void iwl3945_setup_rx_handlers(struct iwl3945_priv *priv) 1689static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
3269{ 1690{
3270 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; 1691 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
3271 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; 1692 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
3272 priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error; 1693 priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error;
3273 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl3945_rx_csa; 1694 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
3274 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 1695 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
3275 iwl3945_rx_spectrum_measure_notif; 1696 iwl3945_rx_spectrum_measure_notif;
3276 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif; 1697 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif;
@@ -3303,15 +1724,15 @@ static void iwl3945_setup_rx_handlers(struct iwl3945_priv *priv)
3303 * When FW advances 'R' index, all entries between old and new 'R' index 1724 * When FW advances 'R' index, all entries between old and new 'R' index
3304 * need to be reclaimed. 1725 * need to be reclaimed.
3305 */ 1726 */
3306static void iwl3945_cmd_queue_reclaim(struct iwl3945_priv *priv, 1727static void iwl3945_cmd_queue_reclaim(struct iwl_priv *priv,
3307 int txq_id, int index) 1728 int txq_id, int index)
3308{ 1729{
3309 struct iwl3945_tx_queue *txq = &priv->txq[txq_id]; 1730 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3310 struct iwl3945_queue *q = &txq->q; 1731 struct iwl_queue *q = &txq->q;
3311 int nfreed = 0; 1732 int nfreed = 0;
3312 1733
3313 if ((index >= q->n_bd) || (iwl3945_x2_queue_used(q, index) == 0)) { 1734 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
3314 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " 1735 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
3315 "is out of range [0-%d] %d %d.\n", txq_id, 1736 "is out of range [0-%d] %d %d.\n", txq_id,
3316 index, q->n_bd, q->write_ptr, q->read_ptr); 1737 index, q->n_bd, q->write_ptr, q->read_ptr);
3317 return; 1738 return;
@@ -3320,7 +1741,7 @@ static void iwl3945_cmd_queue_reclaim(struct iwl3945_priv *priv,
3320 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 1741 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
3321 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1742 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3322 if (nfreed > 1) { 1743 if (nfreed > 1) {
3323 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, 1744 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", index,
3324 q->write_ptr, q->read_ptr); 1745 q->write_ptr, q->read_ptr);
3325 queue_work(priv->workqueue, &priv->restart); 1746 queue_work(priv->workqueue, &priv->restart);
3326 break; 1747 break;
@@ -3338,21 +1759,28 @@ static void iwl3945_cmd_queue_reclaim(struct iwl3945_priv *priv,
3338 * will be executed. The attached skb (if present) will only be freed 1759 * will be executed. The attached skb (if present) will only be freed
3339 * if the callback returns 1 1760 * if the callback returns 1
3340 */ 1761 */
3341static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv, 1762static void iwl3945_tx_cmd_complete(struct iwl_priv *priv,
3342 struct iwl3945_rx_mem_buffer *rxb) 1763 struct iwl_rx_mem_buffer *rxb)
3343{ 1764{
3344 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data; 1765 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3345 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1766 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3346 int txq_id = SEQ_TO_QUEUE(sequence); 1767 int txq_id = SEQ_TO_QUEUE(sequence);
3347 int index = SEQ_TO_INDEX(sequence); 1768 int index = SEQ_TO_INDEX(sequence);
3348 int huge = sequence & SEQ_HUGE_FRAME; 1769 int huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3349 int cmd_index; 1770 int cmd_index;
3350 struct iwl3945_cmd *cmd; 1771 struct iwl_cmd *cmd;
3351 1772
3352 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); 1773 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
1774 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
1775 txq_id, sequence,
1776 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
1777 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
 1778 iwl_print_hex_dump(priv, IWL_DL_INFO, rxb, 32);
1779 return;
1780 }
3353 1781
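
The txq_id, index and huge flag used above are all unpacked from the 16-bit sequence field of the command header. A sketch of that packing (bit positions are an assumption for illustration; the authoritative macros live in the driver headers):

/* Editorial sketch of the assumed sequence-field layout:
 *   bits 0-7   slot index within the queue   (SEQ_TO_INDEX)
 *   bits 8-12  originating TX queue number   (SEQ_TO_QUEUE)
 *   bit 14     oversized ("huge") command    (SEQ_HUGE_FRAME)
 */
static inline int seq_to_queue(u16 seq) { return (seq >> 8) & 0x1f; }
static inline int seq_to_index(u16 seq) { return seq & 0xff; }
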
3354 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 1782 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3355 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1783 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3356 1784
3357 /* Input error checking is done when commands are added to queue. */ 1785 /* Input error checking is done when commands are added to queue. */
3358 if (cmd->meta.flags & CMD_WANT_SKB) { 1786 if (cmd->meta.flags & CMD_WANT_SKB) {
@@ -3417,7 +1845,6 @@ static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv,
3417 * 1845 *
3418 * Driver sequence: 1846 * Driver sequence:
3419 * 1847 *
3420 * iwl3945_rx_queue_alloc() Allocates rx_free
3421 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls 1848 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
3422 * iwl3945_rx_queue_restock 1849 * iwl3945_rx_queue_restock
3423 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx 1850 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
@@ -3426,7 +1853,7 @@ static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv,
3426 * are available, schedules iwl3945_rx_replenish 1853 * are available, schedules iwl3945_rx_replenish
3427 * 1854 *
3428 * -- enable interrupts -- 1855 * -- enable interrupts --
3429 * ISR - iwl3945_rx() Detach iwl3945_rx_mem_buffers from pool up to the 1856 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
3430 * READ INDEX, detaching the SKB from the pool. 1857 * READ INDEX, detaching the SKB from the pool.
3431 * Moves the packet buffer from queue to rx_used. 1858 * Moves the packet buffer from queue to rx_used.
3432 * Calls iwl3945_rx_queue_restock to refill any empty 1859 * Calls iwl3945_rx_queue_restock to refill any empty
@@ -3436,70 +1863,9 @@ static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv,
3436 */ 1863 */
3437 1864
3438/** 1865/**
3439 * iwl3945_rx_queue_space - Return number of free slots available in queue.
3440 */
3441static int iwl3945_rx_queue_space(const struct iwl3945_rx_queue *q)
3442{
3443 int s = q->read - q->write;
3444 if (s <= 0)
3445 s += RX_QUEUE_SIZE;
3446 /* keep some buffer to not confuse full and empty queue */
3447 s -= 2;
3448 if (s < 0)
3449 s = 0;
3450 return s;
3451}
3452
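
The removed iwl3945_rx_queue_space() above (like the iwl_rx_queue_space() helper the new code calls instead) is plain circular-buffer arithmetic with a two-slot guard band, so a completely full queue is never mistaken for an empty one. A standalone worked example, taking RX_QUEUE_SIZE as 256 (the value this driver family uses; the concrete read/write numbers are illustration only):

/* Editorial sketch: userspace version of the same free-slot arithmetic. */
#include <stdio.h>

#define RX_QUEUE_SIZE 256       /* assumed; matches the driver's define */

static int rx_queue_space(int read, int write)
{
        int s = read - write;
        if (s <= 0)
                s += RX_QUEUE_SIZE;     /* wrap */
        s -= 2;                         /* guard band */
        return s < 0 ? 0 : s;
}

int main(void)
{
        /* read = 10, write = 250: 10 - 250 = -240, +256 = 16, -2 = 14 */
        printf("%d free slots\n", rx_queue_space(10, 250));
        return 0;
}
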
3453/**
3454 * iwl3945_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3455 */
3456int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv, struct iwl3945_rx_queue *q)
3457{
3458 u32 reg = 0;
3459 int rc = 0;
3460 unsigned long flags;
3461
3462 spin_lock_irqsave(&q->lock, flags);
3463
3464 if (q->need_update == 0)
3465 goto exit_unlock;
3466
3467 /* If power-saving is in use, make sure device is awake */
3468 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3469 reg = iwl3945_read32(priv, CSR_UCODE_DRV_GP1);
3470
3471 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3472 iwl3945_set_bit(priv, CSR_GP_CNTRL,
3473 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3474 goto exit_unlock;
3475 }
3476
3477 rc = iwl3945_grab_nic_access(priv);
3478 if (rc)
3479 goto exit_unlock;
3480
3481 /* Device expects a multiple of 8 */
3482 iwl3945_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
3483 q->write & ~0x7);
3484 iwl3945_release_nic_access(priv);
3485
3486 /* Else device is assumed to be awake */
3487 } else
3488 /* Device expects a multiple of 8 */
3489 iwl3945_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3490
3491
3492 q->need_update = 0;
3493
3494 exit_unlock:
3495 spin_unlock_irqrestore(&q->lock, flags);
3496 return rc;
3497}
3498
3499/**
3500 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 1866 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
3501 */ 1867 */
3502static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl3945_priv *priv, 1868static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
3503 dma_addr_t dma_addr) 1869 dma_addr_t dma_addr)
3504{ 1870{
3505 return cpu_to_le32((u32)dma_addr); 1871 return cpu_to_le32((u32)dma_addr);
@@ -3516,24 +1882,24 @@ static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl3945_priv *priv,
3516 * also updates the memory address in the firmware to reference the new 1882 * also updates the memory address in the firmware to reference the new
3517 * target buffer. 1883 * target buffer.
3518 */ 1884 */
3519static int iwl3945_rx_queue_restock(struct iwl3945_priv *priv) 1885static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
3520{ 1886{
3521 struct iwl3945_rx_queue *rxq = &priv->rxq; 1887 struct iwl_rx_queue *rxq = &priv->rxq;
3522 struct list_head *element; 1888 struct list_head *element;
3523 struct iwl3945_rx_mem_buffer *rxb; 1889 struct iwl_rx_mem_buffer *rxb;
3524 unsigned long flags; 1890 unsigned long flags;
3525 int write, rc; 1891 int write, rc;
3526 1892
3527 spin_lock_irqsave(&rxq->lock, flags); 1893 spin_lock_irqsave(&rxq->lock, flags);
3528 write = rxq->write & ~0x7; 1894 write = rxq->write & ~0x7;
3529 while ((iwl3945_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 1895 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
3530 /* Get next free Rx buffer, remove from free list */ 1896 /* Get next free Rx buffer, remove from free list */
3531 element = rxq->rx_free.next; 1897 element = rxq->rx_free.next;
3532 rxb = list_entry(element, struct iwl3945_rx_mem_buffer, list); 1898 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
3533 list_del(element); 1899 list_del(element);
3534 1900
3535 /* Point to Rx buffer via next RBD in circular buffer */ 1901 /* Point to Rx buffer via next RBD in circular buffer */
3536 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->dma_addr); 1902 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
3537 rxq->queue[rxq->write] = rxb; 1903 rxq->queue[rxq->write] = rxb;
3538 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 1904 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
3539 rxq->free_count--; 1905 rxq->free_count--;
@@ -3552,7 +1918,7 @@ static int iwl3945_rx_queue_restock(struct iwl3945_priv *priv)
3552 spin_lock_irqsave(&rxq->lock, flags); 1918 spin_lock_irqsave(&rxq->lock, flags);
3553 rxq->need_update = 1; 1919 rxq->need_update = 1;
3554 spin_unlock_irqrestore(&rxq->lock, flags); 1920 spin_unlock_irqrestore(&rxq->lock, flags);
3555 rc = iwl3945_rx_queue_update_write_ptr(priv, rxq); 1921 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
3556 if (rc) 1922 if (rc)
3557 return rc; 1923 return rc;
3558 } 1924 }
@@ -3568,24 +1934,24 @@ static int iwl3945_rx_queue_restock(struct iwl3945_priv *priv)
3568 * Also restock the Rx queue via iwl3945_rx_queue_restock. 1934 * Also restock the Rx queue via iwl3945_rx_queue_restock.
3569 * This is called as a scheduled work item (except for during initialization) 1935 * This is called as a scheduled work item (except for during initialization)
3570 */ 1936 */
3571static void iwl3945_rx_allocate(struct iwl3945_priv *priv) 1937static void iwl3945_rx_allocate(struct iwl_priv *priv)
3572{ 1938{
3573 struct iwl3945_rx_queue *rxq = &priv->rxq; 1939 struct iwl_rx_queue *rxq = &priv->rxq;
3574 struct list_head *element; 1940 struct list_head *element;
3575 struct iwl3945_rx_mem_buffer *rxb; 1941 struct iwl_rx_mem_buffer *rxb;
3576 unsigned long flags; 1942 unsigned long flags;
3577 spin_lock_irqsave(&rxq->lock, flags); 1943 spin_lock_irqsave(&rxq->lock, flags);
3578 while (!list_empty(&rxq->rx_used)) { 1944 while (!list_empty(&rxq->rx_used)) {
3579 element = rxq->rx_used.next; 1945 element = rxq->rx_used.next;
3580 rxb = list_entry(element, struct iwl3945_rx_mem_buffer, list); 1946 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
3581 1947
3582 /* Alloc a new receive buffer */ 1948 /* Alloc a new receive buffer */
3583 rxb->skb = 1949 rxb->skb =
3584 alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC); 1950 alloc_skb(priv->hw_params.rx_buf_size,
1951 __GFP_NOWARN | GFP_ATOMIC);
3585 if (!rxb->skb) { 1952 if (!rxb->skb) {
3586 if (net_ratelimit()) 1953 if (net_ratelimit())
3587 printk(KERN_CRIT DRV_NAME 1954 IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
3588 ": Can not allocate SKB buffers\n");
3589 /* We don't reschedule replenish work here -- we will 1955 /* We don't reschedule replenish work here -- we will
3590 * call the restock method and if it still needs 1956 * call the restock method and if it still needs
3591 * more buffers it will schedule replenish */ 1957 * more buffers it will schedule replenish */
@@ -3604,9 +1970,10 @@ static void iwl3945_rx_allocate(struct iwl3945_priv *priv)
3604 list_del(element); 1970 list_del(element);
3605 1971
3606 /* Get physical address of RB/SKB */ 1972 /* Get physical address of RB/SKB */
3607 rxb->dma_addr = 1973 rxb->real_dma_addr = pci_map_single(priv->pci_dev,
3608 pci_map_single(priv->pci_dev, rxb->skb->data, 1974 rxb->skb->data,
3609 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1975 priv->hw_params.rx_buf_size,
1976 PCI_DMA_FROMDEVICE);
3610 list_add_tail(&rxb->list, &rxq->rx_free); 1977 list_add_tail(&rxb->list, &rxq->rx_free);
3611 rxq->free_count++; 1978 rxq->free_count++;
3612 } 1979 }
@@ -3618,7 +1985,7 @@ static void iwl3945_rx_allocate(struct iwl3945_priv *priv)
3618 */ 1985 */
3619static void __iwl3945_rx_replenish(void *data) 1986static void __iwl3945_rx_replenish(void *data)
3620{ 1987{
3621 struct iwl3945_priv *priv = data; 1988 struct iwl_priv *priv = data;
3622 1989
3623 iwl3945_rx_allocate(priv); 1990 iwl3945_rx_allocate(priv);
3624 iwl3945_rx_queue_restock(priv); 1991 iwl3945_rx_queue_restock(priv);
@@ -3627,7 +1994,7 @@ static void __iwl3945_rx_replenish(void *data)
3627 1994
3628void iwl3945_rx_replenish(void *data) 1995void iwl3945_rx_replenish(void *data)
3629{ 1996{
3630 struct iwl3945_priv *priv = data; 1997 struct iwl_priv *priv = data;
3631 unsigned long flags; 1998 unsigned long flags;
3632 1999
3633 iwl3945_rx_allocate(priv); 2000 iwl3945_rx_allocate(priv);
@@ -3637,84 +2004,6 @@ void iwl3945_rx_replenish(void *data)
3637 spin_unlock_irqrestore(&priv->lock, flags); 2004 spin_unlock_irqrestore(&priv->lock, flags);
3638} 2005}
3639 2006
3640/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
3641 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
3642 * This free routine walks the list of POOL entries and if SKB is set to
3643 * non NULL it is unmapped and freed
3644 */
3645static void iwl3945_rx_queue_free(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq)
3646{
3647 int i;
3648 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3649 if (rxq->pool[i].skb != NULL) {
3650 pci_unmap_single(priv->pci_dev,
3651 rxq->pool[i].dma_addr,
3652 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3653 dev_kfree_skb(rxq->pool[i].skb);
3654 }
3655 }
3656
3657 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
3658 rxq->dma_addr);
3659 rxq->bd = NULL;
3660}
3661
3662int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv)
3663{
3664 struct iwl3945_rx_queue *rxq = &priv->rxq;
3665 struct pci_dev *dev = priv->pci_dev;
3666 int i;
3667
3668 spin_lock_init(&rxq->lock);
3669 INIT_LIST_HEAD(&rxq->rx_free);
3670 INIT_LIST_HEAD(&rxq->rx_used);
3671
3672 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
3673 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
3674 if (!rxq->bd)
3675 return -ENOMEM;
3676
3677 /* Fill the rx_used queue with _all_ of the Rx buffers */
3678 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3679 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3680
3681 /* Set us so that we have processed and used all buffers, but have
3682 * not restocked the Rx queue with fresh buffers */
3683 rxq->read = rxq->write = 0;
3684 rxq->free_count = 0;
3685 rxq->need_update = 0;
3686 return 0;
3687}
3688
3689void iwl3945_rx_queue_reset(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq)
3690{
3691 unsigned long flags;
3692 int i;
3693 spin_lock_irqsave(&rxq->lock, flags);
3694 INIT_LIST_HEAD(&rxq->rx_free);
3695 INIT_LIST_HEAD(&rxq->rx_used);
3696 /* Fill the rx_used queue with _all_ of the Rx buffers */
3697 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3698 /* In the reset function, these buffers may have been allocated
3699 * to an SKB, so we need to unmap and free potential storage */
3700 if (rxq->pool[i].skb != NULL) {
3701 pci_unmap_single(priv->pci_dev,
3702 rxq->pool[i].dma_addr,
3703 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3704 priv->alloc_rxb_skb--;
3705 dev_kfree_skb(rxq->pool[i].skb);
3706 rxq->pool[i].skb = NULL;
3707 }
3708 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3709 }
3710
3711 /* Set us so that we have processed and used all buffers, but have
3712 * not restocked the Rx queue with fresh buffers */
3713 rxq->read = rxq->write = 0;
3714 rxq->free_count = 0;
3715 spin_unlock_irqrestore(&rxq->lock, flags);
3716}
3717
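
The ratio2dB[] table defined just below exists so the kernel can turn a small linear signal/noise ratio into whole dB without calling log(). A throwaway userspace snippet for comparing the table entries against the usual scalings (whether the driver treats the ratio as a power-like or voltage-like quantity is left as an assumption here):

/* Editorial sketch: what a ratio->dB lookup table approximates.  Printing
 * both conventions lets the in-tree values be checked against whichever
 * scaling (10*log10 for power, 20*log10 for voltage) the table uses. */
#include <math.h>
#include <stdio.h>

int main(void)
{
        for (int ratio = 1; ratio <= 100; ratio *= 10)
                printf("ratio %3d:  10*log10 = %4.1f dB   20*log10 = %4.1f dB\n",
                       ratio, 10.0 * log10(ratio), 20.0 * log10(ratio));
        return 0;
}
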
3718/* Convert linear signal-to-noise ratio into dB */ 2007/* Convert linear signal-to-noise ratio into dB */
3719static u8 ratio2dB[100] = { 2008static u8 ratio2dB[100] = {
3720/* 0 1 2 3 4 5 6 7 8 9 */ 2009/* 0 1 2 3 4 5 6 7 8 9 */
@@ -3800,11 +2089,11 @@ int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
3800 * the appropriate handlers, including command responses, 2089 * the appropriate handlers, including command responses,
3801 * frame-received notifications, and other notifications. 2090 * frame-received notifications, and other notifications.
3802 */ 2091 */
3803static void iwl3945_rx_handle(struct iwl3945_priv *priv) 2092static void iwl3945_rx_handle(struct iwl_priv *priv)
3804{ 2093{
3805 struct iwl3945_rx_mem_buffer *rxb; 2094 struct iwl_rx_mem_buffer *rxb;
3806 struct iwl3945_rx_packet *pkt; 2095 struct iwl_rx_packet *pkt;
3807 struct iwl3945_rx_queue *rxq = &priv->rxq; 2096 struct iwl_rx_queue *rxq = &priv->rxq;
3808 u32 r, i; 2097 u32 r, i;
3809 int reclaim; 2098 int reclaim;
3810 unsigned long flags; 2099 unsigned long flags;
@@ -3813,14 +2102,14 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
3813 2102
3814 /* uCode's read index (stored in shared DRAM) indicates the last Rx 2103 /* uCode's read index (stored in shared DRAM) indicates the last Rx
3815 * buffer that the driver may process (last buffer filled by ucode). */ 2104 * buffer that the driver may process (last buffer filled by ucode). */
3816 r = iwl3945_hw_get_rx_read(priv); 2105 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
3817 i = rxq->read; 2106 i = rxq->read;
3818 2107
3819 if (iwl3945_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2)) 2108 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
3820 fill_rx = 1; 2109 fill_rx = 1;
3821 /* Rx interrupt, but nothing sent from uCode */ 2110 /* Rx interrupt, but nothing sent from uCode */
3822 if (i == r) 2111 if (i == r)
3823 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); 2112 IWL_DEBUG(priv, IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
3824 2113
3825 while (i != r) { 2114 while (i != r) {
3826 rxb = rxq->queue[i]; 2115 rxb = rxq->queue[i];
@@ -3832,10 +2121,10 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
3832 2121
3833 rxq->queue[i] = NULL; 2122 rxq->queue[i] = NULL;
3834 2123
3835 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 2124 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
3836 IWL_RX_BUF_SIZE, 2125 priv->hw_params.rx_buf_size,
3837 PCI_DMA_FROMDEVICE); 2126 PCI_DMA_FROMDEVICE);
3838 pkt = (struct iwl3945_rx_packet *)rxb->skb->data; 2127 pkt = (struct iwl_rx_packet *)rxb->skb->data;
3839 2128
3840 /* Reclaim a command buffer only if this packet is a response 2129 /* Reclaim a command buffer only if this packet is a response
3841 * to a (driver-originated) command. 2130 * to a (driver-originated) command.
@@ -3851,13 +2140,13 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
3851 * handle those that need handling via function in 2140 * handle those that need handling via function in
3852 * rx_handlers table. See iwl3945_setup_rx_handlers() */ 2141 * rx_handlers table. See iwl3945_setup_rx_handlers() */
3853 if (priv->rx_handlers[pkt->hdr.cmd]) { 2142 if (priv->rx_handlers[pkt->hdr.cmd]) {
3854 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 2143 IWL_DEBUG(priv, IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3855 "r = %d, i = %d, %s, 0x%02x\n", r, i, 2144 "r = %d, i = %d, %s, 0x%02x\n", r, i,
3856 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 2145 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3857 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 2146 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3858 } else { 2147 } else {
3859 /* No handling needed */ 2148 /* No handling needed */
3860 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 2149 IWL_DEBUG(priv, IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3861 "r %d i %d No handler needed for %s, 0x%02x\n", 2150 "r %d i %d No handler needed for %s, 0x%02x\n",
3862 r, i, get_cmd_string(pkt->hdr.cmd), 2151 r, i, get_cmd_string(pkt->hdr.cmd),
3863 pkt->hdr.cmd); 2152 pkt->hdr.cmd);
@@ -3865,12 +2154,12 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
3865 2154
3866 if (reclaim) { 2155 if (reclaim) {
3867 /* Invoke any callbacks, transfer the skb to caller, and 2156 /* Invoke any callbacks, transfer the skb to caller, and
3868 * fire off the (possibly) blocking iwl3945_send_cmd() 2157 * fire off the (possibly) blocking iwl_send_cmd()
3869 * as we reclaim the driver command queue */ 2158 * as we reclaim the driver command queue */
3870 if (rxb && rxb->skb) 2159 if (rxb && rxb->skb)
3871 iwl3945_tx_cmd_complete(priv, rxb); 2160 iwl3945_tx_cmd_complete(priv, rxb);
3872 else 2161 else
3873 IWL_WARNING("Claim null rxb?\n"); 2162 IWL_WARN(priv, "Claim null rxb?\n");
3874 } 2163 }
3875 2164
3876 /* For now we just don't re-use anything. We can tweak this 2165 /* For now we just don't re-use anything. We can tweak this
@@ -3882,8 +2171,9 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
3882 rxb->skb = NULL; 2171 rxb->skb = NULL;
3883 } 2172 }
3884 2173
3885 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 2174 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
3886 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 2175 priv->hw_params.rx_buf_size,
2176 PCI_DMA_FROMDEVICE);
3887 spin_lock_irqsave(&rxq->lock, flags); 2177 spin_lock_irqsave(&rxq->lock, flags);
3888 list_add_tail(&rxb->list, &priv->rxq.rx_used); 2178 list_add_tail(&rxb->list, &priv->rxq.rx_used);
3889 spin_unlock_irqrestore(&rxq->lock, flags); 2179 spin_unlock_irqrestore(&rxq->lock, flags);
@@ -3905,81 +2195,16 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
3905 iwl3945_rx_queue_restock(priv); 2195 iwl3945_rx_queue_restock(priv);
3906} 2196}
3907 2197
3908/** 2198static void iwl3945_enable_interrupts(struct iwl_priv *priv)
3909 * iwl3945_tx_queue_update_write_ptr - Send new write index to hardware
3910 */
3911static int iwl3945_tx_queue_update_write_ptr(struct iwl3945_priv *priv,
3912 struct iwl3945_tx_queue *txq)
3913{ 2199{
3914 u32 reg = 0; 2200 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
3915 int rc = 0;
3916 int txq_id = txq->q.id;
3917
3918 if (txq->need_update == 0)
3919 return rc;
3920
3921 /* if we're trying to save power */
3922 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3923 /* wake up nic if it's powered down ...
3924 * uCode will wake up, and interrupt us again, so next
3925 * time we'll skip this part. */
3926 reg = iwl3945_read32(priv, CSR_UCODE_DRV_GP1);
3927
3928 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3929 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
3930 iwl3945_set_bit(priv, CSR_GP_CNTRL,
3931 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3932 return rc;
3933 }
3934
3935 /* restore this queue's parameters in nic hardware. */
3936 rc = iwl3945_grab_nic_access(priv);
3937 if (rc)
3938 return rc;
3939 iwl3945_write_direct32(priv, HBUS_TARG_WRPTR,
3940 txq->q.write_ptr | (txq_id << 8));
3941 iwl3945_release_nic_access(priv);
3942
3943 /* else not in power-save mode, uCode will never sleep when we're
3944 * trying to tx (during RFKILL, we're not trying to tx). */
3945 } else
3946 iwl3945_write32(priv, HBUS_TARG_WRPTR,
3947 txq->q.write_ptr | (txq_id << 8));
3948
3949 txq->need_update = 0;
3950
3951 return rc;
3952}
3953
3954#ifdef CONFIG_IWL3945_DEBUG
3955static void iwl3945_print_rx_config_cmd(struct iwl3945_rxon_cmd *rxon)
3956{
3957 IWL_DEBUG_RADIO("RX CONFIG:\n");
3958 iwl3945_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
3959 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
3960 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
3961 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
3962 le32_to_cpu(rxon->filter_flags));
3963 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
3964 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
3965 rxon->ofdm_basic_rates);
3966 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
3967 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
3968 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
3969 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
3970}
3971#endif
3972
3973static void iwl3945_enable_interrupts(struct iwl3945_priv *priv)
3974{
3975 IWL_DEBUG_ISR("Enabling interrupts\n");
3976 set_bit(STATUS_INT_ENABLED, &priv->status); 2201 set_bit(STATUS_INT_ENABLED, &priv->status);
3977 iwl3945_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); 2202 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
3978} 2203}
3979 2204
3980 2205
3981/* call this function to flush any scheduled tasklet */ 2206/* call this function to flush any scheduled tasklet */
3982static inline void iwl_synchronize_irq(struct iwl3945_priv *priv) 2207static inline void iwl_synchronize_irq(struct iwl_priv *priv)
3983{ 2208{
3984 /* wait to make sure we flush pending tasklet*/ 2209 /* wait to make sure we flush pending tasklet*/
3985 synchronize_irq(priv->pci_dev->irq); 2210 synchronize_irq(priv->pci_dev->irq);
@@ -3987,18 +2212,18 @@ static inline void iwl_synchronize_irq(struct iwl3945_priv *priv)
3987} 2212}
3988 2213
3989 2214
3990static inline void iwl3945_disable_interrupts(struct iwl3945_priv *priv) 2215static inline void iwl3945_disable_interrupts(struct iwl_priv *priv)
3991{ 2216{
3992 clear_bit(STATUS_INT_ENABLED, &priv->status); 2217 clear_bit(STATUS_INT_ENABLED, &priv->status);
3993 2218
3994 /* disable interrupts from uCode/NIC to host */ 2219 /* disable interrupts from uCode/NIC to host */
3995 iwl3945_write32(priv, CSR_INT_MASK, 0x00000000); 2220 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
3996 2221
3997 /* acknowledge/clear/reset any interrupts still pending 2222 /* acknowledge/clear/reset any interrupts still pending
3998 * from uCode or flow handler (Rx/Tx DMA) */ 2223 * from uCode or flow handler (Rx/Tx DMA) */
3999 iwl3945_write32(priv, CSR_INT, 0xffffffff); 2224 iwl_write32(priv, CSR_INT, 0xffffffff);
4000 iwl3945_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); 2225 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
4001 IWL_DEBUG_ISR("Disabled interrupts\n"); 2226 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
4002} 2227}
4003 2228
4004static const char *desc_lookup(int i) 2229static const char *desc_lookup(int i)
@@ -4024,7 +2249,7 @@ static const char *desc_lookup(int i)
4024#define ERROR_START_OFFSET (1 * sizeof(u32)) 2249#define ERROR_START_OFFSET (1 * sizeof(u32))
4025#define ERROR_ELEM_SIZE (7 * sizeof(u32)) 2250#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4026 2251
4027static void iwl3945_dump_nic_error_log(struct iwl3945_priv *priv) 2252static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
4028{ 2253{
4029 u32 i; 2254 u32 i;
4030 u32 desc, time, count, base, data1; 2255 u32 desc, time, count, base, data1;
@@ -4034,49 +2259,50 @@ static void iwl3945_dump_nic_error_log(struct iwl3945_priv *priv)
4034 base = le32_to_cpu(priv->card_alive.error_event_table_ptr); 2259 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4035 2260
4036 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 2261 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
4037 IWL_ERROR("Not valid error log pointer 0x%08X\n", base); 2262 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
4038 return; 2263 return;
4039 } 2264 }
4040 2265
4041 rc = iwl3945_grab_nic_access(priv); 2266 rc = iwl_grab_nic_access(priv);
4042 if (rc) { 2267 if (rc) {
4043 IWL_WARNING("Can not read from adapter at this time.\n"); 2268 IWL_WARN(priv, "Can not read from adapter at this time.\n");
4044 return; 2269 return;
4045 } 2270 }
4046 2271
4047 count = iwl3945_read_targ_mem(priv, base); 2272 count = iwl_read_targ_mem(priv, base);
4048 2273
4049 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 2274 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4050 IWL_ERROR("Start IWL Error Log Dump:\n"); 2275 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
4051 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count); 2276 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
2277 priv->status, count);
4052 } 2278 }
4053 2279
4054 IWL_ERROR("Desc Time asrtPC blink2 " 2280 IWL_ERR(priv, "Desc Time asrtPC blink2 "
4055 "ilink1 nmiPC Line\n"); 2281 "ilink1 nmiPC Line\n");
4056 for (i = ERROR_START_OFFSET; 2282 for (i = ERROR_START_OFFSET;
4057 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; 2283 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
4058 i += ERROR_ELEM_SIZE) { 2284 i += ERROR_ELEM_SIZE) {
4059 desc = iwl3945_read_targ_mem(priv, base + i); 2285 desc = iwl_read_targ_mem(priv, base + i);
4060 time = 2286 time =
4061 iwl3945_read_targ_mem(priv, base + i + 1 * sizeof(u32)); 2287 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
4062 blink1 = 2288 blink1 =
4063 iwl3945_read_targ_mem(priv, base + i + 2 * sizeof(u32)); 2289 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
4064 blink2 = 2290 blink2 =
4065 iwl3945_read_targ_mem(priv, base + i + 3 * sizeof(u32)); 2291 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
4066 ilink1 = 2292 ilink1 =
4067 iwl3945_read_targ_mem(priv, base + i + 4 * sizeof(u32)); 2293 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
4068 ilink2 = 2294 ilink2 =
4069 iwl3945_read_targ_mem(priv, base + i + 5 * sizeof(u32)); 2295 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
4070 data1 = 2296 data1 =
4071 iwl3945_read_targ_mem(priv, base + i + 6 * sizeof(u32)); 2297 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
4072 2298
4073 IWL_ERROR 2299 IWL_ERR(priv,
4074 ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 2300 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
4075 desc_lookup(desc), desc, time, blink1, blink2, 2301 desc_lookup(desc), desc, time, blink1, blink2,
4076 ilink1, ilink2, data1); 2302 ilink1, ilink2, data1);
4077 } 2303 }
4078 2304
4079 iwl3945_release_nic_access(priv); 2305 iwl_release_nic_access(priv);
4080 2306
4081} 2307}
4082 2308
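
For orientation, the SRAM error log walked above is a one-word count followed by fixed seven-word records; a sketch of that layout, with field meanings taken from the column header this function prints (struct names are illustrative, the on-device format is defined by the uCode):

/* Editorial sketch of the layout iwl3945_dump_nic_error_log() reads,
 * in 32-bit words from the error_event_table_ptr base. */
struct error_log_header {        /* word 0 */
        u32 count;               /* number of error records that follow */
};
struct error_log_entry {         /* words 1.., seven words per record */
        u32 desc;                /* error type, printed via desc_lookup() */
        u32 time;                /* uCode timestamp */
        u32 blink1;              /* "asrtPC" column */
        u32 blink2;
        u32 ilink1;
        u32 ilink2;              /* "nmiPC" column */
        u32 data1;               /* "Line" column */
};
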
@@ -4085,9 +2311,9 @@ static void iwl3945_dump_nic_error_log(struct iwl3945_priv *priv)
4085/** 2311/**
4086 * iwl3945_print_event_log - Dump error event log to syslog 2312 * iwl3945_print_event_log - Dump error event log to syslog
4087 * 2313 *
4088 * NOTE: Must be called with iwl3945_grab_nic_access() already obtained! 2314 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
4089 */ 2315 */
4090static void iwl3945_print_event_log(struct iwl3945_priv *priv, u32 start_idx, 2316static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
4091 u32 num_events, u32 mode) 2317 u32 num_events, u32 mode)
4092{ 2318{
4093 u32 i; 2319 u32 i;
@@ -4111,21 +2337,22 @@ static void iwl3945_print_event_log(struct iwl3945_priv *priv, u32 start_idx,
4111 /* "time" is actually "data" for mode 0 (no timestamp). 2337 /* "time" is actually "data" for mode 0 (no timestamp).
4112 * place event id # at far right for easier visual parsing. */ 2338 * place event id # at far right for easier visual parsing. */
4113 for (i = 0; i < num_events; i++) { 2339 for (i = 0; i < num_events; i++) {
4114 ev = iwl3945_read_targ_mem(priv, ptr); 2340 ev = iwl_read_targ_mem(priv, ptr);
4115 ptr += sizeof(u32); 2341 ptr += sizeof(u32);
4116 time = iwl3945_read_targ_mem(priv, ptr); 2342 time = iwl_read_targ_mem(priv, ptr);
4117 ptr += sizeof(u32); 2343 ptr += sizeof(u32);
4118 if (mode == 0) 2344 if (mode == 0) {
4119 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ 2345 /* data, ev */
4120 else { 2346 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
4121 data = iwl3945_read_targ_mem(priv, ptr); 2347 } else {
2348 data = iwl_read_targ_mem(priv, ptr);
4122 ptr += sizeof(u32); 2349 ptr += sizeof(u32);
4123 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); 2350 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
4124 } 2351 }
4125 } 2352 }
4126} 2353}
4127 2354
4128static void iwl3945_dump_nic_event_log(struct iwl3945_priv *priv) 2355static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
4129{ 2356{
4130 int rc; 2357 int rc;
4131 u32 base; /* SRAM byte address of event log header */ 2358 u32 base; /* SRAM byte address of event log header */
@@ -4137,32 +2364,32 @@ static void iwl3945_dump_nic_event_log(struct iwl3945_priv *priv)
4137 2364
4138 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 2365 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4139 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 2366 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
4140 IWL_ERROR("Invalid event log pointer 0x%08X\n", base); 2367 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
4141 return; 2368 return;
4142 } 2369 }
4143 2370
4144 rc = iwl3945_grab_nic_access(priv); 2371 rc = iwl_grab_nic_access(priv);
4145 if (rc) { 2372 if (rc) {
4146 IWL_WARNING("Can not read from adapter at this time.\n"); 2373 IWL_WARN(priv, "Can not read from adapter at this time.\n");
4147 return; 2374 return;
4148 } 2375 }
4149 2376
4150 /* event log header */ 2377 /* event log header */
4151 capacity = iwl3945_read_targ_mem(priv, base); 2378 capacity = iwl_read_targ_mem(priv, base);
4152 mode = iwl3945_read_targ_mem(priv, base + (1 * sizeof(u32))); 2379 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
4153 num_wraps = iwl3945_read_targ_mem(priv, base + (2 * sizeof(u32))); 2380 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
4154 next_entry = iwl3945_read_targ_mem(priv, base + (3 * sizeof(u32))); 2381 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
4155 2382
4156 size = num_wraps ? capacity : next_entry; 2383 size = num_wraps ? capacity : next_entry;
4157 2384
4158 /* bail out if nothing in log */ 2385 /* bail out if nothing in log */
4159 if (size == 0) { 2386 if (size == 0) {
4160 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n"); 2387 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
4161 iwl3945_release_nic_access(priv); 2388 iwl_release_nic_access(priv);
4162 return; 2389 return;
4163 } 2390 }
4164 2391
4165 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n", 2392 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
4166 size, num_wraps); 2393 size, num_wraps);
4167 2394
4168 /* if uCode has wrapped back to top of log, start at the oldest entry, 2395 /* if uCode has wrapped back to top of log, start at the oldest entry,
@@ -4174,48 +2401,10 @@ static void iwl3945_dump_nic_event_log(struct iwl3945_priv *priv)
4174 /* (then/else) start at top of log */ 2401 /* (then/else) start at top of log */
4175 iwl3945_print_event_log(priv, 0, next_entry, mode); 2402 iwl3945_print_event_log(priv, 0, next_entry, mode);
4176 2403
4177 iwl3945_release_nic_access(priv); 2404 iwl_release_nic_access(priv);
4178} 2405}
4179 2406
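
The event log read just above has a similar shape: a four-word header followed by variable-width entries, with num_wraps deciding whether the whole buffer or only the first next_entry records are valid. A sketch (names illustrative):

/* Editorial sketch of the header iwl3945_dump_nic_event_log() parses,
 * in 32-bit words from log_event_table_ptr. */
struct event_log_header {
        u32 capacity;      /* word 0: max entries the log can hold */
        u32 mode;          /* word 1: 0 = (id, data) entries; else (id, time, data) */
        u32 num_wraps;     /* word 2: times the circular log has wrapped */
        u32 next_entry;    /* word 3: index that will be written next */
};
/* num_wraps != 0  -> dump all 'capacity' entries, oldest first;
 * num_wraps == 0  -> only the first 'next_entry' entries are valid. */
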
4180/** 2407static void iwl3945_error_recovery(struct iwl_priv *priv)
4181 * iwl3945_irq_handle_error - called for HW or SW error interrupt from card
4182 */
4183static void iwl3945_irq_handle_error(struct iwl3945_priv *priv)
4184{
4185 /* Set the FW error flag -- cleared on iwl3945_down */
4186 set_bit(STATUS_FW_ERROR, &priv->status);
4187
4188 /* Cancel currently queued command. */
4189 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4190
4191#ifdef CONFIG_IWL3945_DEBUG
4192 if (iwl3945_debug_level & IWL_DL_FW_ERRORS) {
4193 iwl3945_dump_nic_error_log(priv);
4194 iwl3945_dump_nic_event_log(priv);
4195 iwl3945_print_rx_config_cmd(&priv->staging_rxon);
4196 }
4197#endif
4198
4199 wake_up_interruptible(&priv->wait_command_queue);
4200
4201 /* Keep the restart process from trying to send host
4202 * commands by clearing the INIT status bit */
4203 clear_bit(STATUS_READY, &priv->status);
4204
4205 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4206 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4207 "Restarting adapter due to uCode error.\n");
4208
4209 if (iwl3945_is_associated(priv)) {
4210 memcpy(&priv->recovery_rxon, &priv->active_rxon,
4211 sizeof(priv->recovery_rxon));
4212 priv->error_recovering = 1;
4213 }
4214 queue_work(priv->workqueue, &priv->restart);
4215 }
4216}
4217
4218static void iwl3945_error_recovery(struct iwl3945_priv *priv)
4219{ 2408{
4220 unsigned long flags; 2409 unsigned long flags;
4221 2410
@@ -4232,12 +2421,12 @@ static void iwl3945_error_recovery(struct iwl3945_priv *priv)
4232 spin_unlock_irqrestore(&priv->lock, flags); 2421 spin_unlock_irqrestore(&priv->lock, flags);
4233} 2422}
4234 2423
4235static void iwl3945_irq_tasklet(struct iwl3945_priv *priv) 2424static void iwl3945_irq_tasklet(struct iwl_priv *priv)
4236{ 2425{
4237 u32 inta, handled = 0; 2426 u32 inta, handled = 0;
4238 u32 inta_fh; 2427 u32 inta_fh;
4239 unsigned long flags; 2428 unsigned long flags;
4240#ifdef CONFIG_IWL3945_DEBUG 2429#ifdef CONFIG_IWLWIFI_DEBUG
4241 u32 inta_mask; 2430 u32 inta_mask;
4242#endif 2431#endif
4243 2432
@@ -4246,20 +2435,20 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4246 /* Ack/clear/reset pending uCode interrupts. 2435 /* Ack/clear/reset pending uCode interrupts.
4247 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 2436 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4248 * and will clear only when CSR_FH_INT_STATUS gets cleared. */ 2437 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
4249 inta = iwl3945_read32(priv, CSR_INT); 2438 inta = iwl_read32(priv, CSR_INT);
4250 iwl3945_write32(priv, CSR_INT, inta); 2439 iwl_write32(priv, CSR_INT, inta);
4251 2440
4252 /* Ack/clear/reset pending flow-handler (DMA) interrupts. 2441 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4253 * Any new interrupts that happen after this, either while we're 2442 * Any new interrupts that happen after this, either while we're
4254 * in this tasklet, or later, will show up in next ISR/tasklet. */ 2443 * in this tasklet, or later, will show up in next ISR/tasklet. */
4255 inta_fh = iwl3945_read32(priv, CSR_FH_INT_STATUS); 2444 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4256 iwl3945_write32(priv, CSR_FH_INT_STATUS, inta_fh); 2445 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4257 2446
4258#ifdef CONFIG_IWL3945_DEBUG 2447#ifdef CONFIG_IWLWIFI_DEBUG
4259 if (iwl3945_debug_level & IWL_DL_ISR) { 2448 if (priv->debug_level & IWL_DL_ISR) {
4260 /* just for debug */ 2449 /* just for debug */
4261 inta_mask = iwl3945_read32(priv, CSR_INT_MASK); 2450 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4262 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 2451 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4263 inta, inta_mask, inta_fh); 2452 inta, inta_mask, inta_fh);
4264 } 2453 }
4265#endif 2454#endif
@@ -4275,12 +2464,12 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4275 2464
4276 /* Now service all interrupt bits discovered above. */ 2465 /* Now service all interrupt bits discovered above. */
4277 if (inta & CSR_INT_BIT_HW_ERR) { 2466 if (inta & CSR_INT_BIT_HW_ERR) {
4278 IWL_ERROR("Microcode HW error detected. Restarting.\n"); 2467 IWL_ERR(priv, "Microcode HW error detected. Restarting.\n");
4279 2468
4280 /* Tell the device to stop sending interrupts */ 2469 /* Tell the device to stop sending interrupts */
4281 iwl3945_disable_interrupts(priv); 2470 iwl3945_disable_interrupts(priv);
4282 2471
4283 iwl3945_irq_handle_error(priv); 2472 iwl_irq_handle_error(priv);
4284 2473
4285 handled |= CSR_INT_BIT_HW_ERR; 2474 handled |= CSR_INT_BIT_HW_ERR;
4286 2475
@@ -4289,16 +2478,16 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4289 return; 2478 return;
4290 } 2479 }
4291 2480
4292#ifdef CONFIG_IWL3945_DEBUG 2481#ifdef CONFIG_IWLWIFI_DEBUG
4293 if (iwl3945_debug_level & (IWL_DL_ISR)) { 2482 if (priv->debug_level & (IWL_DL_ISR)) {
4294 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 2483 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4295 if (inta & CSR_INT_BIT_SCD) 2484 if (inta & CSR_INT_BIT_SCD)
4296 IWL_DEBUG_ISR("Scheduler finished to transmit " 2485 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
4297 "the frame/frames.\n"); 2486 "the frame/frames.\n");
4298 2487
4299 /* Alive notification via Rx interrupt will do the real work */ 2488 /* Alive notification via Rx interrupt will do the real work */
4300 if (inta & CSR_INT_BIT_ALIVE) 2489 if (inta & CSR_INT_BIT_ALIVE)
4301 IWL_DEBUG_ISR("Alive interrupt\n"); 2490 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
4302 } 2491 }
4303#endif 2492#endif
4304 /* Safely ignore these bits for debug checks below */ 2493 /* Safely ignore these bits for debug checks below */
@@ -4306,22 +2495,22 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4306 2495
4307 /* Error detected by uCode */ 2496 /* Error detected by uCode */
4308 if (inta & CSR_INT_BIT_SW_ERR) { 2497 if (inta & CSR_INT_BIT_SW_ERR) {
4309 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", 2498 IWL_ERR(priv, "Microcode SW error detected. "
4310 inta); 2499 "Restarting 0x%X.\n", inta);
4311 iwl3945_irq_handle_error(priv); 2500 iwl_irq_handle_error(priv);
4312 handled |= CSR_INT_BIT_SW_ERR; 2501 handled |= CSR_INT_BIT_SW_ERR;
4313 } 2502 }
4314 2503
4315 /* uCode wakes up after power-down sleep */ 2504 /* uCode wakes up after power-down sleep */
4316 if (inta & CSR_INT_BIT_WAKEUP) { 2505 if (inta & CSR_INT_BIT_WAKEUP) {
4317 IWL_DEBUG_ISR("Wakeup interrupt\n"); 2506 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
4318 iwl3945_rx_queue_update_write_ptr(priv, &priv->rxq); 2507 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4319 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[0]); 2508 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
4320 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[1]); 2509 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
4321 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[2]); 2510 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
4322 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[3]); 2511 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
4323 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[4]); 2512 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
4324 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[5]); 2513 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
4325 2514
4326 handled |= CSR_INT_BIT_WAKEUP; 2515 handled |= CSR_INT_BIT_WAKEUP;
4327 } 2516 }
@@ -4335,25 +2524,24 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4335 } 2524 }
4336 2525
4337 if (inta & CSR_INT_BIT_FH_TX) { 2526 if (inta & CSR_INT_BIT_FH_TX) {
4338 IWL_DEBUG_ISR("Tx interrupt\n"); 2527 IWL_DEBUG_ISR(priv, "Tx interrupt\n");
4339 2528
4340 iwl3945_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); 2529 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
4341 if (!iwl3945_grab_nic_access(priv)) { 2530 if (!iwl_grab_nic_access(priv)) {
4342 iwl3945_write_direct32(priv, 2531 iwl_write_direct32(priv, FH39_TCSR_CREDIT
4343 FH_TCSR_CREDIT 2532 (FH39_SRVC_CHNL), 0x0);
4344 (ALM_FH_SRVC_CHNL), 0x0); 2533 iwl_release_nic_access(priv);
4345 iwl3945_release_nic_access(priv);
4346 } 2534 }
4347 handled |= CSR_INT_BIT_FH_TX; 2535 handled |= CSR_INT_BIT_FH_TX;
4348 } 2536 }
4349 2537
4350 if (inta & ~handled) 2538 if (inta & ~handled)
4351 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 2539 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
4352 2540
4353 if (inta & ~CSR_INI_SET_MASK) { 2541 if (inta & ~CSR_INI_SET_MASK) {
4354 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", 2542 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
4355 inta & ~CSR_INI_SET_MASK); 2543 inta & ~CSR_INI_SET_MASK);
4356 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); 2544 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
4357 } 2545 }
4358 2546
4359 /* Re-enable all interrupts */ 2547 /* Re-enable all interrupts */
@@ -4361,12 +2549,12 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4361 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 2549 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4362 iwl3945_enable_interrupts(priv); 2550 iwl3945_enable_interrupts(priv);
4363 2551
4364#ifdef CONFIG_IWL3945_DEBUG 2552#ifdef CONFIG_IWLWIFI_DEBUG
4365 if (iwl3945_debug_level & (IWL_DL_ISR)) { 2553 if (priv->debug_level & (IWL_DL_ISR)) {
4366 inta = iwl3945_read32(priv, CSR_INT); 2554 inta = iwl_read32(priv, CSR_INT);
4367 inta_mask = iwl3945_read32(priv, CSR_INT_MASK); 2555 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4368 inta_fh = iwl3945_read32(priv, CSR_FH_INT_STATUS); 2556 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4369 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " 2557 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4370 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 2558 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4371 } 2559 }
4372#endif 2560#endif
@@ -4375,7 +2563,7 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4375 2563
4376static irqreturn_t iwl3945_isr(int irq, void *data) 2564static irqreturn_t iwl3945_isr(int irq, void *data)
4377{ 2565{
4378 struct iwl3945_priv *priv = data; 2566 struct iwl_priv *priv = data;
4379 u32 inta, inta_mask; 2567 u32 inta, inta_mask;
4380 u32 inta_fh; 2568 u32 inta_fh;
4381 if (!priv) 2569 if (!priv)
@@ -4387,28 +2575,28 @@ static irqreturn_t iwl3945_isr(int irq, void *data)
4387 * back-to-back ISRs and sporadic interrupts from our NIC. 2575 * back-to-back ISRs and sporadic interrupts from our NIC.
4388 * If we have something to service, the tasklet will re-enable ints. 2576 * If we have something to service, the tasklet will re-enable ints.
4389 * If we *don't* have something, we'll re-enable before leaving here. */ 2577 * If we *don't* have something, we'll re-enable before leaving here. */
4390 inta_mask = iwl3945_read32(priv, CSR_INT_MASK); /* just for debug */ 2578 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
4391 iwl3945_write32(priv, CSR_INT_MASK, 0x00000000); 2579 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4392 2580
4393 /* Discover which interrupts are active/pending */ 2581 /* Discover which interrupts are active/pending */
4394 inta = iwl3945_read32(priv, CSR_INT); 2582 inta = iwl_read32(priv, CSR_INT);
4395 inta_fh = iwl3945_read32(priv, CSR_FH_INT_STATUS); 2583 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4396 2584
4397 /* Ignore interrupt if there's nothing in NIC to service. 2585 /* Ignore interrupt if there's nothing in NIC to service.
4398 * This may be due to IRQ shared with another device, 2586 * This may be due to IRQ shared with another device,
4399 * or due to sporadic interrupts thrown from our NIC. */ 2587 * or due to sporadic interrupts thrown from our NIC. */
4400 if (!inta && !inta_fh) { 2588 if (!inta && !inta_fh) {
4401 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); 2589 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
4402 goto none; 2590 goto none;
4403 } 2591 }
4404 2592
4405 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 2593 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
4406 /* Hardware disappeared */ 2594 /* Hardware disappeared */
4407 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta); 2595 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
4408 goto unplugged; 2596 goto unplugged;
4409 } 2597 }
4410 2598
4411 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 2599 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4412 inta, inta_mask, inta_fh); 2600 inta, inta_mask, inta_fh);
4413 2601
4414 inta &= ~CSR_INT_BIT_SCD; 2602 inta &= ~CSR_INT_BIT_SCD;
@@ -4430,337 +2618,26 @@ unplugged:
4430 return IRQ_NONE; 2618 return IRQ_NONE;
4431} 2619}
4432 2620
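
The hard interrupt handler above deliberately stays thin: it masks the device, checks that this (possibly shared) IRQ really came from a live NIC, and leaves the actual servicing to the tasklet, which re-enables interrupts when it finishes. A stripped-down sketch of that split (illustrative; the irq_tasklet field name and the shortened error paths are assumptions, not the in-tree code):

/* Editorial sketch of the ISR / tasklet (top-half / bottom-half) split. */
static irqreturn_t sketch_isr(int irq, void *data)
{
        struct iwl_priv *priv = data;
        u32 inta, inta_fh;

        iwl_write32(priv, CSR_INT_MASK, 0x00000000);    /* mask until tasklet runs */
        inta    = iwl_read32(priv, CSR_INT);
        inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

        if (!inta && !inta_fh)          /* shared IRQ line, nothing from us */
                return IRQ_NONE;        /* (real code re-enables before returning) */

        if (inta == 0xFFFFFFFF)         /* surprise-removed hardware reads all-ones */
                return IRQ_HANDLED;

        tasklet_schedule(&priv->irq_tasklet);   /* bottom half services and re-enables */
        return IRQ_HANDLED;
}
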
4433/************************** EEPROM BANDS **************************** 2621static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
4434 *
4435 * The iwl3945_eeprom_band definitions below provide the mapping from the
4436 * EEPROM contents to the specific channel number supported for each
4437 * band.
4438 *
4439 * For example, iwl3945_priv->eeprom.band_3_channels[4] from the band_3
4440 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
4441 * The specific geography and calibration information for that channel
4442 * is contained in the eeprom map itself.
4443 *
4444 * During init, we copy the eeprom information and channel map
4445 * information into priv->channel_info_24/52 and priv->channel_map_24/52
4446 *
4447 * channel_map_24/52 provides the index in the channel_info array for a
4448 * given channel. We have to have two separate maps as there is channel
4449 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
4450 * band_2
4451 *
4452 * A value of 0xff stored in the channel_map indicates that the channel
4453 * is not supported by the hardware at all.
4454 *
4455 * A value of 0xfe in the channel_map indicates that the channel is not
4456 * valid for Tx with the current hardware. This means that
4457 * while the system can tune and receive on a given channel, it may not
4458 * be able to associate or transmit any frames on that
4459 * channel. There is no corresponding channel information for that
4460 * entry.
4461 *
4462 *********************************************************************/
4463
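
A compact restatement of the lookup convention the comment above describes (sentinel names, map shape and helper are illustrative only; the driver's real tables live in priv):

/* Editorial sketch of a channel_map-style lookup with the two sentinel
 * values described above. */
#define CH_INDEX_UNSUPPORTED  0xff   /* hardware cannot use the channel at all */
#define CH_INDEX_NO_TX        0xfe   /* can tune/receive, but not associate/tx */

static const struct iwl3945_channel_info *
sketch_lookup_channel(const u8 *channel_map, unsigned int map_len,
                      const struct iwl3945_channel_info *info, u16 channel)
{
        u8 idx;

        if (channel >= map_len)
                return NULL;
        idx = channel_map[channel];
        if (idx == CH_INDEX_UNSUPPORTED || idx == CH_INDEX_NO_TX)
                return NULL;            /* no channel_info entry to return */
        return &info[idx];
}
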
4464/* 2.4 GHz */
4465static const u8 iwl3945_eeprom_band_1[14] = {
4466 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4467};
4468
4469/* 5.2 GHz bands */
4470static const u8 iwl3945_eeprom_band_2[] = { /* 4915-5080MHz */
4471 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4472};
4473
4474static const u8 iwl3945_eeprom_band_3[] = { /* 5170-5320MHz */
4475 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4476};
4477
4478static const u8 iwl3945_eeprom_band_4[] = { /* 5500-5700MHz */
4479 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4480};
4481
4482static const u8 iwl3945_eeprom_band_5[] = { /* 5725-5825MHz */
4483 145, 149, 153, 157, 161, 165
4484};
4485
4486static void iwl3945_init_band_reference(const struct iwl3945_priv *priv, int band,
4487 int *eeprom_ch_count,
4488 const struct iwl3945_eeprom_channel
4489 **eeprom_ch_info,
4490 const u8 **eeprom_ch_index)
4491{
4492 switch (band) {
4493 case 1: /* 2.4GHz band */
4494 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_1);
4495 *eeprom_ch_info = priv->eeprom.band_1_channels;
4496 *eeprom_ch_index = iwl3945_eeprom_band_1;
4497 break;
4498 case 2: /* 4.9GHz band */
4499 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_2);
4500 *eeprom_ch_info = priv->eeprom.band_2_channels;
4501 *eeprom_ch_index = iwl3945_eeprom_band_2;
4502 break;
4503 case 3: /* 5.2GHz band */
4504 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_3);
4505 *eeprom_ch_info = priv->eeprom.band_3_channels;
4506 *eeprom_ch_index = iwl3945_eeprom_band_3;
4507 break;
4508 case 4: /* 5.5GHz band */
4509 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_4);
4510 *eeprom_ch_info = priv->eeprom.band_4_channels;
4511 *eeprom_ch_index = iwl3945_eeprom_band_4;
4512 break;
4513 case 5: /* 5.7GHz band */
4514 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_5);
4515 *eeprom_ch_info = priv->eeprom.band_5_channels;
4516 *eeprom_ch_index = iwl3945_eeprom_band_5;
4517 break;
4518 default:
4519 BUG();
4520 return;
4521 }
4522}
4523
4524/**
4525 * iwl3945_get_channel_info - Find driver's private channel info
4526 *
4527 * Based on band and channel number.
4528 */
4529const struct iwl3945_channel_info *iwl3945_get_channel_info(const struct iwl3945_priv *priv,
4530 enum ieee80211_band band, u16 channel)
4531{
4532 int i;
4533
4534 switch (band) {
4535 case IEEE80211_BAND_5GHZ:
4536 for (i = 14; i < priv->channel_count; i++) {
4537 if (priv->channel_info[i].channel == channel)
4538 return &priv->channel_info[i];
4539 }
4540 break;
4541
4542 case IEEE80211_BAND_2GHZ:
4543 if (channel >= 1 && channel <= 14)
4544 return &priv->channel_info[channel - 1];
4545 break;
4546 case IEEE80211_NUM_BANDS:
4547 WARN_ON(1);
4548 }
4549
4550 return NULL;
4551}
4552
4553#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
4554 ? # x " " : "")
4555
4556/**
4557 * iwl3945_init_channel_map - Set up driver's info for all possible channels
4558 */
4559static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
4560{
4561 int eeprom_ch_count = 0;
4562 const u8 *eeprom_ch_index = NULL;
4563 const struct iwl3945_eeprom_channel *eeprom_ch_info = NULL;
4564 int band, ch;
4565 struct iwl3945_channel_info *ch_info;
4566
4567 if (priv->channel_count) {
4568 IWL_DEBUG_INFO("Channel map already initialized.\n");
4569 return 0;
4570 }
4571
4572 if (priv->eeprom.version < 0x2f) {
4573 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
4574 priv->eeprom.version);
4575 return -EINVAL;
4576 }
4577
4578 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
4579
4580 priv->channel_count =
4581 ARRAY_SIZE(iwl3945_eeprom_band_1) +
4582 ARRAY_SIZE(iwl3945_eeprom_band_2) +
4583 ARRAY_SIZE(iwl3945_eeprom_band_3) +
4584 ARRAY_SIZE(iwl3945_eeprom_band_4) +
4585 ARRAY_SIZE(iwl3945_eeprom_band_5);
4586
4587 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
4588
4589 priv->channel_info = kzalloc(sizeof(struct iwl3945_channel_info) *
4590 priv->channel_count, GFP_KERNEL);
4591 if (!priv->channel_info) {
4592 IWL_ERROR("Could not allocate channel_info\n");
4593 priv->channel_count = 0;
4594 return -ENOMEM;
4595 }
4596
4597 ch_info = priv->channel_info;
4598
4599 /* Loop through the 5 EEPROM bands adding them in order to the
4600 * channel map we maintain (that contains additional information than
4601 * what just in the EEPROM) */
4602 for (band = 1; band <= 5; band++) {
4603
4604 iwl3945_init_band_reference(priv, band, &eeprom_ch_count,
4605 &eeprom_ch_info, &eeprom_ch_index);
4606
4607 /* Loop through each band adding each of the channels */
4608 for (ch = 0; ch < eeprom_ch_count; ch++) {
4609 ch_info->channel = eeprom_ch_index[ch];
4610 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
4611 IEEE80211_BAND_5GHZ;
4612
4613 /* permanently store EEPROM's channel regulatory flags
4614 * and max power in channel info database. */
4615 ch_info->eeprom = eeprom_ch_info[ch];
4616
4617 /* Copy the run-time flags so they are there even on
4618 * invalid channels */
4619 ch_info->flags = eeprom_ch_info[ch].flags;
4620
4621 if (!(is_channel_valid(ch_info))) {
4622 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
4623 "No traffic\n",
4624 ch_info->channel,
4625 ch_info->flags,
4626 is_channel_a_band(ch_info) ?
4627 "5.2" : "2.4");
4628 ch_info++;
4629 continue;
4630 }
4631
4632 /* Initialize regulatory-based run-time data */
4633 ch_info->max_power_avg = ch_info->curr_txpow =
4634 eeprom_ch_info[ch].max_power_avg;
4635 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
4636 ch_info->min_power = 0;
4637
4638 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
4639 " %ddBm): Ad-Hoc %ssupported\n",
4640 ch_info->channel,
4641 is_channel_a_band(ch_info) ?
4642 "5.2" : "2.4",
4643 CHECK_AND_PRINT(VALID),
4644 CHECK_AND_PRINT(IBSS),
4645 CHECK_AND_PRINT(ACTIVE),
4646 CHECK_AND_PRINT(RADAR),
4647 CHECK_AND_PRINT(WIDE),
4648 CHECK_AND_PRINT(DFS),
4649 eeprom_ch_info[ch].flags,
4650 eeprom_ch_info[ch].max_power_avg,
4651 ((eeprom_ch_info[ch].
4652 flags & EEPROM_CHANNEL_IBSS)
4653 && !(eeprom_ch_info[ch].
4654 flags & EEPROM_CHANNEL_RADAR))
4655 ? "" : "not ");
4656
4657 /* Set the user_txpower_limit to the highest power
4658 * supported by any channel */
4659 if (eeprom_ch_info[ch].max_power_avg >
4660 priv->user_txpower_limit)
4661 priv->user_txpower_limit =
4662 eeprom_ch_info[ch].max_power_avg;
4663
4664 ch_info++;
4665 }
4666 }
4667
4668 /* Set up txpower settings in driver for all channels */
4669 if (iwl3945_txpower_set_from_eeprom(priv))
4670 return -EIO;
4671
4672 return 0;
4673}
4674
4675/*
4676 * iwl3945_free_channel_map - undo allocations in iwl3945_init_channel_map
4677 */
4678static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4679{
4680 kfree(priv->channel_info);
4681 priv->channel_count = 0;
4682}
4683
4684/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4685 * sending probe req. This should be set long enough to hear probe responses
4686 * from more than one AP. */
4687#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
4688#define IWL_ACTIVE_DWELL_TIME_52 (20)
4689
4690#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
4691#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
4692
4693/* For faster active scanning, scan will move to the next channel if fewer than
4694 * PLCP_QUIET_THRESH packets are heard on this channel within
4695 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
4696 * time if it's a quiet channel (nothing responded to our probe, and there's
4697 * no other traffic).
4698 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4699#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
4700#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
4701
4702/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4703 * Must be set longer than active dwell time.
4704 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
4705#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
4706#define IWL_PASSIVE_DWELL_TIME_52 (10)
4707#define IWL_PASSIVE_DWELL_BASE (100)
4708#define IWL_CHANNEL_TUNE_TIME 5
4709
4710#define IWL_SCAN_PROBE_MASK(n) (BIT(n) | (BIT(n) - BIT(1)))
4711
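
The probe-mask macro just above builds a mask with bits 1..n set while leaving bit 0 clear; a quick expansion to make that visible:

/* Editorial worked example of IWL_SCAN_PROBE_MASK(n) = BIT(n) | (BIT(n) - BIT(1)):
 *   n = 1:  0b00010 | (0b00010 - 0b00010) = 0b00010  (0x02)
 *   n = 3:  0b01000 | (0b01000 - 0b00010) = 0b01110  (0x0e)
 *   n = 4:  0b10000 | (0b10000 - 0b00010) = 0b11110  (0x1e)
 * i.e. one direct-probe bit per requested probe, bits 1..n_probes. */
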
4712static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv,
4713 enum ieee80211_band band,
4714 u8 n_probes)
4715{
4716 if (band == IEEE80211_BAND_5GHZ)
4717 return IWL_ACTIVE_DWELL_TIME_52 +
4718 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
4719 else
4720 return IWL_ACTIVE_DWELL_TIME_24 +
4721 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
4722}
4723
4724static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
4725 enum ieee80211_band band)
4726{
4727 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
4728 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4729 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
4730
4731 if (iwl3945_is_associated(priv)) {
4732 /* If we're associated, we clamp the maximum passive
4733 * dwell time to be 98% of the beacon interval (minus
4734 * 2 * channel tune time) */
4735 passive = priv->beacon_int;
4736 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
4737 passive = IWL_PASSIVE_DWELL_BASE;
4738 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4739 }
4740
4741 return passive;
4742}
4743
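
To make the clamping in iwl3945_get_passive_dwell_time() concrete, the integer arithmetic works out as follows with the constants defined above (all values in msec):

/* Editorial worked examples:
 *   not associated, 2.4 GHz:        100 + 20                        = 120
 *   associated, beacon_int = 100:   (100 * 98) / 100 - 2 * 5        =  88
 *   associated, beacon_int = 300:   clamped to 100 first, as above  =  88
 *   associated, beacon_int =  50:   (50 * 98) / 100 - 2 * 5         =  39
 * i.e. while associated the passive dwell stays below ~98% of one beacon
 * interval and never exceeds the 100 ms base value. */
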
4744static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4745 enum ieee80211_band band, 2622 enum ieee80211_band band,
4746 u8 is_active, u8 n_probes, 2623 u8 is_active, u8 n_probes,
4747 struct iwl3945_scan_channel *scan_ch) 2624 struct iwl3945_scan_channel *scan_ch)
4748{ 2625{
4749 const struct ieee80211_channel *channels = NULL; 2626 const struct ieee80211_channel *channels = NULL;
4750 const struct ieee80211_supported_band *sband; 2627 const struct ieee80211_supported_band *sband;
4751 const struct iwl3945_channel_info *ch_info; 2628 const struct iwl_channel_info *ch_info;
4752 u16 passive_dwell = 0; 2629 u16 passive_dwell = 0;
4753 u16 active_dwell = 0; 2630 u16 active_dwell = 0;
4754 int added, i; 2631 int added, i;
4755 2632
4756 sband = iwl3945_get_band(priv, band); 2633 sband = iwl_get_hw_mode(priv, band);
4757 if (!sband) 2634 if (!sband)
4758 return 0; 2635 return 0;
4759 2636
4760 channels = sband->channels; 2637 channels = sband->channels;
4761 2638
4762 active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes); 2639 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
4763 passive_dwell = iwl3945_get_passive_dwell_time(priv, band); 2640 passive_dwell = iwl_get_passive_dwell_time(priv, band);
4764 2641
4765 if (passive_dwell <= active_dwell) 2642 if (passive_dwell <= active_dwell)
4766 passive_dwell = active_dwell + 1; 2643 passive_dwell = active_dwell + 1;
@@ -4771,9 +2648,9 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4771 2648
4772 scan_ch->channel = channels[i].hw_value; 2649 scan_ch->channel = channels[i].hw_value;
4773 2650
4774 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel); 2651 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
4775 if (!is_channel_valid(ch_info)) { 2652 if (!is_channel_valid(ch_info)) {
4776 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n", 2653 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
4777 scan_ch->channel); 2654 scan_ch->channel);
4778 continue; 2655 continue;
4779 } 2656 }
@@ -4798,12 +2675,12 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4798 * hearing clear Rx packet).*/ 2675 * hearing clear Rx packet).*/
4799 if (IWL_UCODE_API(priv->ucode_ver) >= 2) { 2676 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
4800 if (n_probes) 2677 if (n_probes)
4801 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); 2678 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
4802 } else { 2679 } else {
4803 /* uCode v1 does not allow setting direct probe bits on 2680 /* uCode v1 does not allow setting direct probe bits on
4804 * passive channel. */ 2681 * passive channel. */
4805 if ((scan_ch->type & 1) && n_probes) 2682 if ((scan_ch->type & 1) && n_probes)
4806 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); 2683 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
4807 } 2684 }
4808 2685
4809 /* Set txpower levels to defaults */ 2686 /* Set txpower levels to defaults */
@@ -4821,7 +2698,7 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4821 */ 2698 */
4822 } 2699 }
4823 2700
4824 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n", 2701 IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
4825 scan_ch->channel, 2702 scan_ch->channel,
4826 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", 2703 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
4827 (scan_ch->type & 1) ? 2704 (scan_ch->type & 1) ?
@@ -4831,11 +2708,11 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4831 added++; 2708 added++;
4832 } 2709 }
4833 2710
4834 IWL_DEBUG_SCAN("total channels to scan %d \n", added); 2711 IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
4835 return added; 2712 return added;
4836} 2713}
4837 2714
4838static void iwl3945_init_hw_rates(struct iwl3945_priv *priv, 2715static void iwl3945_init_hw_rates(struct iwl_priv *priv,
4839 struct ieee80211_rate *rates) 2716 struct ieee80211_rate *rates)
4840{ 2717{
4841 int i; 2718 int i;
@@ -4845,7 +2722,7 @@ static void iwl3945_init_hw_rates(struct iwl3945_priv *priv,
4845 rates[i].hw_value = i; /* Rate scaling will work on indexes */ 2722 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4846 rates[i].hw_value_short = i; 2723 rates[i].hw_value_short = i;
4847 rates[i].flags = 0; 2724 rates[i].flags = 0;
4848 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { 2725 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
4849 /* 2726 /*
4850 * If CCK != 1M then set short preamble rate flag. 2727 * If CCK != 1M then set short preamble rate flag.
4851 */ 2728 */
@@ -4855,145 +2732,13 @@ static void iwl3945_init_hw_rates(struct iwl3945_priv *priv,
4855 } 2732 }
4856} 2733}
4857 2734
4858/**
4859 * iwl3945_init_geos - Initialize mac80211's geo/channel info based from eeprom
4860 */
4861static int iwl3945_init_geos(struct iwl3945_priv *priv)
4862{
4863 struct iwl3945_channel_info *ch;
4864 struct ieee80211_supported_band *sband;
4865 struct ieee80211_channel *channels;
4866 struct ieee80211_channel *geo_ch;
4867 struct ieee80211_rate *rates;
4868 int i = 0;
4869
4870 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4871 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
4872 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4873 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4874 return 0;
4875 }
4876
4877 channels = kzalloc(sizeof(struct ieee80211_channel) *
4878 priv->channel_count, GFP_KERNEL);
4879 if (!channels)
4880 return -ENOMEM;
4881
4882 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
4883 GFP_KERNEL);
4884 if (!rates) {
4885 kfree(channels);
4886 return -ENOMEM;
4887 }
4888
4889 /* 5.2GHz channels start after the 2.4GHz channels */
4890 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4891 sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
4892 /* just OFDM */
4893 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4894 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
4895
4896 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4897 sband->channels = channels;
4898 /* OFDM & CCK */
4899 sband->bitrates = rates;
4900 sband->n_bitrates = IWL_RATE_COUNT;
4901
4902 priv->ieee_channels = channels;
4903 priv->ieee_rates = rates;
4904
4905 iwl3945_init_hw_rates(priv, rates);
4906
4907 for (i = 0; i < priv->channel_count; i++) {
4908 ch = &priv->channel_info[i];
4909
4910 /* FIXME: might be removed if scan is OK*/
4911 if (!is_channel_valid(ch))
4912 continue;
4913
4914 if (is_channel_a_band(ch))
4915 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4916 else
4917 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4918
4919 geo_ch = &sband->channels[sband->n_channels++];
4920
4921 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
4922 geo_ch->max_power = ch->max_power_avg;
4923 geo_ch->max_antenna_gain = 0xff;
4924 geo_ch->hw_value = ch->channel;
4925
4926 if (is_channel_valid(ch)) {
4927 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4928 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
4929
4930 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4931 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
4932
4933 if (ch->flags & EEPROM_CHANNEL_RADAR)
4934 geo_ch->flags |= IEEE80211_CHAN_RADAR;
4935
4936 if (ch->max_power_avg > priv->max_channel_txpower_limit)
4937 priv->max_channel_txpower_limit =
4938 ch->max_power_avg;
4939 } else {
4940 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
4941 }
4942
4943 /* Save flags for reg domain usage */
4944 geo_ch->orig_flags = geo_ch->flags;
4945
4946 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4947 ch->channel, geo_ch->center_freq,
4948 is_channel_a_band(ch) ? "5.2" : "2.4",
4949 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4950 "restricted" : "valid",
4951 geo_ch->flags);
4952 }
4953
4954 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4955 priv->cfg->sku & IWL_SKU_A) {
4956 printk(KERN_INFO DRV_NAME
4957 ": Incorrectly detected BG card as ABG. Please send "
4958 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
4959 priv->pci_dev->device, priv->pci_dev->subsystem_device);
4960 priv->cfg->sku &= ~IWL_SKU_A;
4961 }
4962
4963 printk(KERN_INFO DRV_NAME
4964 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
4965 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4966 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
4967
4968 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4969 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4970 &priv->bands[IEEE80211_BAND_2GHZ];
4971 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4972 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4973 &priv->bands[IEEE80211_BAND_5GHZ];
4974
4975 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4976
4977 return 0;
4978}
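The center_freq assignment above relies on mac80211's ieee80211_channel_to_frequency(). A minimal sketch of the mapping it performs for the channels this driver uses (illustrative only, not the mac80211 implementation):

static int example_channel_to_freq(int chan, int is_5ghz)
{
	if (is_5ghz)
		return 5000 + 5 * chan;	/* e.g. channel 36 -> 5180 MHz */
	if (chan == 14)
		return 2484;		/* 2.4 GHz channel 14 is a special case */
	return 2407 + 5 * chan;		/* e.g. channel 6 -> 2437 MHz */
}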
4979
4980/*
4981 * iwl3945_free_geos - undo allocations in iwl3945_init_geos
4982 */
4983static void iwl3945_free_geos(struct iwl3945_priv *priv)
4984{
4985 kfree(priv->ieee_channels);
4986 kfree(priv->ieee_rates);
4987 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4988}
4989
4990/****************************************************************************** 2735/******************************************************************************
4991 * 2736 *
4992 * uCode download functions 2737 * uCode download functions
4993 * 2738 *
4994 ******************************************************************************/ 2739 ******************************************************************************/
4995 2740
4996static void iwl3945_dealloc_ucode_pci(struct iwl3945_priv *priv) 2741static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
4997{ 2742{
4998 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 2743 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
4999 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 2744 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
@@ -5007,29 +2752,30 @@ static void iwl3945_dealloc_ucode_pci(struct iwl3945_priv *priv)
5007 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, 2752 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
5008 * looking at all data. 2753 * looking at all data.
5009 */ 2754 */
5010static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 *image, u32 len) 2755static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
5011{ 2756{
5012 u32 val; 2757 u32 val;
5013 u32 save_len = len; 2758 u32 save_len = len;
5014 int rc = 0; 2759 int rc = 0;
5015 u32 errcnt; 2760 u32 errcnt;
5016 2761
5017 IWL_DEBUG_INFO("ucode inst image size is %u\n", len); 2762 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
5018 2763
5019 rc = iwl3945_grab_nic_access(priv); 2764 rc = iwl_grab_nic_access(priv);
5020 if (rc) 2765 if (rc)
5021 return rc; 2766 return rc;
5022 2767
5023 iwl3945_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); 2768 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2769 IWL39_RTC_INST_LOWER_BOUND);
5024 2770
5025 errcnt = 0; 2771 errcnt = 0;
5026 for (; len > 0; len -= sizeof(u32), image++) { 2772 for (; len > 0; len -= sizeof(u32), image++) {
5027 /* read data comes through single port, auto-incr addr */ 2773 /* read data comes through single port, auto-incr addr */
5028 /* NOTE: Use the debugless read so we don't flood kernel log 2774 /* NOTE: Use the debugless read so we don't flood kernel log
5029 * if IWL_DL_IO is set */ 2775 * if IWL_DL_IO is set */
5030 val = _iwl3945_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2776 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
5031 if (val != le32_to_cpu(*image)) { 2777 if (val != le32_to_cpu(*image)) {
5032 IWL_ERROR("uCode INST section is invalid at " 2778 IWL_ERR(priv, "uCode INST section is invalid at "
5033 "offset 0x%x, is 0x%x, s/b 0x%x\n", 2779 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5034 save_len - len, val, le32_to_cpu(*image)); 2780 save_len - len, val, le32_to_cpu(*image));
5035 rc = -EIO; 2781 rc = -EIO;
@@ -5039,10 +2785,11 @@ static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 *image, u3
5039 } 2785 }
5040 } 2786 }
5041 2787
5042 iwl3945_release_nic_access(priv); 2788 iwl_release_nic_access(priv);
5043 2789
5044 if (!errcnt) 2790 if (!errcnt)
5045 IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n"); 2791 IWL_DEBUG_INFO(priv,
2792 "ucode image in INSTRUCTION memory is good\n");
5046 2793
5047 return rc; 2794 return rc;
5048} 2795}
@@ -5053,16 +2800,16 @@ static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 *image, u3
5053 * using sample data 100 bytes apart. If these sample points are good, 2800 * using sample data 100 bytes apart. If these sample points are good,
5054 * it's a pretty good bet that everything between them is good, too. 2801 * it's a pretty good bet that everything between them is good, too.
5055 */ 2802 */
5056static int iwl3945_verify_inst_sparse(struct iwl3945_priv *priv, __le32 *image, u32 len) 2803static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
5057{ 2804{
5058 u32 val; 2805 u32 val;
5059 int rc = 0; 2806 int rc = 0;
5060 u32 errcnt = 0; 2807 u32 errcnt = 0;
5061 u32 i; 2808 u32 i;
5062 2809
5063 IWL_DEBUG_INFO("ucode inst image size is %u\n", len); 2810 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
5064 2811
5065 rc = iwl3945_grab_nic_access(priv); 2812 rc = iwl_grab_nic_access(priv);
5066 if (rc) 2813 if (rc)
5067 return rc; 2814 return rc;
5068 2815
@@ -5070,12 +2817,12 @@ static int iwl3945_verify_inst_sparse(struct iwl3945_priv *priv, __le32 *image,
5070 /* read data comes through single port, auto-incr addr */ 2817 /* read data comes through single port, auto-incr addr */
5071 /* NOTE: Use the debugless read so we don't flood kernel log 2818 /* NOTE: Use the debugless read so we don't flood kernel log
5072 * if IWL_DL_IO is set */ 2819 * if IWL_DL_IO is set */
5073 iwl3945_write_direct32(priv, HBUS_TARG_MEM_RADDR, 2820 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
5074 i + RTC_INST_LOWER_BOUND); 2821 i + IWL39_RTC_INST_LOWER_BOUND);
5075 val = _iwl3945_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2822 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
5076 if (val != le32_to_cpu(*image)) { 2823 if (val != le32_to_cpu(*image)) {
5077#if 0 /* Enable this if you want to see details */ 2824#if 0 /* Enable this if you want to see details */
5078 IWL_ERROR("uCode INST section is invalid at " 2825 IWL_ERR(priv, "uCode INST section is invalid at "
5079 "offset 0x%x, is 0x%x, s/b 0x%x\n", 2826 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5080 i, val, *image); 2827 i, val, *image);
5081#endif 2828#endif
@@ -5086,7 +2833,7 @@ static int iwl3945_verify_inst_sparse(struct iwl3945_priv *priv, __le32 *image,
5086 } 2833 }
5087 } 2834 }
5088 2835
5089 iwl3945_release_nic_access(priv); 2836 iwl_release_nic_access(priv);
5090 2837
5091 return rc; 2838 return rc;
5092} 2839}
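A rough sense of the savings: sampling one 32-bit word every 100 bytes means an instruction image of, say, 90 KB is checked at roughly 900 points rather than all ~23,000 words; iwl3945_verify_inst_full() above remains available when an exhaustive word-by-word comparison is wanted.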
@@ -5096,7 +2843,7 @@ static int iwl3945_verify_inst_sparse(struct iwl3945_priv *priv, __le32 *image,
5096 * iwl3945_verify_ucode - determine which instruction image is in SRAM, 2843 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
5097 * and verify its contents 2844 * and verify its contents
5098 */ 2845 */
5099static int iwl3945_verify_ucode(struct iwl3945_priv *priv) 2846static int iwl3945_verify_ucode(struct iwl_priv *priv)
5100{ 2847{
5101 __le32 *image; 2848 __le32 *image;
5102 u32 len; 2849 u32 len;
@@ -5107,7 +2854,7 @@ static int iwl3945_verify_ucode(struct iwl3945_priv *priv)
5107 len = priv->ucode_boot.len; 2854 len = priv->ucode_boot.len;
5108 rc = iwl3945_verify_inst_sparse(priv, image, len); 2855 rc = iwl3945_verify_inst_sparse(priv, image, len);
5109 if (rc == 0) { 2856 if (rc == 0) {
5110 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); 2857 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
5111 return 0; 2858 return 0;
5112 } 2859 }
5113 2860
@@ -5116,7 +2863,7 @@ static int iwl3945_verify_ucode(struct iwl3945_priv *priv)
5116 len = priv->ucode_init.len; 2863 len = priv->ucode_init.len;
5117 rc = iwl3945_verify_inst_sparse(priv, image, len); 2864 rc = iwl3945_verify_inst_sparse(priv, image, len);
5118 if (rc == 0) { 2865 if (rc == 0) {
5119 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); 2866 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
5120 return 0; 2867 return 0;
5121 } 2868 }
5122 2869
@@ -5125,11 +2872,11 @@ static int iwl3945_verify_ucode(struct iwl3945_priv *priv)
5125 len = priv->ucode_code.len; 2872 len = priv->ucode_code.len;
5126 rc = iwl3945_verify_inst_sparse(priv, image, len); 2873 rc = iwl3945_verify_inst_sparse(priv, image, len);
5127 if (rc == 0) { 2874 if (rc == 0) {
5128 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); 2875 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
5129 return 0; 2876 return 0;
5130 } 2877 }
5131 2878
5132 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); 2879 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5133 2880
5134 /* Since nothing seems to match, show first several data entries in 2881 /* Since nothing seems to match, show first several data entries in
5135 * instruction SRAM, so maybe visual inspection will give a clue. 2882 * instruction SRAM, so maybe visual inspection will give a clue.
@@ -5141,160 +2888,10 @@ static int iwl3945_verify_ucode(struct iwl3945_priv *priv)
5141 return rc; 2888 return rc;
5142} 2889}
5143 2890
5144 2891static void iwl3945_nic_start(struct iwl_priv *priv)
5145/* check contents of special bootstrap uCode SRAM */
5146static int iwl3945_verify_bsm(struct iwl3945_priv *priv)
5147{
5148 __le32 *image = priv->ucode_boot.v_addr;
5149 u32 len = priv->ucode_boot.len;
5150 u32 reg;
5151 u32 val;
5152
5153 IWL_DEBUG_INFO("Begin verify bsm\n");
5154
5155 /* verify BSM SRAM contents */
5156 val = iwl3945_read_prph(priv, BSM_WR_DWCOUNT_REG);
5157 for (reg = BSM_SRAM_LOWER_BOUND;
5158 reg < BSM_SRAM_LOWER_BOUND + len;
5159 reg += sizeof(u32), image++) {
5160 val = iwl3945_read_prph(priv, reg);
5161 if (val != le32_to_cpu(*image)) {
5162 IWL_ERROR("BSM uCode verification failed at "
5163 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
5164 BSM_SRAM_LOWER_BOUND,
5165 reg - BSM_SRAM_LOWER_BOUND, len,
5166 val, le32_to_cpu(*image));
5167 return -EIO;
5168 }
5169 }
5170
5171 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
5172
5173 return 0;
5174}
5175
5176/**
5177 * iwl3945_load_bsm - Load bootstrap instructions
5178 *
5179 * BSM operation:
5180 *
5181 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
5182 * in special SRAM that does not power down during RFKILL. When powering back
5183 * up after power-saving sleeps (or during initial uCode load), the BSM loads
5184 * the bootstrap program into the on-board processor, and starts it.
5185 *
5186 * The bootstrap program loads (via DMA) instructions and data for a new
5187 * program from host DRAM locations indicated by the host driver in the
5188 * BSM_DRAM_* registers. Once the new program is loaded, it starts
5189 * automatically.
5190 *
5191 * When initializing the NIC, the host driver points the BSM to the
5192 * "initialize" uCode image. This uCode sets up some internal data, then
5193 * notifies host via "initialize alive" that it is complete.
5194 *
5195 * The host then replaces the BSM_DRAM_* pointer values to point to the
5196 * normal runtime uCode instructions and a backup uCode data cache buffer
5197 * (filled initially with starting data values for the on-board processor),
5198 * then triggers the "initialize" uCode to load and launch the runtime uCode,
5199 * which begins normal operation.
5200 *
5201 * When doing a power-save shutdown, runtime uCode saves data SRAM into
5202 * the backup data cache in DRAM before SRAM is powered down.
5203 *
5204 * When powering back up, the BSM loads the bootstrap program. This reloads
5205 * the runtime uCode instructions and the backup data cache into SRAM,
5206 * and re-launches the runtime uCode from where it left off.
5207 */
5208static int iwl3945_load_bsm(struct iwl3945_priv *priv)
5209{
5210 __le32 *image = priv->ucode_boot.v_addr;
5211 u32 len = priv->ucode_boot.len;
5212 dma_addr_t pinst;
5213 dma_addr_t pdata;
5214 u32 inst_len;
5215 u32 data_len;
5216 int rc;
5217 int i;
5218 u32 done;
5219 u32 reg_offset;
5220
5221 IWL_DEBUG_INFO("Begin load bsm\n");
5222
5223 /* make sure bootstrap program is no larger than BSM's SRAM size */
5224 if (len > IWL_MAX_BSM_SIZE)
5225 return -EINVAL;
5226
5227 /* Tell bootstrap uCode where to find the "Initialize" uCode
5228 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
5229 * NOTE: iwl3945_initialize_alive_start() will replace these values,
5230 * after the "initialize" uCode has run, to point to
5231 * runtime/protocol instructions and backup data cache. */
5232 pinst = priv->ucode_init.p_addr;
5233 pdata = priv->ucode_init_data.p_addr;
5234 inst_len = priv->ucode_init.len;
5235 data_len = priv->ucode_init_data.len;
5236
5237 rc = iwl3945_grab_nic_access(priv);
5238 if (rc)
5239 return rc;
5240
5241 iwl3945_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5242 iwl3945_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5243 iwl3945_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
5244 iwl3945_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
5245
5246 /* Fill BSM memory with bootstrap instructions */
5247 for (reg_offset = BSM_SRAM_LOWER_BOUND;
5248 reg_offset < BSM_SRAM_LOWER_BOUND + len;
5249 reg_offset += sizeof(u32), image++)
5250 _iwl3945_write_prph(priv, reg_offset,
5251 le32_to_cpu(*image));
5252
5253 rc = iwl3945_verify_bsm(priv);
5254 if (rc) {
5255 iwl3945_release_nic_access(priv);
5256 return rc;
5257 }
5258
5259 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
5260 iwl3945_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
5261 iwl3945_write_prph(priv, BSM_WR_MEM_DST_REG,
5262 RTC_INST_LOWER_BOUND);
5263 iwl3945_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
5264
5265 /* Load bootstrap code into instruction SRAM now,
5266 * to prepare to load "initialize" uCode */
5267 iwl3945_write_prph(priv, BSM_WR_CTRL_REG,
5268 BSM_WR_CTRL_REG_BIT_START);
5269
5270 /* Wait for load of bootstrap uCode to finish */
5271 for (i = 0; i < 100; i++) {
5272 done = iwl3945_read_prph(priv, BSM_WR_CTRL_REG);
5273 if (!(done & BSM_WR_CTRL_REG_BIT_START))
5274 break;
5275 udelay(10);
5276 }
5277 if (i < 100)
5278 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
5279 else {
5280 IWL_ERROR("BSM write did not complete!\n");
5281 return -EIO;
5282 }
5283
5284 /* Enable future boot loads whenever power management unit triggers it
5285 * (e.g. when powering back up after power-save shutdown) */
5286 iwl3945_write_prph(priv, BSM_WR_CTRL_REG,
5287 BSM_WR_CTRL_REG_BIT_START_EN);
5288
5289 iwl3945_release_nic_access(priv);
5290
5291 return 0;
5292}
5293
5294static void iwl3945_nic_start(struct iwl3945_priv *priv)
5295{ 2892{
5296 /* Remove all resets to allow NIC to operate */ 2893 /* Remove all resets to allow NIC to operate */
5297 iwl3945_write32(priv, CSR_RESET, 0); 2894 iwl_write32(priv, CSR_RESET, 0);
5298} 2895}
5299 2896
5300/** 2897/**
@@ -5302,9 +2899,9 @@ static void iwl3945_nic_start(struct iwl3945_priv *priv)
5302 * 2899 *
5303 * Copy into buffers for card to fetch via bus-mastering 2900 * Copy into buffers for card to fetch via bus-mastering
5304 */ 2901 */
5305static int iwl3945_read_ucode(struct iwl3945_priv *priv) 2902static int iwl3945_read_ucode(struct iwl_priv *priv)
5306{ 2903{
5307 struct iwl3945_ucode *ucode; 2904 struct iwl_ucode *ucode;
5308 int ret = -EINVAL, index; 2905 int ret = -EINVAL, index;
5309 const struct firmware *ucode_raw; 2906 const struct firmware *ucode_raw;
5310 /* firmware file name contains uCode/driver compatibility version */ 2907 /* firmware file name contains uCode/driver compatibility version */
@@ -5322,7 +2919,7 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5322 sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); 2919 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
5323 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); 2920 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
5324 if (ret < 0) { 2921 if (ret < 0) {
5325 IWL_ERROR("%s firmware file req failed: Reason %d\n", 2922 IWL_ERR(priv, "%s firmware file req failed: %d\n",
5326 buf, ret); 2923 buf, ret);
5327 if (ret == -ENOENT) 2924 if (ret == -ENOENT)
5328 continue; 2925 continue;
@@ -5330,9 +2927,12 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5330 goto error; 2927 goto error;
5331 } else { 2928 } else {
5332 if (index < api_max) 2929 if (index < api_max)
5333 IWL_ERROR("Loaded firmware %s, which is deprecated. Please use API v%u instead.\n", 2930 IWL_ERR(priv, "Loaded firmware %s, "
2931 "which is deprecated. "
2932 " Please use API v%u instead.\n",
5334 buf, api_max); 2933 buf, api_max);
5335 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", 2934 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
2935 "(%zd bytes) from disk\n",
5336 buf, ucode_raw->size); 2936 buf, ucode_raw->size);
5337 break; 2937 break;
5338 } 2938 }
@@ -5343,7 +2943,7 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5343 2943
5344 /* Make sure that we got at least our header! */ 2944 /* Make sure that we got at least our header! */
5345 if (ucode_raw->size < sizeof(*ucode)) { 2945 if (ucode_raw->size < sizeof(*ucode)) {
5346 IWL_ERROR("File size way too small!\n"); 2946 IWL_ERR(priv, "File size way too small!\n");
5347 ret = -EINVAL; 2947 ret = -EINVAL;
5348 goto err_release; 2948 goto err_release;
5349 } 2949 }
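For context on the loop above: the firmware file name is a fixed prefix plus the uCode API version, so the driver asks for the newest supported image first (for the 3945 this is typically a name like "iwlwifi-3945-2.ucode") and, on -ENOENT, retries one API version lower (e.g. "iwlwifi-3945-1.ucode"). Any other request_firmware() failure aborts the load immediately.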
@@ -5364,7 +2964,7 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
 5364 * on the API version read from firmware header from here on forward */ 2964
5365 2965
5366 if (api_ver < api_min || api_ver > api_max) { 2966 if (api_ver < api_min || api_ver > api_max) {
5367 IWL_ERROR("Driver unable to support your firmware API. " 2967 IWL_ERR(priv, "Driver unable to support your firmware API. "
5368 "Driver supports v%u, firmware is v%u.\n", 2968 "Driver supports v%u, firmware is v%u.\n",
5369 api_max, api_ver); 2969 api_max, api_ver);
5370 priv->ucode_ver = 0; 2970 priv->ucode_ver = 0;
@@ -5372,23 +2972,29 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5372 goto err_release; 2972 goto err_release;
5373 } 2973 }
5374 if (api_ver != api_max) 2974 if (api_ver != api_max)
5375 IWL_ERROR("Firmware has old API version. Expected %u, " 2975 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
5376 "got %u. New firmware can be obtained " 2976 "got %u. New firmware can be obtained "
5377 "from http://www.intellinuxwireless.org.\n", 2977 "from http://www.intellinuxwireless.org.\n",
5378 api_max, api_ver); 2978 api_max, api_ver);
5379 2979
5380 printk(KERN_INFO DRV_NAME " loaded firmware version %u.%u.%u.%u\n", 2980 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
5381 IWL_UCODE_MAJOR(priv->ucode_ver), 2981 IWL_UCODE_MAJOR(priv->ucode_ver),
5382 IWL_UCODE_MINOR(priv->ucode_ver), 2982 IWL_UCODE_MINOR(priv->ucode_ver),
5383 IWL_UCODE_API(priv->ucode_ver), 2983 IWL_UCODE_API(priv->ucode_ver),
5384 IWL_UCODE_SERIAL(priv->ucode_ver)); 2984 IWL_UCODE_SERIAL(priv->ucode_ver));
5385 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n", 2985
2986 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
5386 priv->ucode_ver); 2987 priv->ucode_ver);
5387 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size); 2988 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
5388 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size); 2989 inst_size);
5389 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size); 2990 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
5390 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size); 2991 data_size);
5391 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size); 2992 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
2993 init_size);
2994 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
2995 init_data_size);
2996 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
2997 boot_size);
5392 2998
5393 2999
5394 /* Verify size of file vs. image size info in file's header */ 3000 /* Verify size of file vs. image size info in file's header */
@@ -5396,40 +3002,43 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5396 inst_size + data_size + init_size + 3002 inst_size + data_size + init_size +
5397 init_data_size + boot_size) { 3003 init_data_size + boot_size) {
5398 3004
5399 IWL_DEBUG_INFO("uCode file size %d too small\n", 3005 IWL_DEBUG_INFO(priv, "uCode file size %zd too small\n",
5400 (int)ucode_raw->size); 3006 ucode_raw->size);
5401 ret = -EINVAL; 3007 ret = -EINVAL;
5402 goto err_release; 3008 goto err_release;
5403 } 3009 }
5404 3010
5405 /* Verify that uCode images will fit in card's SRAM */ 3011 /* Verify that uCode images will fit in card's SRAM */
5406 if (inst_size > IWL_MAX_INST_SIZE) { 3012 if (inst_size > IWL39_MAX_INST_SIZE) {
5407 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n", 3013 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
5408 inst_size); 3014 inst_size);
5409 ret = -EINVAL; 3015 ret = -EINVAL;
5410 goto err_release; 3016 goto err_release;
5411 } 3017 }
5412 3018
5413 if (data_size > IWL_MAX_DATA_SIZE) { 3019 if (data_size > IWL39_MAX_DATA_SIZE) {
5414 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n", 3020 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
5415 data_size); 3021 data_size);
5416 ret = -EINVAL; 3022 ret = -EINVAL;
5417 goto err_release; 3023 goto err_release;
5418 } 3024 }
5419 if (init_size > IWL_MAX_INST_SIZE) { 3025 if (init_size > IWL39_MAX_INST_SIZE) {
5420 IWL_DEBUG_INFO("uCode init instr len %d too large to fit in\n", 3026 IWL_DEBUG_INFO(priv,
3027 "uCode init instr len %d too large to fit in\n",
5421 init_size); 3028 init_size);
5422 ret = -EINVAL; 3029 ret = -EINVAL;
5423 goto err_release; 3030 goto err_release;
5424 } 3031 }
5425 if (init_data_size > IWL_MAX_DATA_SIZE) { 3032 if (init_data_size > IWL39_MAX_DATA_SIZE) {
5426 IWL_DEBUG_INFO("uCode init data len %d too large to fit in\n", 3033 IWL_DEBUG_INFO(priv,
3034 "uCode init data len %d too large to fit in\n",
5427 init_data_size); 3035 init_data_size);
5428 ret = -EINVAL; 3036 ret = -EINVAL;
5429 goto err_release; 3037 goto err_release;
5430 } 3038 }
5431 if (boot_size > IWL_MAX_BSM_SIZE) { 3039 if (boot_size > IWL39_MAX_BSM_SIZE) {
5432 IWL_DEBUG_INFO("uCode boot instr len %d too large to fit in\n", 3040 IWL_DEBUG_INFO(priv,
3041 "uCode boot instr len %d too large to fit in\n",
5433 boot_size); 3042 boot_size);
5434 ret = -EINVAL; 3043 ret = -EINVAL;
5435 goto err_release; 3044 goto err_release;
@@ -5479,16 +3088,18 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5479 /* Runtime instructions (first block of data in file) */ 3088 /* Runtime instructions (first block of data in file) */
5480 src = &ucode->data[0]; 3089 src = &ucode->data[0];
5481 len = priv->ucode_code.len; 3090 len = priv->ucode_code.len;
5482 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len); 3091 IWL_DEBUG_INFO(priv,
3092 "Copying (but not loading) uCode instr len %zd\n", len);
5483 memcpy(priv->ucode_code.v_addr, src, len); 3093 memcpy(priv->ucode_code.v_addr, src, len);
5484 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", 3094 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
5485 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); 3095 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
5486 3096
5487 /* Runtime data (2nd block) 3097 /* Runtime data (2nd block)
5488 * NOTE: Copy into backup buffer will be done in iwl3945_up() */ 3098 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
5489 src = &ucode->data[inst_size]; 3099 src = &ucode->data[inst_size];
5490 len = priv->ucode_data.len; 3100 len = priv->ucode_data.len;
5491 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len); 3101 IWL_DEBUG_INFO(priv,
3102 "Copying (but not loading) uCode data len %zd\n", len);
5492 memcpy(priv->ucode_data.v_addr, src, len); 3103 memcpy(priv->ucode_data.v_addr, src, len);
5493 memcpy(priv->ucode_data_backup.v_addr, src, len); 3104 memcpy(priv->ucode_data_backup.v_addr, src, len);
5494 3105
@@ -5496,8 +3107,8 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5496 if (init_size) { 3107 if (init_size) {
5497 src = &ucode->data[inst_size + data_size]; 3108 src = &ucode->data[inst_size + data_size];
5498 len = priv->ucode_init.len; 3109 len = priv->ucode_init.len;
5499 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n", 3110 IWL_DEBUG_INFO(priv,
5500 len); 3111 "Copying (but not loading) init instr len %zd\n", len);
5501 memcpy(priv->ucode_init.v_addr, src, len); 3112 memcpy(priv->ucode_init.v_addr, src, len);
5502 } 3113 }
5503 3114
@@ -5505,16 +3116,16 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5505 if (init_data_size) { 3116 if (init_data_size) {
5506 src = &ucode->data[inst_size + data_size + init_size]; 3117 src = &ucode->data[inst_size + data_size + init_size];
5507 len = priv->ucode_init_data.len; 3118 len = priv->ucode_init_data.len;
5508 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n", 3119 IWL_DEBUG_INFO(priv,
5509 (int)len); 3120 "Copying (but not loading) init data len %zd\n", len);
5510 memcpy(priv->ucode_init_data.v_addr, src, len); 3121 memcpy(priv->ucode_init_data.v_addr, src, len);
5511 } 3122 }
5512 3123
5513 /* Bootstrap instructions (5th block) */ 3124 /* Bootstrap instructions (5th block) */
5514 src = &ucode->data[inst_size + data_size + init_size + init_data_size]; 3125 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
5515 len = priv->ucode_boot.len; 3126 len = priv->ucode_boot.len;
5516 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n", 3127 IWL_DEBUG_INFO(priv,
5517 (int)len); 3128 "Copying (but not loading) boot instr len %zd\n", len);
5518 memcpy(priv->ucode_boot.v_addr, src, len); 3129 memcpy(priv->ucode_boot.v_addr, src, len);
5519 3130
5520 /* We have our copies now, allow OS release its copies */ 3131 /* We have our copies now, allow OS release its copies */
@@ -5522,7 +3133,7 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5522 return 0; 3133 return 0;
5523 3134
5524 err_pci_alloc: 3135 err_pci_alloc:
5525 IWL_ERROR("failed to allocate pci memory\n"); 3136 IWL_ERR(priv, "failed to allocate pci memory\n");
5526 ret = -ENOMEM; 3137 ret = -ENOMEM;
5527 iwl3945_dealloc_ucode_pci(priv); 3138 iwl3945_dealloc_ucode_pci(priv);
5528 3139
@@ -5543,7 +3154,7 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5543 * We need to replace them to load runtime uCode inst and data, 3154 * We need to replace them to load runtime uCode inst and data,
5544 * and to save runtime data when powering down. 3155 * and to save runtime data when powering down.
5545 */ 3156 */
5546static int iwl3945_set_ucode_ptrs(struct iwl3945_priv *priv) 3157static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
5547{ 3158{
5548 dma_addr_t pinst; 3159 dma_addr_t pinst;
5549 dma_addr_t pdata; 3160 dma_addr_t pdata;
@@ -5555,28 +3166,28 @@ static int iwl3945_set_ucode_ptrs(struct iwl3945_priv *priv)
5555 pdata = priv->ucode_data_backup.p_addr; 3166 pdata = priv->ucode_data_backup.p_addr;
5556 3167
5557 spin_lock_irqsave(&priv->lock, flags); 3168 spin_lock_irqsave(&priv->lock, flags);
5558 rc = iwl3945_grab_nic_access(priv); 3169 rc = iwl_grab_nic_access(priv);
5559 if (rc) { 3170 if (rc) {
5560 spin_unlock_irqrestore(&priv->lock, flags); 3171 spin_unlock_irqrestore(&priv->lock, flags);
5561 return rc; 3172 return rc;
5562 } 3173 }
5563 3174
5564 /* Tell bootstrap uCode where to find image to load */ 3175 /* Tell bootstrap uCode where to find image to load */
5565 iwl3945_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 3176 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5566 iwl3945_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 3177 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5567 iwl3945_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 3178 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
5568 priv->ucode_data.len); 3179 priv->ucode_data.len);
5569 3180
5570 /* Inst byte count must be last to set up, bit 31 signals uCode 3181 /* Inst byte count must be last to set up, bit 31 signals uCode
5571 * that all new ptr/size info is in place */ 3182 * that all new ptr/size info is in place */
5572 iwl3945_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 3183 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
5573 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 3184 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5574 3185
5575 iwl3945_release_nic_access(priv); 3186 iwl_release_nic_access(priv);
5576 3187
5577 spin_unlock_irqrestore(&priv->lock, flags); 3188 spin_unlock_irqrestore(&priv->lock, flags);
5578 3189
5579 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); 3190 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
5580 3191
5581 return rc; 3192 return rc;
5582} 3193}
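A small reading note: BSM_DRAM_INST_LOAD is the bit-31 flag referred to in the comment, so OR-ing it into the instruction byte count programs the size and, in the same register write, tells the bootstrap state machine that the complete pointer/size set is now valid.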
@@ -5588,13 +3199,13 @@ static int iwl3945_set_ucode_ptrs(struct iwl3945_priv *priv)
5588 * 3199 *
5589 * Tell "initialize" uCode to go ahead and load the runtime uCode. 3200 * Tell "initialize" uCode to go ahead and load the runtime uCode.
5590 */ 3201 */
5591static void iwl3945_init_alive_start(struct iwl3945_priv *priv) 3202static void iwl3945_init_alive_start(struct iwl_priv *priv)
5592{ 3203{
5593 /* Check alive response for "valid" sign from uCode */ 3204 /* Check alive response for "valid" sign from uCode */
5594 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { 3205 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5595 /* We had an error bringing up the hardware, so take it 3206 /* We had an error bringing up the hardware, so take it
5596 * all the way back down so we can try again */ 3207 * all the way back down so we can try again */
5597 IWL_DEBUG_INFO("Initialize Alive failed.\n"); 3208 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
5598 goto restart; 3209 goto restart;
5599 } 3210 }
5600 3211
@@ -5604,18 +3215,18 @@ static void iwl3945_init_alive_start(struct iwl3945_priv *priv)
5604 if (iwl3945_verify_ucode(priv)) { 3215 if (iwl3945_verify_ucode(priv)) {
5605 /* Runtime instruction load was bad; 3216 /* Runtime instruction load was bad;
5606 * take it all the way back down so we can try again */ 3217 * take it all the way back down so we can try again */
5607 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); 3218 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
5608 goto restart; 3219 goto restart;
5609 } 3220 }
5610 3221
5611 /* Send pointers to protocol/runtime uCode image ... init code will 3222 /* Send pointers to protocol/runtime uCode image ... init code will
5612 * load and launch runtime uCode, which will send us another "Alive" 3223 * load and launch runtime uCode, which will send us another "Alive"
5613 * notification. */ 3224 * notification. */
5614 IWL_DEBUG_INFO("Initialization Alive received.\n"); 3225 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
5615 if (iwl3945_set_ucode_ptrs(priv)) { 3226 if (iwl3945_set_ucode_ptrs(priv)) {
5616 /* Runtime instruction load won't happen; 3227 /* Runtime instruction load won't happen;
5617 * take it all the way back down so we can try again */ 3228 * take it all the way back down so we can try again */
5618 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); 3229 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
5619 goto restart; 3230 goto restart;
5620 } 3231 }
5621 return; 3232 return;
@@ -5634,18 +3245,18 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
5634 * from protocol/runtime uCode (initialization uCode's 3245 * from protocol/runtime uCode (initialization uCode's
5635 * Alive gets handled by iwl3945_init_alive_start()). 3246 * Alive gets handled by iwl3945_init_alive_start()).
5636 */ 3247 */
5637static void iwl3945_alive_start(struct iwl3945_priv *priv) 3248static void iwl3945_alive_start(struct iwl_priv *priv)
5638{ 3249{
5639 int rc = 0; 3250 int rc = 0;
5640 int thermal_spin = 0; 3251 int thermal_spin = 0;
5641 u32 rfkill; 3252 u32 rfkill;
5642 3253
5643 IWL_DEBUG_INFO("Runtime Alive received.\n"); 3254 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
5644 3255
5645 if (priv->card_alive.is_valid != UCODE_VALID_OK) { 3256 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
5646 /* We had an error bringing up the hardware, so take it 3257 /* We had an error bringing up the hardware, so take it
5647 * all the way back down so we can try again */ 3258 * all the way back down so we can try again */
5648 IWL_DEBUG_INFO("Alive failed.\n"); 3259 IWL_DEBUG_INFO(priv, "Alive failed.\n");
5649 goto restart; 3260 goto restart;
5650 } 3261 }
5651 3262
@@ -5655,21 +3266,21 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5655 if (iwl3945_verify_ucode(priv)) { 3266 if (iwl3945_verify_ucode(priv)) {
5656 /* Runtime instruction load was bad; 3267 /* Runtime instruction load was bad;
5657 * take it all the way back down so we can try again */ 3268 * take it all the way back down so we can try again */
5658 IWL_DEBUG_INFO("Bad runtime uCode load.\n"); 3269 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
5659 goto restart; 3270 goto restart;
5660 } 3271 }
5661 3272
5662 iwl3945_clear_stations_table(priv); 3273 iwl3945_clear_stations_table(priv);
5663 3274
5664 rc = iwl3945_grab_nic_access(priv); 3275 rc = iwl_grab_nic_access(priv);
5665 if (rc) { 3276 if (rc) {
5666 IWL_WARNING("Can not read RFKILL status from adapter\n"); 3277 IWL_WARN(priv, "Can not read RFKILL status from adapter\n");
5667 return; 3278 return;
5668 } 3279 }
5669 3280
5670 rfkill = iwl3945_read_prph(priv, APMG_RFKILL_REG); 3281 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
5671 IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill); 3282 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
5672 iwl3945_release_nic_access(priv); 3283 iwl_release_nic_access(priv);
5673 3284
5674 if (rfkill & 0x1) { 3285 if (rfkill & 0x1) {
5675 clear_bit(STATUS_RF_KILL_HW, &priv->status); 3286 clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -5681,7 +3292,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5681 } 3292 }
5682 3293
5683 if (thermal_spin) 3294 if (thermal_spin)
5684 IWL_DEBUG_INFO("Thermal calibration took %dus\n", 3295 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
5685 thermal_spin * 10); 3296 thermal_spin * 10);
5686 } else 3297 } else
5687 set_bit(STATUS_RF_KILL_HW, &priv->status); 3298 set_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -5692,7 +3303,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5692 /* Clear out the uCode error bit if it is set */ 3303 /* Clear out the uCode error bit if it is set */
5693 clear_bit(STATUS_FW_ERROR, &priv->status); 3304 clear_bit(STATUS_FW_ERROR, &priv->status);
5694 3305
5695 if (iwl3945_is_rfkill(priv)) 3306 if (iwl_is_rfkill(priv))
5696 return; 3307 return;
5697 3308
5698 ieee80211_wake_queues(priv->hw); 3309 ieee80211_wake_queues(priv->hw);
@@ -5700,9 +3311,9 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5700 priv->active_rate = priv->rates_mask; 3311 priv->active_rate = priv->rates_mask;
5701 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 3312 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5702 3313
5703 iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); 3314 iwl_power_update_mode(priv, false);
5704 3315
5705 if (iwl3945_is_associated(priv)) { 3316 if (iwl_is_associated(priv)) {
5706 struct iwl3945_rxon_cmd *active_rxon = 3317 struct iwl3945_rxon_cmd *active_rxon =
5707 (struct iwl3945_rxon_cmd *)(&priv->active_rxon); 3318 (struct iwl3945_rxon_cmd *)(&priv->active_rxon);
5708 3319
@@ -5711,12 +3322,11 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5711 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3322 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5712 } else { 3323 } else {
5713 /* Initialize our rx_config data */ 3324 /* Initialize our rx_config data */
5714 iwl3945_connection_init_rx_config(priv, priv->iw_mode); 3325 iwl_connection_init_rx_config(priv, priv->iw_mode);
5715 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
5716 } 3326 }
5717 3327
5718 /* Configure Bluetooth device coexistence support */ 3328 /* Configure Bluetooth device coexistence support */
5719 iwl3945_send_bt_config(priv); 3329 iwl_send_bt_config(priv);
5720 3330
5721 /* Configure the adapter for unassociated operation */ 3331 /* Configure the adapter for unassociated operation */
5722 iwl3945_commit_rxon(priv); 3332 iwl3945_commit_rxon(priv);
@@ -5725,7 +3335,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5725 3335
5726 iwl3945_led_register(priv); 3336 iwl3945_led_register(priv);
5727 3337
5728 IWL_DEBUG_INFO("ALIVE processing complete.\n"); 3338 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
5729 set_bit(STATUS_READY, &priv->status); 3339 set_bit(STATUS_READY, &priv->status);
5730 wake_up_interruptible(&priv->wait_command_queue); 3340 wake_up_interruptible(&priv->wait_command_queue);
5731 3341
@@ -5746,15 +3356,15 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5746 queue_work(priv->workqueue, &priv->restart); 3356 queue_work(priv->workqueue, &priv->restart);
5747} 3357}
5748 3358
5749static void iwl3945_cancel_deferred_work(struct iwl3945_priv *priv); 3359static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
5750 3360
5751static void __iwl3945_down(struct iwl3945_priv *priv) 3361static void __iwl3945_down(struct iwl_priv *priv)
5752{ 3362{
5753 unsigned long flags; 3363 unsigned long flags;
5754 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 3364 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5755 struct ieee80211_conf *conf = NULL; 3365 struct ieee80211_conf *conf = NULL;
5756 3366
5757 IWL_DEBUG_INFO(DRV_NAME " is going down\n"); 3367 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
5758 3368
5759 conf = ieee80211_get_hw_conf(priv->hw); 3369 conf = ieee80211_get_hw_conf(priv->hw);
5760 3370
@@ -5773,7 +3383,7 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
5773 clear_bit(STATUS_EXIT_PENDING, &priv->status); 3383 clear_bit(STATUS_EXIT_PENDING, &priv->status);
5774 3384
5775 /* stop and reset the on-board processor */ 3385 /* stop and reset the on-board processor */
5776 iwl3945_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3386 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
5777 3387
5778 /* tell the device to stop sending interrupts */ 3388 /* tell the device to stop sending interrupts */
5779 spin_lock_irqsave(&priv->lock, flags); 3389 spin_lock_irqsave(&priv->lock, flags);
@@ -5786,7 +3396,7 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
5786 3396
5787 /* If we have not previously called iwl3945_init() then 3397 /* If we have not previously called iwl3945_init() then
5788 * clear all bits but the RF Kill and SUSPEND bits and return */ 3398 * clear all bits but the RF Kill and SUSPEND bits and return */
5789 if (!iwl3945_is_init(priv)) { 3399 if (!iwl_is_init(priv)) {
5790 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 3400 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5791 STATUS_RF_KILL_HW | 3401 STATUS_RF_KILL_HW |
5792 test_bit(STATUS_RF_KILL_SW, &priv->status) << 3402 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
@@ -5815,29 +3425,31 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
5815 test_bit(STATUS_EXIT_PENDING, &priv->status) << 3425 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5816 STATUS_EXIT_PENDING; 3426 STATUS_EXIT_PENDING;
5817 3427
3428 priv->cfg->ops->lib->apm_ops.reset(priv);
5818 spin_lock_irqsave(&priv->lock, flags); 3429 spin_lock_irqsave(&priv->lock, flags);
5819 iwl3945_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 3430 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5820 spin_unlock_irqrestore(&priv->lock, flags); 3431 spin_unlock_irqrestore(&priv->lock, flags);
5821 3432
5822 iwl3945_hw_txq_ctx_stop(priv); 3433 iwl3945_hw_txq_ctx_stop(priv);
5823 iwl3945_hw_rxq_stop(priv); 3434 iwl3945_hw_rxq_stop(priv);
5824 3435
5825 spin_lock_irqsave(&priv->lock, flags); 3436 spin_lock_irqsave(&priv->lock, flags);
5826 if (!iwl3945_grab_nic_access(priv)) { 3437 if (!iwl_grab_nic_access(priv)) {
5827 iwl3945_write_prph(priv, APMG_CLK_DIS_REG, 3438 iwl_write_prph(priv, APMG_CLK_DIS_REG,
5828 APMG_CLK_VAL_DMA_CLK_RQT); 3439 APMG_CLK_VAL_DMA_CLK_RQT);
5829 iwl3945_release_nic_access(priv); 3440 iwl_release_nic_access(priv);
5830 } 3441 }
5831 spin_unlock_irqrestore(&priv->lock, flags); 3442 spin_unlock_irqrestore(&priv->lock, flags);
5832 3443
5833 udelay(5); 3444 udelay(5);
5834 3445
5835 iwl3945_hw_nic_stop_master(priv); 3446 if (exit_pending || test_bit(STATUS_IN_SUSPEND, &priv->status))
5836 iwl3945_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 3447 priv->cfg->ops->lib->apm_ops.stop(priv);
5837 iwl3945_hw_nic_reset(priv); 3448 else
3449 priv->cfg->ops->lib->apm_ops.reset(priv);
5838 3450
5839 exit: 3451 exit:
5840 memset(&priv->card_alive, 0, sizeof(struct iwl3945_alive_resp)); 3452 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
5841 3453
5842 if (priv->ibss_beacon) 3454 if (priv->ibss_beacon)
5843 dev_kfree_skb(priv->ibss_beacon); 3455 dev_kfree_skb(priv->ibss_beacon);
@@ -5847,7 +3459,7 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
5847 iwl3945_clear_free_frames(priv); 3459 iwl3945_clear_free_frames(priv);
5848} 3460}
5849 3461
5850static void iwl3945_down(struct iwl3945_priv *priv) 3462static void iwl3945_down(struct iwl_priv *priv)
5851{ 3463{
5852 mutex_lock(&priv->mutex); 3464 mutex_lock(&priv->mutex);
5853 __iwl3945_down(priv); 3465 __iwl3945_down(priv);
@@ -5858,58 +3470,58 @@ static void iwl3945_down(struct iwl3945_priv *priv)
5858 3470
5859#define MAX_HW_RESTARTS 5 3471#define MAX_HW_RESTARTS 5
5860 3472
5861static int __iwl3945_up(struct iwl3945_priv *priv) 3473static int __iwl3945_up(struct iwl_priv *priv)
5862{ 3474{
5863 int rc, i; 3475 int rc, i;
5864 3476
5865 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 3477 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5866 IWL_WARNING("Exit pending; will not bring the NIC up\n"); 3478 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
5867 return -EIO; 3479 return -EIO;
5868 } 3480 }
5869 3481
5870 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { 3482 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
5871 IWL_WARNING("Radio disabled by SW RF kill (module " 3483 IWL_WARN(priv, "Radio disabled by SW RF kill (module "
5872 "parameter)\n"); 3484 "parameter)\n");
5873 return -ENODEV; 3485 return -ENODEV;
5874 } 3486 }
5875 3487
5876 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 3488 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
5877 IWL_ERROR("ucode not available for device bring up\n"); 3489 IWL_ERR(priv, "ucode not available for device bring up\n");
5878 return -EIO; 3490 return -EIO;
5879 } 3491 }
5880 3492
5881 /* If platform's RF_KILL switch is NOT set to KILL */ 3493 /* If platform's RF_KILL switch is NOT set to KILL */
5882 if (iwl3945_read32(priv, CSR_GP_CNTRL) & 3494 if (iwl_read32(priv, CSR_GP_CNTRL) &
5883 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 3495 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5884 clear_bit(STATUS_RF_KILL_HW, &priv->status); 3496 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5885 else { 3497 else {
5886 set_bit(STATUS_RF_KILL_HW, &priv->status); 3498 set_bit(STATUS_RF_KILL_HW, &priv->status);
5887 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) { 3499 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
5888 IWL_WARNING("Radio disabled by HW RF Kill switch\n"); 3500 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
5889 return -ENODEV; 3501 return -ENODEV;
5890 } 3502 }
5891 } 3503 }
5892 3504
5893 iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF); 3505 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
5894 3506
5895 rc = iwl3945_hw_nic_init(priv); 3507 rc = iwl3945_hw_nic_init(priv);
5896 if (rc) { 3508 if (rc) {
5897 IWL_ERROR("Unable to int nic\n"); 3509 IWL_ERR(priv, "Unable to int nic\n");
5898 return rc; 3510 return rc;
5899 } 3511 }
5900 3512
5901 /* make sure rfkill handshake bits are cleared */ 3513 /* make sure rfkill handshake bits are cleared */
5902 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3514 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5903 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, 3515 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
5904 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3516 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5905 3517
5906 /* clear (again), then enable host interrupts */ 3518 /* clear (again), then enable host interrupts */
5907 iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF); 3519 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
5908 iwl3945_enable_interrupts(priv); 3520 iwl3945_enable_interrupts(priv);
5909 3521
5910 /* really make sure rfkill handshake bits are cleared */ 3522 /* really make sure rfkill handshake bits are cleared */
5911 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3523 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5912 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3524 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5913 3525
5914 /* Copy original ucode data image from disk into backup cache. 3526 /* Copy original ucode data image from disk into backup cache.
5915 * This will be used to initialize the on-board processor's 3527 * This will be used to initialize the on-board processor's
@@ -5928,17 +3540,18 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
5928 /* load bootstrap state machine, 3540 /* load bootstrap state machine,
5929 * load bootstrap program into processor's memory, 3541 * load bootstrap program into processor's memory,
5930 * prepare to load the "initialize" uCode */ 3542 * prepare to load the "initialize" uCode */
5931 rc = iwl3945_load_bsm(priv); 3543 priv->cfg->ops->lib->load_ucode(priv);
5932 3544
5933 if (rc) { 3545 if (rc) {
5934 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); 3546 IWL_ERR(priv,
3547 "Unable to set up bootstrap uCode: %d\n", rc);
5935 continue; 3548 continue;
5936 } 3549 }
5937 3550
5938 /* start card; "initialize" will load runtime ucode */ 3551 /* start card; "initialize" will load runtime ucode */
5939 iwl3945_nic_start(priv); 3552 iwl3945_nic_start(priv);
5940 3553
5941 IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); 3554 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
5942 3555
5943 return 0; 3556 return 0;
5944 } 3557 }
@@ -5949,7 +3562,7 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
5949 3562
5950 /* tried to restart and config the device for as long as our 3563 /* tried to restart and config the device for as long as our
5951 * patience could withstand */ 3564 * patience could withstand */
5952 IWL_ERROR("Unable to initialize device after %d attempts.\n", i); 3565 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
5953 return -EIO; 3566 return -EIO;
5954} 3567}
5955 3568
@@ -5962,8 +3575,8 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
5962 3575
5963static void iwl3945_bg_init_alive_start(struct work_struct *data) 3576static void iwl3945_bg_init_alive_start(struct work_struct *data)
5964{ 3577{
5965 struct iwl3945_priv *priv = 3578 struct iwl_priv *priv =
5966 container_of(data, struct iwl3945_priv, init_alive_start.work); 3579 container_of(data, struct iwl_priv, init_alive_start.work);
5967 3580
5968 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3581 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5969 return; 3582 return;
@@ -5975,8 +3588,8 @@ static void iwl3945_bg_init_alive_start(struct work_struct *data)
5975 3588
5976static void iwl3945_bg_alive_start(struct work_struct *data) 3589static void iwl3945_bg_alive_start(struct work_struct *data)
5977{ 3590{
5978 struct iwl3945_priv *priv = 3591 struct iwl_priv *priv =
5979 container_of(data, struct iwl3945_priv, alive_start.work); 3592 container_of(data, struct iwl_priv, alive_start.work);
5980 3593
5981 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3594 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5982 return; 3595 return;
@@ -5986,66 +3599,31 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
5986 mutex_unlock(&priv->mutex); 3599 mutex_unlock(&priv->mutex);
5987} 3600}
5988 3601
5989static void iwl3945_bg_rf_kill(struct work_struct *work) 3602static void iwl3945_rfkill_poll(struct work_struct *data)
5990{ 3603{
5991 struct iwl3945_priv *priv = container_of(work, struct iwl3945_priv, rf_kill); 3604 struct iwl_priv *priv =
5992 3605 container_of(data, struct iwl_priv, rfkill_poll.work);
5993 wake_up_interruptible(&priv->wait_command_queue); 3606 unsigned long status = priv->status;
5994
5995 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5996 return;
5997 3607
5998 mutex_lock(&priv->mutex); 3608 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3609 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3610 else
3611 set_bit(STATUS_RF_KILL_HW, &priv->status);
5999 3612
6000 if (!iwl3945_is_rfkill(priv)) { 3613 if (test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))
6001 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, 3614 queue_work(priv->workqueue, &priv->rf_kill);
6002 "HW and/or SW RF Kill no longer active, restarting "
6003 "device\n");
6004 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6005 queue_work(priv->workqueue, &priv->restart);
6006 } else {
6007 3615
6008 if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) 3616 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
6009 IWL_DEBUG_RF_KILL("Can not turn radio back on - " 3617 round_jiffies_relative(2 * HZ));
6010 "disabled by SW switch\n");
6011 else
6012 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6013 "Kill switch must be turned off for "
6014 "wireless networking to work.\n");
6015 }
6016 3618
6017 mutex_unlock(&priv->mutex);
6018 iwl3945_rfkill_set_hw_state(priv);
6019} 3619}
6020 3620
6021#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 3621#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6022
6023static void iwl3945_bg_scan_check(struct work_struct *data)
6024{
6025 struct iwl3945_priv *priv =
6026 container_of(data, struct iwl3945_priv, scan_check.work);
6027
6028 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6029 return;
6030
6031 mutex_lock(&priv->mutex);
6032 if (test_bit(STATUS_SCANNING, &priv->status) ||
6033 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6034 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6035 "Scan completion watchdog resetting adapter (%dms)\n",
6036 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
6037
6038 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6039 iwl3945_send_scan_abort(priv);
6040 }
6041 mutex_unlock(&priv->mutex);
6042}
6043
6044static void iwl3945_bg_request_scan(struct work_struct *data) 3622static void iwl3945_bg_request_scan(struct work_struct *data)
6045{ 3623{
6046 struct iwl3945_priv *priv = 3624 struct iwl_priv *priv =
6047 container_of(data, struct iwl3945_priv, request_scan); 3625 container_of(data, struct iwl_priv, request_scan);
6048 struct iwl3945_host_cmd cmd = { 3626 struct iwl_host_cmd cmd = {
6049 .id = REPLY_SCAN_CMD, 3627 .id = REPLY_SCAN_CMD,
6050 .len = sizeof(struct iwl3945_scan_cmd), 3628 .len = sizeof(struct iwl3945_scan_cmd),
6051 .meta.flags = CMD_SIZE_HUGE, 3629 .meta.flags = CMD_SIZE_HUGE,
@@ -6061,8 +3639,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6061 3639
6062 mutex_lock(&priv->mutex); 3640 mutex_lock(&priv->mutex);
6063 3641
6064 if (!iwl3945_is_ready(priv)) { 3642 if (!iwl_is_ready(priv)) {
6065 IWL_WARNING("request scan called when driver not ready.\n"); 3643 IWL_WARN(priv, "request scan called when driver not ready.\n");
6066 goto done; 3644 goto done;
6067 } 3645 }
6068 3646
@@ -6074,34 +3652,36 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6074 /* This should never be called or scheduled if there is currently 3652 /* This should never be called or scheduled if there is currently
6075 * a scan active in the hardware. */ 3653 * a scan active in the hardware. */
6076 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 3654 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6077 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. " 3655 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
6078 "Ignoring second request.\n"); 3656 "Ignoring second request.\n");
6079 rc = -EIO; 3657 rc = -EIO;
6080 goto done; 3658 goto done;
6081 } 3659 }
6082 3660
6083 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 3661 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6084 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); 3662 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
6085 goto done; 3663 goto done;
6086 } 3664 }
6087 3665
6088 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 3666 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6089 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); 3667 IWL_DEBUG_HC(priv,
3668 "Scan request while abort pending. Queuing.\n");
6090 goto done; 3669 goto done;
6091 } 3670 }
6092 3671
6093 if (iwl3945_is_rfkill(priv)) { 3672 if (iwl_is_rfkill(priv)) {
6094 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); 3673 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
6095 goto done; 3674 goto done;
6096 } 3675 }
6097 3676
6098 if (!test_bit(STATUS_READY, &priv->status)) { 3677 if (!test_bit(STATUS_READY, &priv->status)) {
6099 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); 3678 IWL_DEBUG_HC(priv,
3679 "Scan request while uninitialized. Queuing.\n");
6100 goto done; 3680 goto done;
6101 } 3681 }
6102 3682
6103 if (!priv->scan_bands) { 3683 if (!priv->scan_bands) {
6104 IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); 3684 IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
6105 goto done; 3685 goto done;
6106 } 3686 }
6107 3687
@@ -6119,14 +3699,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6119 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 3699 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
6120 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 3700 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
6121 3701
6122 if (iwl3945_is_associated(priv)) { 3702 if (iwl_is_associated(priv)) {
6123 u16 interval = 0; 3703 u16 interval = 0;
6124 u32 extra; 3704 u32 extra;
6125 u32 suspend_time = 100; 3705 u32 suspend_time = 100;
6126 u32 scan_suspend_time = 100; 3706 u32 scan_suspend_time = 100;
6127 unsigned long flags; 3707 unsigned long flags;
6128 3708
6129 IWL_DEBUG_INFO("Scanning while associated...\n"); 3709 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
6130 3710
6131 spin_lock_irqsave(&priv->lock, flags); 3711 spin_lock_irqsave(&priv->lock, flags);
6132 interval = priv->beacon_int; 3712 interval = priv->beacon_int;
@@ -6148,15 +3728,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6148 (extra | ((suspend_time % interval) * 1024)); 3728 (extra | ((suspend_time % interval) * 1024));
6149 3729
6150 scan->suspend_time = cpu_to_le32(scan_suspend_time); 3730 scan->suspend_time = cpu_to_le32(scan_suspend_time);
6151 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", 3731 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
6152 scan_suspend_time, interval); 3732 scan_suspend_time, interval);
6153 } 3733 }
6154 3734
6155 /* We should add the ability for user to lock to PASSIVE ONLY */ 3735 /* We should add the ability for user to lock to PASSIVE ONLY */
6156 if (priv->one_direct_scan) { 3736 if (priv->one_direct_scan) {
6157 IWL_DEBUG_SCAN 3737 IWL_DEBUG_SCAN(priv, "Kicking off one direct scan for '%s'\n",
6158 ("Kicking off one direct scan for '%s'\n", 3738 print_ssid(ssid, priv->direct_ssid,
6159 print_ssid(ssid, priv->direct_ssid,
6160 priv->direct_ssid_len)); 3739 priv->direct_ssid_len));
6161 scan->direct_scan[0].id = WLAN_EID_SSID; 3740 scan->direct_scan[0].id = WLAN_EID_SSID;
6162 scan->direct_scan[0].len = priv->direct_ssid_len; 3741 scan->direct_scan[0].len = priv->direct_ssid_len;
@@ -6164,15 +3743,12 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6164 priv->direct_ssid, priv->direct_ssid_len); 3743 priv->direct_ssid, priv->direct_ssid_len);
6165 n_probes++; 3744 n_probes++;
6166 } else 3745 } else
6167 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n"); 3746 IWL_DEBUG_SCAN(priv, "Kicking off one indirect scan.\n");
6168 3747
6169 /* We don't build a direct scan probe request; the uCode will do 3748 /* We don't build a direct scan probe request; the uCode will do
6170 * that based on the direct_mask added to each channel entry */ 3749 * that based on the direct_mask added to each channel entry */
6171 scan->tx_cmd.len = cpu_to_le16(
6172 iwl3945_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
6173 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
6174 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 3750 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
6175 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; 3751 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
6176 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 3752 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
6177 3753
6178 /* flags + rate selection */ 3754 /* flags + rate selection */
@@ -6187,10 +3763,15 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6187 scan->good_CRC_th = IWL_GOOD_CRC_TH; 3763 scan->good_CRC_th = IWL_GOOD_CRC_TH;
6188 band = IEEE80211_BAND_5GHZ; 3764 band = IEEE80211_BAND_5GHZ;
6189 } else { 3765 } else {
6190 IWL_WARNING("Invalid scan band count\n"); 3766 IWL_WARN(priv, "Invalid scan band count\n");
6191 goto done; 3767 goto done;
6192 } 3768 }
6193 3769
3770 scan->tx_cmd.len = cpu_to_le16(
3771 iwl_fill_probe_req(priv, band,
3772 (struct ieee80211_mgmt *)scan->data,
3773 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
3774
6194 /* select Rx antennas */ 3775 /* select Rx antennas */
6195 scan->flags |= iwl3945_get_antenna_flags(priv); 3776 scan->flags |= iwl3945_get_antenna_flags(priv);
6196 3777
@@ -6203,7 +3784,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6203 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 3784 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6204 3785
6205 if (scan->channel_count == 0) { 3786 if (scan->channel_count == 0) {
6206 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count); 3787 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
6207 goto done; 3788 goto done;
6208 } 3789 }
6209 3790
@@ -6213,7 +3794,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6213 scan->len = cpu_to_le16(cmd.len); 3794 scan->len = cpu_to_le16(cmd.len);
6214 3795
6215 set_bit(STATUS_SCAN_HW, &priv->status); 3796 set_bit(STATUS_SCAN_HW, &priv->status);
6216 rc = iwl3945_send_cmd_sync(priv, &cmd); 3797 rc = iwl_send_cmd_sync(priv, &cmd);
6217 if (rc) 3798 if (rc)
6218 goto done; 3799 goto done;
6219 3800
@@ -6239,7 +3820,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6239 3820
6240static void iwl3945_bg_up(struct work_struct *data) 3821static void iwl3945_bg_up(struct work_struct *data)
6241{ 3822{
6242 struct iwl3945_priv *priv = container_of(data, struct iwl3945_priv, up); 3823 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
6243 3824
6244 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3825 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6245 return; 3826 return;
@@ -6247,12 +3828,12 @@ static void iwl3945_bg_up(struct work_struct *data)
6247 mutex_lock(&priv->mutex); 3828 mutex_lock(&priv->mutex);
6248 __iwl3945_up(priv); 3829 __iwl3945_up(priv);
6249 mutex_unlock(&priv->mutex); 3830 mutex_unlock(&priv->mutex);
6250 iwl3945_rfkill_set_hw_state(priv); 3831 iwl_rfkill_set_hw_state(priv);
6251} 3832}
6252 3833
6253static void iwl3945_bg_restart(struct work_struct *data) 3834static void iwl3945_bg_restart(struct work_struct *data)
6254{ 3835{
6255 struct iwl3945_priv *priv = container_of(data, struct iwl3945_priv, restart); 3836 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
6256 3837
6257 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3838 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6258 return; 3839 return;
@@ -6263,8 +3844,8 @@ static void iwl3945_bg_restart(struct work_struct *data)
6263 3844
6264static void iwl3945_bg_rx_replenish(struct work_struct *data) 3845static void iwl3945_bg_rx_replenish(struct work_struct *data)
6265{ 3846{
6266 struct iwl3945_priv *priv = 3847 struct iwl_priv *priv =
6267 container_of(data, struct iwl3945_priv, rx_replenish); 3848 container_of(data, struct iwl_priv, rx_replenish);
6268 3849
6269 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3850 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6270 return; 3851 return;
@@ -6276,18 +3857,18 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
6276 3857
6277#define IWL_DELAY_NEXT_SCAN (HZ*2) 3858#define IWL_DELAY_NEXT_SCAN (HZ*2)
6278 3859
6279static void iwl3945_post_associate(struct iwl3945_priv *priv) 3860static void iwl3945_post_associate(struct iwl_priv *priv)
6280{ 3861{
6281 int rc = 0; 3862 int rc = 0;
6282 struct ieee80211_conf *conf = NULL; 3863 struct ieee80211_conf *conf = NULL;
6283 3864
6284 if (priv->iw_mode == NL80211_IFTYPE_AP) { 3865 if (priv->iw_mode == NL80211_IFTYPE_AP) {
6285 IWL_ERROR("%s Should not be called in AP mode\n", __func__); 3866 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
6286 return; 3867 return;
6287 } 3868 }
6288 3869
6289 3870
6290 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n", 3871 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
6291 priv->assoc_id, priv->active_rxon.bssid_addr); 3872 priv->assoc_id, priv->active_rxon.bssid_addr);
6292 3873
6293 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3874 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -6296,26 +3877,26 @@ static void iwl3945_post_associate(struct iwl3945_priv *priv)
6296 if (!priv->vif || !priv->is_open) 3877 if (!priv->vif || !priv->is_open)
6297 return; 3878 return;
6298 3879
6299 iwl3945_scan_cancel_timeout(priv, 200); 3880 iwl_scan_cancel_timeout(priv, 200);
6300 3881
6301 conf = ieee80211_get_hw_conf(priv->hw); 3882 conf = ieee80211_get_hw_conf(priv->hw);
6302 3883
6303 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3884 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6304 iwl3945_commit_rxon(priv); 3885 iwl3945_commit_rxon(priv);
6305 3886
6306 memset(&priv->rxon_timing, 0, sizeof(struct iwl3945_rxon_time_cmd)); 3887 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
6307 iwl3945_setup_rxon_timing(priv); 3888 iwl3945_setup_rxon_timing(priv);
6308 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON_TIMING, 3889 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
6309 sizeof(priv->rxon_timing), &priv->rxon_timing); 3890 sizeof(priv->rxon_timing), &priv->rxon_timing);
6310 if (rc) 3891 if (rc)
6311 IWL_WARNING("REPLY_RXON_TIMING failed - " 3892 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
6312 "Attempting to continue.\n"); 3893 "Attempting to continue.\n");
6313 3894
6314 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3895 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6315 3896
6316 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3897 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
6317 3898
6318 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", 3899 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
6319 priv->assoc_id, priv->beacon_int); 3900 priv->assoc_id, priv->beacon_int);
6320 3901
6321 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 3902 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
@@ -6355,7 +3936,7 @@ static void iwl3945_post_associate(struct iwl3945_priv *priv)
6355 break; 3936 break;
6356 3937
6357 default: 3938 default:
6358 IWL_ERROR("%s Should not be called in %d mode\n", 3939 IWL_ERR(priv, "%s Should not be called in %d mode\n",
6359 __func__, priv->iw_mode); 3940 __func__, priv->iw_mode);
6360 break; 3941 break;
6361 } 3942 }
@@ -6366,45 +3947,8 @@ static void iwl3945_post_associate(struct iwl3945_priv *priv)
6366 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 3947 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6367} 3948}
6368 3949
6369static void iwl3945_bg_abort_scan(struct work_struct *work)
6370{
6371 struct iwl3945_priv *priv = container_of(work, struct iwl3945_priv, abort_scan);
6372
6373 if (!iwl3945_is_ready(priv))
6374 return;
6375
6376 mutex_lock(&priv->mutex);
6377
6378 set_bit(STATUS_SCAN_ABORTING, &priv->status);
6379 iwl3945_send_scan_abort(priv);
6380
6381 mutex_unlock(&priv->mutex);
6382}
6383
6384static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed); 3950static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);
6385 3951
6386static void iwl3945_bg_scan_completed(struct work_struct *work)
6387{
6388 struct iwl3945_priv *priv =
6389 container_of(work, struct iwl3945_priv, scan_completed);
6390
6391 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
6392
6393 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6394 return;
6395
6396 if (test_bit(STATUS_CONF_PENDING, &priv->status))
6397 iwl3945_mac_config(priv->hw, 0);
6398
6399 ieee80211_scan_completed(priv->hw);
6400
6401 /* Since setting the TXPOWER may have been deferred while
6402 * performing the scan, fire one off */
6403 mutex_lock(&priv->mutex);
6404 iwl3945_hw_reg_send_txpower(priv);
6405 mutex_unlock(&priv->mutex);
6406}
6407
6408/***************************************************************************** 3952/*****************************************************************************
6409 * 3953 *
6410 * mac80211 entry point functions 3954 * mac80211 entry point functions
@@ -6415,36 +3959,22 @@ static void iwl3945_bg_scan_completed(struct work_struct *work)
6415 3959
6416static int iwl3945_mac_start(struct ieee80211_hw *hw) 3960static int iwl3945_mac_start(struct ieee80211_hw *hw)
6417{ 3961{
6418 struct iwl3945_priv *priv = hw->priv; 3962 struct iwl_priv *priv = hw->priv;
6419 int ret; 3963 int ret;
6420 3964
6421 IWL_DEBUG_MAC80211("enter\n"); 3965 IWL_DEBUG_MAC80211(priv, "enter\n");
6422
6423 if (pci_enable_device(priv->pci_dev)) {
6424 IWL_ERROR("Fail to pci_enable_device\n");
6425 return -ENODEV;
6426 }
6427 pci_restore_state(priv->pci_dev);
6428 pci_enable_msi(priv->pci_dev);
6429
6430 ret = request_irq(priv->pci_dev->irq, iwl3945_isr, IRQF_SHARED,
6431 DRV_NAME, priv);
6432 if (ret) {
6433 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
6434 goto out_disable_msi;
6435 }
6436 3966
6437 /* we should be verifying the device is ready to be opened */ 3967 /* we should be verifying the device is ready to be opened */
6438 mutex_lock(&priv->mutex); 3968 mutex_lock(&priv->mutex);
6439 3969
6440 memset(&priv->staging_rxon, 0, sizeof(struct iwl3945_rxon_cmd)); 3970 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
6441 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 3971 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
6442 * ucode filename and max sizes are card-specific. */ 3972 * ucode filename and max sizes are card-specific. */
6443 3973
6444 if (!priv->ucode_code.len) { 3974 if (!priv->ucode_code.len) {
6445 ret = iwl3945_read_ucode(priv); 3975 ret = iwl3945_read_ucode(priv);
6446 if (ret) { 3976 if (ret) {
6447 IWL_ERROR("Could not read microcode: %d\n", ret); 3977 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
6448 mutex_unlock(&priv->mutex); 3978 mutex_unlock(&priv->mutex);
6449 goto out_release_irq; 3979 goto out_release_irq;
6450 } 3980 }
@@ -6454,12 +3984,12 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
6454 3984
6455 mutex_unlock(&priv->mutex); 3985 mutex_unlock(&priv->mutex);
6456 3986
6457 iwl3945_rfkill_set_hw_state(priv); 3987 iwl_rfkill_set_hw_state(priv);
6458 3988
6459 if (ret) 3989 if (ret)
6460 goto out_release_irq; 3990 goto out_release_irq;
6461 3991
6462 IWL_DEBUG_INFO("Start UP work.\n"); 3992 IWL_DEBUG_INFO(priv, "Start UP work.\n");
6463 3993
6464 if (test_bit(STATUS_IN_SUSPEND, &priv->status)) 3994 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
6465 return 0; 3995 return 0;
@@ -6471,86 +4001,87 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
6471 UCODE_READY_TIMEOUT); 4001 UCODE_READY_TIMEOUT);
6472 if (!ret) { 4002 if (!ret) {
6473 if (!test_bit(STATUS_READY, &priv->status)) { 4003 if (!test_bit(STATUS_READY, &priv->status)) {
6474 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n", 4004 IWL_ERR(priv,
6475 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 4005 "Wait for START_ALIVE timeout after %dms.\n",
4006 jiffies_to_msecs(UCODE_READY_TIMEOUT));
6476 ret = -ETIMEDOUT; 4007 ret = -ETIMEDOUT;
6477 goto out_release_irq; 4008 goto out_release_irq;
6478 } 4009 }
6479 } 4010 }
6480 4011
4012 /* ucode is running and will send rfkill notifications,
4013 * no need to poll the killswitch state anymore */
4014 cancel_delayed_work(&priv->rfkill_poll);
4015
6481 priv->is_open = 1; 4016 priv->is_open = 1;
6482 IWL_DEBUG_MAC80211("leave\n"); 4017 IWL_DEBUG_MAC80211(priv, "leave\n");
6483 return 0; 4018 return 0;
6484 4019
6485out_release_irq: 4020out_release_irq:
6486 free_irq(priv->pci_dev->irq, priv);
6487out_disable_msi:
6488 pci_disable_msi(priv->pci_dev);
6489 pci_disable_device(priv->pci_dev);
6490 priv->is_open = 0; 4021 priv->is_open = 0;
6491 IWL_DEBUG_MAC80211("leave - failed\n"); 4022 IWL_DEBUG_MAC80211(priv, "leave - failed\n");
6492 return ret; 4023 return ret;
6493} 4024}
6494 4025
6495static void iwl3945_mac_stop(struct ieee80211_hw *hw) 4026static void iwl3945_mac_stop(struct ieee80211_hw *hw)
6496{ 4027{
6497 struct iwl3945_priv *priv = hw->priv; 4028 struct iwl_priv *priv = hw->priv;
6498 4029
6499 IWL_DEBUG_MAC80211("enter\n"); 4030 IWL_DEBUG_MAC80211(priv, "enter\n");
6500 4031
6501 if (!priv->is_open) { 4032 if (!priv->is_open) {
6502 IWL_DEBUG_MAC80211("leave - skip\n"); 4033 IWL_DEBUG_MAC80211(priv, "leave - skip\n");
6503 return; 4034 return;
6504 } 4035 }
6505 4036
6506 priv->is_open = 0; 4037 priv->is_open = 0;
6507 4038
6508 if (iwl3945_is_ready_rf(priv)) { 4039 if (iwl_is_ready_rf(priv)) {
6509 /* stop mac, cancel any scan request and clear 4040 /* stop mac, cancel any scan request and clear
6510 * RXON_FILTER_ASSOC_MSK BIT 4041 * RXON_FILTER_ASSOC_MSK BIT
6511 */ 4042 */
6512 mutex_lock(&priv->mutex); 4043 mutex_lock(&priv->mutex);
6513 iwl3945_scan_cancel_timeout(priv, 100); 4044 iwl_scan_cancel_timeout(priv, 100);
6514 mutex_unlock(&priv->mutex); 4045 mutex_unlock(&priv->mutex);
6515 } 4046 }
6516 4047
6517 iwl3945_down(priv); 4048 iwl3945_down(priv);
6518 4049
6519 flush_workqueue(priv->workqueue); 4050 flush_workqueue(priv->workqueue);
6520 free_irq(priv->pci_dev->irq, priv);
6521 pci_disable_msi(priv->pci_dev);
6522 pci_save_state(priv->pci_dev);
6523 pci_disable_device(priv->pci_dev);
6524 4051
6525 IWL_DEBUG_MAC80211("leave\n"); 4052 /* start polling the killswitch state again */
4053 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
4054 round_jiffies_relative(2 * HZ));
4055
4056 IWL_DEBUG_MAC80211(priv, "leave\n");
6526} 4057}
6527 4058
6528static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 4059static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6529{ 4060{
6530 struct iwl3945_priv *priv = hw->priv; 4061 struct iwl_priv *priv = hw->priv;
6531 4062
6532 IWL_DEBUG_MAC80211("enter\n"); 4063 IWL_DEBUG_MAC80211(priv, "enter\n");
6533 4064
6534 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 4065 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6535 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 4066 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6536 4067
6537 if (iwl3945_tx_skb(priv, skb)) 4068 if (iwl3945_tx_skb(priv, skb))
6538 dev_kfree_skb_any(skb); 4069 dev_kfree_skb_any(skb);
6539 4070
6540 IWL_DEBUG_MAC80211("leave\n"); 4071 IWL_DEBUG_MAC80211(priv, "leave\n");
6541 return NETDEV_TX_OK; 4072 return NETDEV_TX_OK;
6542} 4073}
6543 4074
6544static int iwl3945_mac_add_interface(struct ieee80211_hw *hw, 4075static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
6545 struct ieee80211_if_init_conf *conf) 4076 struct ieee80211_if_init_conf *conf)
6546{ 4077{
6547 struct iwl3945_priv *priv = hw->priv; 4078 struct iwl_priv *priv = hw->priv;
6548 unsigned long flags; 4079 unsigned long flags;
6549 4080
6550 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type); 4081 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
6551 4082
6552 if (priv->vif) { 4083 if (priv->vif) {
6553 IWL_DEBUG_MAC80211("leave - vif != NULL\n"); 4084 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
6554 return -EOPNOTSUPP; 4085 return -EOPNOTSUPP;
6555 } 4086 }
6556 4087
@@ -6563,16 +4094,16 @@ static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
6563 mutex_lock(&priv->mutex); 4094 mutex_lock(&priv->mutex);
6564 4095
6565 if (conf->mac_addr) { 4096 if (conf->mac_addr) {
6566 IWL_DEBUG_MAC80211("Set: %pM\n", conf->mac_addr); 4097 IWL_DEBUG_MAC80211(priv, "Set: %pM\n", conf->mac_addr);
6567 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 4098 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
6568 } 4099 }
6569 4100
6570 if (iwl3945_is_ready(priv)) 4101 if (iwl_is_ready(priv))
6571 iwl3945_set_mode(priv, conf->type); 4102 iwl3945_set_mode(priv, conf->type);
6572 4103
6573 mutex_unlock(&priv->mutex); 4104 mutex_unlock(&priv->mutex);
6574 4105
6575 IWL_DEBUG_MAC80211("leave\n"); 4106 IWL_DEBUG_MAC80211(priv, "leave\n");
6576 return 0; 4107 return 0;
6577} 4108}
6578 4109
@@ -6585,24 +4116,25 @@ static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
6585 */ 4116 */
6586static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed) 4117static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
6587{ 4118{
6588 struct iwl3945_priv *priv = hw->priv; 4119 struct iwl_priv *priv = hw->priv;
6589 const struct iwl3945_channel_info *ch_info; 4120 const struct iwl_channel_info *ch_info;
6590 struct ieee80211_conf *conf = &hw->conf; 4121 struct ieee80211_conf *conf = &hw->conf;
6591 unsigned long flags; 4122 unsigned long flags;
6592 int ret = 0; 4123 int ret = 0;
6593 4124
6594 mutex_lock(&priv->mutex); 4125 mutex_lock(&priv->mutex);
6595 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 4126 IWL_DEBUG_MAC80211(priv, "enter to channel %d\n",
4127 conf->channel->hw_value);
6596 4128
6597 if (!iwl3945_is_ready(priv)) { 4129 if (!iwl_is_ready(priv)) {
6598 IWL_DEBUG_MAC80211("leave - not ready\n"); 4130 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
6599 ret = -EIO; 4131 ret = -EIO;
6600 goto out; 4132 goto out;
6601 } 4133 }
6602 4134
6603 if (unlikely(!iwl3945_param_disable_hw_scan && 4135 if (unlikely(!iwl3945_mod_params.disable_hw_scan &&
6604 test_bit(STATUS_SCANNING, &priv->status))) { 4136 test_bit(STATUS_SCANNING, &priv->status))) {
6605 IWL_DEBUG_MAC80211("leave - scanning\n"); 4137 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
6606 set_bit(STATUS_CONF_PENDING, &priv->status); 4138 set_bit(STATUS_CONF_PENDING, &priv->status);
6607 mutex_unlock(&priv->mutex); 4139 mutex_unlock(&priv->mutex);
6608 return 0; 4140 return 0;
@@ -6610,25 +4142,26 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
6610 4142
6611 spin_lock_irqsave(&priv->lock, flags); 4143 spin_lock_irqsave(&priv->lock, flags);
6612 4144
6613 ch_info = iwl3945_get_channel_info(priv, conf->channel->band, 4145 ch_info = iwl_get_channel_info(priv, conf->channel->band,
6614 conf->channel->hw_value); 4146 conf->channel->hw_value);
6615 if (!is_channel_valid(ch_info)) { 4147 if (!is_channel_valid(ch_info)) {
6616 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n", 4148 IWL_DEBUG_SCAN(priv,
6617 conf->channel->hw_value, conf->channel->band); 4149 "Channel %d [%d] is INVALID for this band.\n",
6618 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 4150 conf->channel->hw_value, conf->channel->band);
4151 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
6619 spin_unlock_irqrestore(&priv->lock, flags); 4152 spin_unlock_irqrestore(&priv->lock, flags);
6620 ret = -EINVAL; 4153 ret = -EINVAL;
6621 goto out; 4154 goto out;
6622 } 4155 }
6623 4156
6624 iwl3945_set_rxon_channel(priv, conf->channel->band, conf->channel->hw_value); 4157 iwl_set_rxon_channel(priv, conf->channel);
6625 4158
6626 iwl3945_set_flags_for_phymode(priv, conf->channel->band); 4159 iwl_set_flags_for_band(priv, conf->channel->band);
6627 4160
6628 /* The list of supported rates and rate mask can be different 4161 /* The list of supported rates and rate mask can be different
6629 * for each phymode; since the phymode may have changed, reset 4162 * for each phymode; since the phymode may have changed, reset
6630 * the rate mask to what mac80211 lists */ 4163 * the rate mask to what mac80211 lists */
6631 iwl3945_set_rate(priv); 4164 iwl_set_rate(priv);
6632 4165
6633 spin_unlock_irqrestore(&priv->lock, flags); 4166 spin_unlock_irqrestore(&priv->lock, flags);
6634 4167
@@ -6642,25 +4175,25 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
6642 iwl3945_radio_kill_sw(priv, !conf->radio_enabled); 4175 iwl3945_radio_kill_sw(priv, !conf->radio_enabled);
6643 4176
6644 if (!conf->radio_enabled) { 4177 if (!conf->radio_enabled) {
6645 IWL_DEBUG_MAC80211("leave - radio disabled\n"); 4178 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
6646 goto out; 4179 goto out;
6647 } 4180 }
6648 4181
6649 if (iwl3945_is_rfkill(priv)) { 4182 if (iwl_is_rfkill(priv)) {
6650 IWL_DEBUG_MAC80211("leave - RF kill\n"); 4183 IWL_DEBUG_MAC80211(priv, "leave - RF kill\n");
6651 ret = -EIO; 4184 ret = -EIO;
6652 goto out; 4185 goto out;
6653 } 4186 }
6654 4187
6655 iwl3945_set_rate(priv); 4188 iwl_set_rate(priv);
6656 4189
6657 if (memcmp(&priv->active_rxon, 4190 if (memcmp(&priv->active_rxon,
6658 &priv->staging_rxon, sizeof(priv->staging_rxon))) 4191 &priv->staging_rxon, sizeof(priv->staging_rxon)))
6659 iwl3945_commit_rxon(priv); 4192 iwl3945_commit_rxon(priv);
6660 else 4193 else
6661 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n"); 4194 IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration\n");
6662 4195
6663 IWL_DEBUG_MAC80211("leave\n"); 4196 IWL_DEBUG_MAC80211(priv, "leave\n");
6664 4197
6665out: 4198out:
6666 clear_bit(STATUS_CONF_PENDING, &priv->status); 4199 clear_bit(STATUS_CONF_PENDING, &priv->status);
@@ -6668,7 +4201,7 @@ out:
6668 return ret; 4201 return ret;
6669} 4202}
6670 4203
6671static void iwl3945_config_ap(struct iwl3945_priv *priv) 4204static void iwl3945_config_ap(struct iwl_priv *priv)
6672{ 4205{
6673 int rc = 0; 4206 int rc = 0;
6674 4207
@@ -6676,19 +4209,20 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6676 return; 4209 return;
6677 4210
6678 /* The following should be done only at AP bring up */ 4211 /* The following should be done only at AP bring up */
6679 if (!(iwl3945_is_associated(priv))) { 4212 if (!(iwl_is_associated(priv))) {
6680 4213
6681 /* RXON - unassoc (to set timing command) */ 4214 /* RXON - unassoc (to set timing command) */
6682 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 4215 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6683 iwl3945_commit_rxon(priv); 4216 iwl3945_commit_rxon(priv);
6684 4217
6685 /* RXON Timing */ 4218 /* RXON Timing */
6686 memset(&priv->rxon_timing, 0, sizeof(struct iwl3945_rxon_time_cmd)); 4219 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
6687 iwl3945_setup_rxon_timing(priv); 4220 iwl3945_setup_rxon_timing(priv);
6688 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON_TIMING, 4221 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
6689 sizeof(priv->rxon_timing), &priv->rxon_timing); 4222 sizeof(priv->rxon_timing),
4223 &priv->rxon_timing);
6690 if (rc) 4224 if (rc)
6691 IWL_WARNING("REPLY_RXON_TIMING failed - " 4225 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
6692 "Attempting to continue.\n"); 4226 "Attempting to continue.\n");
6693 4227
6694 /* FIXME: what should be the assoc_id for AP? */ 4228 /* FIXME: what should be the assoc_id for AP? */
@@ -6716,7 +4250,7 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6716 /* restore RXON assoc */ 4250 /* restore RXON assoc */
6717 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 4251 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6718 iwl3945_commit_rxon(priv); 4252 iwl3945_commit_rxon(priv);
6719 iwl3945_add_station(priv, iwl3945_broadcast_addr, 0, 0); 4253 iwl3945_add_station(priv, iwl_bcast_addr, 0, 0);
6720 } 4254 }
6721 iwl3945_send_beacon_cmd(priv); 4255 iwl3945_send_beacon_cmd(priv);
6722 4256
@@ -6727,16 +4261,16 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6727 4261
6728static int iwl3945_mac_config_interface(struct ieee80211_hw *hw, 4262static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6729 struct ieee80211_vif *vif, 4263 struct ieee80211_vif *vif,
6730 struct ieee80211_if_conf *conf) 4264 struct ieee80211_if_conf *conf)
6731{ 4265{
6732 struct iwl3945_priv *priv = hw->priv; 4266 struct iwl_priv *priv = hw->priv;
6733 int rc; 4267 int rc;
6734 4268
6735 if (conf == NULL) 4269 if (conf == NULL)
6736 return -EIO; 4270 return -EIO;
6737 4271
6738 if (priv->vif != vif) { 4272 if (priv->vif != vif) {
6739 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n"); 4273 IWL_DEBUG_MAC80211(priv, "leave - priv->vif != vif\n");
6740 return 0; 4274 return 0;
6741 } 4275 }
6742 4276
@@ -6753,13 +4287,13 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6753 return rc; 4287 return rc;
6754 } 4288 }
6755 4289
6756 if (!iwl3945_is_alive(priv)) 4290 if (!iwl_is_alive(priv))
6757 return -EAGAIN; 4291 return -EAGAIN;
6758 4292
6759 mutex_lock(&priv->mutex); 4293 mutex_lock(&priv->mutex);
6760 4294
6761 if (conf->bssid) 4295 if (conf->bssid)
6762 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid); 4296 IWL_DEBUG_MAC80211(priv, "bssid: %pM\n", conf->bssid);
6763 4297
6764/* 4298/*
6765 * very dubious code was here; the probe filtering flag is never set: 4299 * very dubious code was here; the probe filtering flag is never set:
@@ -6772,7 +4306,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6772 if (!conf->bssid) { 4306 if (!conf->bssid) {
6773 conf->bssid = priv->mac_addr; 4307 conf->bssid = priv->mac_addr;
6774 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 4308 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
6775 IWL_DEBUG_MAC80211("bssid was set to: %pM\n", 4309 IWL_DEBUG_MAC80211(priv, "bssid was set to: %pM\n",
6776 conf->bssid); 4310 conf->bssid);
6777 } 4311 }
6778 if (priv->ibss_beacon) 4312 if (priv->ibss_beacon)
@@ -6781,17 +4315,17 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6781 priv->ibss_beacon = ieee80211_beacon_get(hw, vif); 4315 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
6782 } 4316 }
6783 4317
6784 if (iwl3945_is_rfkill(priv)) 4318 if (iwl_is_rfkill(priv))
6785 goto done; 4319 goto done;
6786 4320
6787 if (conf->bssid && !is_zero_ether_addr(conf->bssid) && 4321 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
6788 !is_multicast_ether_addr(conf->bssid)) { 4322 !is_multicast_ether_addr(conf->bssid)) {
6789 /* If there is currently a HW scan going on in the background 4323 /* If there is currently a HW scan going on in the background
6790 * then we need to cancel it else the RXON below will fail. */ 4324 * then we need to cancel it else the RXON below will fail. */
6791 if (iwl3945_scan_cancel_timeout(priv, 100)) { 4325 if (iwl_scan_cancel_timeout(priv, 100)) {
6792 IWL_WARNING("Aborted scan still in progress " 4326 IWL_WARN(priv, "Aborted scan still in progress "
6793 "after 100ms\n"); 4327 "after 100ms\n");
6794 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); 4328 IWL_DEBUG_MAC80211(priv, "leaving:scan abort failed\n");
6795 mutex_unlock(&priv->mutex); 4329 mutex_unlock(&priv->mutex);
6796 return -EAGAIN; 4330 return -EAGAIN;
6797 } 4331 }
@@ -6813,75 +4347,29 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6813 } 4347 }
6814 4348
6815 } else { 4349 } else {
6816 iwl3945_scan_cancel_timeout(priv, 100); 4350 iwl_scan_cancel_timeout(priv, 100);
6817 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 4351 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6818 iwl3945_commit_rxon(priv); 4352 iwl3945_commit_rxon(priv);
6819 } 4353 }
6820 4354
6821 done: 4355 done:
6822 IWL_DEBUG_MAC80211("leave\n"); 4356 IWL_DEBUG_MAC80211(priv, "leave\n");
6823 mutex_unlock(&priv->mutex); 4357 mutex_unlock(&priv->mutex);
6824 4358
6825 return 0; 4359 return 0;
6826} 4360}
6827 4361
6828static void iwl3945_configure_filter(struct ieee80211_hw *hw,
6829 unsigned int changed_flags,
6830 unsigned int *total_flags,
6831 int mc_count, struct dev_addr_list *mc_list)
6832{
6833 struct iwl3945_priv *priv = hw->priv;
6834 __le32 *filter_flags = &priv->staging_rxon.filter_flags;
6835
6836 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
6837 changed_flags, *total_flags);
6838
6839 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
6840 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
6841 *filter_flags |= RXON_FILTER_PROMISC_MSK;
6842 else
6843 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
6844 }
6845 if (changed_flags & FIF_ALLMULTI) {
6846 if (*total_flags & FIF_ALLMULTI)
6847 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
6848 else
6849 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
6850 }
6851 if (changed_flags & FIF_CONTROL) {
6852 if (*total_flags & FIF_CONTROL)
6853 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
6854 else
6855 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
6856 }
6857 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
6858 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
6859 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
6860 else
6861 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
6862 }
6863
6864 /* We avoid iwl_commit_rxon here to commit the new filter flags
6865 * since mac80211 will call ieee80211_hw_config immediately.
6866 * (mc_list is not supported at this time). Otherwise, we need to
6867 * queue a background iwl_commit_rxon work.
6868 */
6869
6870 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
6871 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
6872}
6873
6874static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw, 4362static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
6875 struct ieee80211_if_init_conf *conf) 4363 struct ieee80211_if_init_conf *conf)
6876{ 4364{
6877 struct iwl3945_priv *priv = hw->priv; 4365 struct iwl_priv *priv = hw->priv;
6878 4366
6879 IWL_DEBUG_MAC80211("enter\n"); 4367 IWL_DEBUG_MAC80211(priv, "enter\n");
6880 4368
6881 mutex_lock(&priv->mutex); 4369 mutex_lock(&priv->mutex);
6882 4370
6883 if (iwl3945_is_ready_rf(priv)) { 4371 if (iwl_is_ready_rf(priv)) {
6884 iwl3945_scan_cancel_timeout(priv, 100); 4372 iwl_scan_cancel_timeout(priv, 100);
6885 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 4373 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6886 iwl3945_commit_rxon(priv); 4374 iwl3945_commit_rxon(priv);
6887 } 4375 }
@@ -6891,7 +4379,7 @@ static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
6891 } 4379 }
6892 mutex_unlock(&priv->mutex); 4380 mutex_unlock(&priv->mutex);
6893 4381
6894 IWL_DEBUG_MAC80211("leave\n"); 4382 IWL_DEBUG_MAC80211(priv, "leave\n");
6895} 4383}
6896 4384
6897#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 4385#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
@@ -6901,21 +4389,23 @@ static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6901 struct ieee80211_bss_conf *bss_conf, 4389 struct ieee80211_bss_conf *bss_conf,
6902 u32 changes) 4390 u32 changes)
6903{ 4391{
6904 struct iwl3945_priv *priv = hw->priv; 4392 struct iwl_priv *priv = hw->priv;
6905 4393
6906 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes); 4394 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
6907 4395
6908 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 4396 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
6909 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n", 4397 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
6910 bss_conf->use_short_preamble); 4398 bss_conf->use_short_preamble);
6911 if (bss_conf->use_short_preamble) 4399 if (bss_conf->use_short_preamble)
6912 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 4400 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6913 else 4401 else
6914 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 4402 priv->staging_rxon.flags &=
4403 ~RXON_FLG_SHORT_PREAMBLE_MSK;
6915 } 4404 }
6916 4405
6917 if (changes & BSS_CHANGED_ERP_CTS_PROT) { 4406 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
6918 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); 4407 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n",
4408 bss_conf->use_cts_prot);
6919 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) 4409 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
6920 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK; 4410 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
6921 else 4411 else
@@ -6923,7 +4413,7 @@ static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6923 } 4413 }
6924 4414
6925 if (changes & BSS_CHANGED_ASSOC) { 4415 if (changes & BSS_CHANGED_ASSOC) {
6926 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc); 4416 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
6927 /* This should never happen as this function should 4417 /* This should never happen as this function should
6928 * never be called from interrupt context. */ 4418 * never be called from interrupt context. */
6929 if (WARN_ON_ONCE(in_interrupt())) 4419 if (WARN_ON_ONCE(in_interrupt()))
@@ -6931,10 +4421,9 @@ static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6931 if (bss_conf->assoc) { 4421 if (bss_conf->assoc) {
6932 priv->assoc_id = bss_conf->aid; 4422 priv->assoc_id = bss_conf->aid;
6933 priv->beacon_int = bss_conf->beacon_int; 4423 priv->beacon_int = bss_conf->beacon_int;
6934 priv->timestamp0 = bss_conf->timestamp & 0xFFFFFFFF; 4424 priv->timestamp = bss_conf->timestamp;
6935 priv->timestamp1 = (bss_conf->timestamp >> 32) &
6936 0xFFFFFFFF;
6937 priv->assoc_capability = bss_conf->assoc_capability; 4425 priv->assoc_capability = bss_conf->assoc_capability;
4426 priv->power_data.dtim_period = bss_conf->dtim_period;
6938 priv->next_scan_jiffies = jiffies + 4427 priv->next_scan_jiffies = jiffies +
6939 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; 4428 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6940 mutex_lock(&priv->mutex); 4429 mutex_lock(&priv->mutex);
@@ -6942,30 +4431,40 @@ static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6942 mutex_unlock(&priv->mutex); 4431 mutex_unlock(&priv->mutex);
6943 } else { 4432 } else {
6944 priv->assoc_id = 0; 4433 priv->assoc_id = 0;
6945 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc); 4434 IWL_DEBUG_MAC80211(priv,
4435 "DISASSOC %d\n", bss_conf->assoc);
6946 } 4436 }
6947 } else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) { 4437 } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
6948 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes); 4438 IWL_DEBUG_MAC80211(priv,
4439 "Associated Changes %d\n", changes);
6949 iwl3945_send_rxon_assoc(priv); 4440 iwl3945_send_rxon_assoc(priv);
6950 } 4441 }
6951 4442
6952} 4443}
6953 4444
6954static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) 4445static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw,
4446 struct cfg80211_scan_request *req)
6955{ 4447{
6956 int rc = 0; 4448 int rc = 0;
6957 unsigned long flags; 4449 unsigned long flags;
6958 struct iwl3945_priv *priv = hw->priv; 4450 struct iwl_priv *priv = hw->priv;
4451 size_t len = 0;
4452 u8 *ssid = NULL;
6959 DECLARE_SSID_BUF(ssid_buf); 4453 DECLARE_SSID_BUF(ssid_buf);
6960 4454
6961 IWL_DEBUG_MAC80211("enter\n"); 4455 IWL_DEBUG_MAC80211(priv, "enter\n");
4456
4457 if (req->n_ssids) {
4458 ssid = req->ssids[0].ssid;
4459 len = req->ssids[0].ssid_len;
4460 }
6962 4461
6963 mutex_lock(&priv->mutex); 4462 mutex_lock(&priv->mutex);
6964 spin_lock_irqsave(&priv->lock, flags); 4463 spin_lock_irqsave(&priv->lock, flags);
6965 4464
6966 if (!iwl3945_is_ready_rf(priv)) { 4465 if (!iwl_is_ready_rf(priv)) {
6967 rc = -EIO; 4466 rc = -EIO;
6968 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); 4467 IWL_DEBUG_MAC80211(priv, "leave - not ready or exit pending\n");
6969 goto out_unlock; 4468 goto out_unlock;
6970 } 4469 }
6971 4470
@@ -6983,19 +4482,18 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
6983 goto out_unlock; 4482 goto out_unlock;
6984 } 4483 }
6985 if (len) { 4484 if (len) {
6986 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ", 4485 IWL_DEBUG_SCAN(priv, "direct scan for %s [%zd]\n ",
6987 print_ssid(ssid_buf, ssid, len), (int)len); 4486 print_ssid(ssid_buf, ssid, len), len);
6988 4487
6989 priv->one_direct_scan = 1; 4488 priv->one_direct_scan = 1;
6990 priv->direct_ssid_len = (u8) 4489 priv->direct_ssid_len = len;
6991 min((u8) len, (u8) IW_ESSID_MAX_SIZE); 4490 memcpy(priv->direct_ssid, ssid, len);
6992 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
6993 } else 4491 } else
6994 priv->one_direct_scan = 0; 4492 priv->one_direct_scan = 0;
6995 4493
6996 rc = iwl3945_scan_initiate(priv); 4494 rc = iwl3945_scan_initiate(priv);
6997 4495
6998 IWL_DEBUG_MAC80211("leave\n"); 4496 IWL_DEBUG_MAC80211(priv, "leave\n");
6999 4497
7000out_unlock: 4498out_unlock:
7001 spin_unlock_irqrestore(&priv->lock, flags); 4499 spin_unlock_irqrestore(&priv->lock, flags);
@@ -7005,80 +4503,80 @@ out_unlock:
7005} 4503}
7006 4504
7007static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 4505static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
7008 const u8 *local_addr, const u8 *addr, 4506 struct ieee80211_vif *vif,
7009 struct ieee80211_key_conf *key) 4507 struct ieee80211_sta *sta,
4508 struct ieee80211_key_conf *key)
7010{ 4509{
7011 struct iwl3945_priv *priv = hw->priv; 4510 struct iwl_priv *priv = hw->priv;
7012 int rc = 0; 4511 const u8 *addr;
4512 int ret;
7013 u8 sta_id; 4513 u8 sta_id;
7014 4514
7015 IWL_DEBUG_MAC80211("enter\n"); 4515 IWL_DEBUG_MAC80211(priv, "enter\n");
7016 4516
7017 if (!iwl3945_param_hwcrypto) { 4517 if (iwl3945_mod_params.sw_crypto) {
7018 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); 4518 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
7019 return -EOPNOTSUPP; 4519 return -EOPNOTSUPP;
7020 } 4520 }
7021 4521
7022 if (is_zero_ether_addr(addr)) 4522 addr = sta ? sta->addr : iwl_bcast_addr;
7023 /* only support pairwise keys */
7024 return -EOPNOTSUPP;
7025
7026 sta_id = iwl3945_hw_find_station(priv, addr); 4523 sta_id = iwl3945_hw_find_station(priv, addr);
7027 if (sta_id == IWL_INVALID_STATION) { 4524 if (sta_id == IWL_INVALID_STATION) {
7028 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n", 4525 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
7029 addr); 4526 addr);
7030 return -EINVAL; 4527 return -EINVAL;
7031 } 4528 }
7032 4529
7033 mutex_lock(&priv->mutex); 4530 mutex_lock(&priv->mutex);
7034 4531
7035 iwl3945_scan_cancel_timeout(priv, 100); 4532 iwl_scan_cancel_timeout(priv, 100);
7036 4533
7037 switch (cmd) { 4534 switch (cmd) {
7038 case SET_KEY: 4535 case SET_KEY:
7039 rc = iwl3945_update_sta_key_info(priv, key, sta_id); 4536 ret = iwl3945_update_sta_key_info(priv, key, sta_id);
7040 if (!rc) { 4537 if (!ret) {
7041 iwl3945_set_rxon_hwcrypto(priv, 1); 4538 iwl_set_rxon_hwcrypto(priv, 1);
7042 iwl3945_commit_rxon(priv); 4539 iwl3945_commit_rxon(priv);
7043 key->hw_key_idx = sta_id; 4540 key->hw_key_idx = sta_id;
7044 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); 4541 IWL_DEBUG_MAC80211(priv,
4542 "set_key success, using hwcrypto\n");
7045 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 4543 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7046 } 4544 }
7047 break; 4545 break;
7048 case DISABLE_KEY: 4546 case DISABLE_KEY:
7049 rc = iwl3945_clear_sta_key_info(priv, sta_id); 4547 ret = iwl3945_clear_sta_key_info(priv, sta_id);
7050 if (!rc) { 4548 if (!ret) {
7051 iwl3945_set_rxon_hwcrypto(priv, 0); 4549 iwl_set_rxon_hwcrypto(priv, 0);
7052 iwl3945_commit_rxon(priv); 4550 iwl3945_commit_rxon(priv);
7053 IWL_DEBUG_MAC80211("disable hwcrypto key\n"); 4551 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
7054 } 4552 }
7055 break; 4553 break;
7056 default: 4554 default:
7057 rc = -EINVAL; 4555 ret = -EINVAL;
7058 } 4556 }
7059 4557
7060 IWL_DEBUG_MAC80211("leave\n"); 4558 IWL_DEBUG_MAC80211(priv, "leave\n");
7061 mutex_unlock(&priv->mutex); 4559 mutex_unlock(&priv->mutex);
7062 4560
7063 return rc; 4561 return ret;
7064} 4562}
7065 4563
7066static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 4564static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7067 const struct ieee80211_tx_queue_params *params) 4565 const struct ieee80211_tx_queue_params *params)
7068{ 4566{
7069 struct iwl3945_priv *priv = hw->priv; 4567 struct iwl_priv *priv = hw->priv;
7070 unsigned long flags; 4568 unsigned long flags;
7071 int q; 4569 int q;
7072 4570
7073 IWL_DEBUG_MAC80211("enter\n"); 4571 IWL_DEBUG_MAC80211(priv, "enter\n");
7074 4572
7075 if (!iwl3945_is_ready_rf(priv)) { 4573 if (!iwl_is_ready_rf(priv)) {
7076 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 4574 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
7077 return -EIO; 4575 return -EIO;
7078 } 4576 }
7079 4577
7080 if (queue >= AC_NUM) { 4578 if (queue >= AC_NUM) {
7081 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); 4579 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
7082 return 0; 4580 return 0;
7083 } 4581 }
7084 4582
@@ -7100,28 +4598,28 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7100 mutex_lock(&priv->mutex); 4598 mutex_lock(&priv->mutex);
7101 if (priv->iw_mode == NL80211_IFTYPE_AP) 4599 if (priv->iw_mode == NL80211_IFTYPE_AP)
7102 iwl3945_activate_qos(priv, 1); 4600 iwl3945_activate_qos(priv, 1);
7103 else if (priv->assoc_id && iwl3945_is_associated(priv)) 4601 else if (priv->assoc_id && iwl_is_associated(priv))
7104 iwl3945_activate_qos(priv, 0); 4602 iwl3945_activate_qos(priv, 0);
7105 4603
7106 mutex_unlock(&priv->mutex); 4604 mutex_unlock(&priv->mutex);
7107 4605
7108 IWL_DEBUG_MAC80211("leave\n"); 4606 IWL_DEBUG_MAC80211(priv, "leave\n");
7109 return 0; 4607 return 0;
7110} 4608}
7111 4609
7112static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw, 4610static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
7113 struct ieee80211_tx_queue_stats *stats) 4611 struct ieee80211_tx_queue_stats *stats)
7114{ 4612{
7115 struct iwl3945_priv *priv = hw->priv; 4613 struct iwl_priv *priv = hw->priv;
7116 int i, avail; 4614 int i, avail;
7117 struct iwl3945_tx_queue *txq; 4615 struct iwl_tx_queue *txq;
7118 struct iwl3945_queue *q; 4616 struct iwl_queue *q;
7119 unsigned long flags; 4617 unsigned long flags;
7120 4618
7121 IWL_DEBUG_MAC80211("enter\n"); 4619 IWL_DEBUG_MAC80211(priv, "enter\n");
7122 4620
7123 if (!iwl3945_is_ready_rf(priv)) { 4621 if (!iwl_is_ready_rf(priv)) {
7124 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 4622 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
7125 return -EIO; 4623 return -EIO;
7126 } 4624 }
7127 4625
@@ -7130,7 +4628,7 @@ static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
7130 for (i = 0; i < AC_NUM; i++) { 4628 for (i = 0; i < AC_NUM; i++) {
7131 txq = &priv->txq[i]; 4629 txq = &priv->txq[i];
7132 q = &txq->q; 4630 q = &txq->q;
7133 avail = iwl3945_queue_space(q); 4631 avail = iwl_queue_space(q);
7134 4632
7135 stats[i].len = q->n_window - avail; 4633 stats[i].len = q->n_window - avail;
7136 stats[i].limit = q->n_window - q->high_mark; 4634 stats[i].limit = q->n_window - q->high_mark;
@@ -7139,34 +4637,24 @@ static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
7139 } 4637 }
7140 spin_unlock_irqrestore(&priv->lock, flags); 4638 spin_unlock_irqrestore(&priv->lock, flags);
7141 4639
7142 IWL_DEBUG_MAC80211("leave\n"); 4640 IWL_DEBUG_MAC80211(priv, "leave\n");
7143
7144 return 0;
7145}
7146
7147static int iwl3945_mac_get_stats(struct ieee80211_hw *hw,
7148 struct ieee80211_low_level_stats *stats)
7149{
7150 IWL_DEBUG_MAC80211("enter\n");
7151 IWL_DEBUG_MAC80211("leave\n");
7152 4641
7153 return 0; 4642 return 0;
7154} 4643}
7155 4644
7156static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw) 4645static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7157{ 4646{
7158 struct iwl3945_priv *priv = hw->priv; 4647 struct iwl_priv *priv = hw->priv;
7159 unsigned long flags; 4648 unsigned long flags;
7160 4649
7161 mutex_lock(&priv->mutex); 4650 mutex_lock(&priv->mutex);
7162 IWL_DEBUG_MAC80211("enter\n"); 4651 IWL_DEBUG_MAC80211(priv, "enter\n");
7163 4652
7164 iwl3945_reset_qos(priv); 4653 iwl_reset_qos(priv);
7165 4654
7166 spin_lock_irqsave(&priv->lock, flags); 4655 spin_lock_irqsave(&priv->lock, flags);
7167 priv->assoc_id = 0; 4656 priv->assoc_id = 0;
7168 priv->assoc_capability = 0; 4657 priv->assoc_capability = 0;
7169 priv->call_post_assoc_from_beacon = 0;
7170 4658
7171 /* new association get rid of ibss beacon skb */ 4659 /* new association get rid of ibss beacon skb */
7172 if (priv->ibss_beacon) 4660 if (priv->ibss_beacon)
@@ -7175,15 +4663,14 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7175 priv->ibss_beacon = NULL; 4663 priv->ibss_beacon = NULL;
7176 4664
7177 priv->beacon_int = priv->hw->conf.beacon_int; 4665 priv->beacon_int = priv->hw->conf.beacon_int;
7178 priv->timestamp1 = 0; 4666 priv->timestamp = 0;
7179 priv->timestamp0 = 0;
7180 if ((priv->iw_mode == NL80211_IFTYPE_STATION)) 4667 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
7181 priv->beacon_int = 0; 4668 priv->beacon_int = 0;
7182 4669
7183 spin_unlock_irqrestore(&priv->lock, flags); 4670 spin_unlock_irqrestore(&priv->lock, flags);
7184 4671
7185 if (!iwl3945_is_ready_rf(priv)) { 4672 if (!iwl_is_ready_rf(priv)) {
7186 IWL_DEBUG_MAC80211("leave - not ready\n"); 4673 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
7187 mutex_unlock(&priv->mutex); 4674 mutex_unlock(&priv->mutex);
7188 return; 4675 return;
7189 } 4676 }
@@ -7192,7 +4679,7 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7192 * clear RXON_FILTER_ASSOC_MSK bit 4679 * clear RXON_FILTER_ASSOC_MSK bit
7193 */ 4680 */
7194 if (priv->iw_mode != NL80211_IFTYPE_AP) { 4681 if (priv->iw_mode != NL80211_IFTYPE_AP) {
7195 iwl3945_scan_cancel_timeout(priv, 100); 4682 iwl_scan_cancel_timeout(priv, 100);
7196 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 4683 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7197 iwl3945_commit_rxon(priv); 4684 iwl3945_commit_rxon(priv);
7198 } 4685 }
@@ -7200,33 +4687,33 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7200 /* Per mac80211.h: This is only used in IBSS mode... */ 4687 /* Per mac80211.h: This is only used in IBSS mode... */
7201 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { 4688 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
7202 4689
7203 IWL_DEBUG_MAC80211("leave - not in IBSS\n"); 4690 IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
7204 mutex_unlock(&priv->mutex); 4691 mutex_unlock(&priv->mutex);
7205 return; 4692 return;
7206 } 4693 }
7207 4694
7208 iwl3945_set_rate(priv); 4695 iwl_set_rate(priv);
7209 4696
7210 mutex_unlock(&priv->mutex); 4697 mutex_unlock(&priv->mutex);
7211 4698
7212 IWL_DEBUG_MAC80211("leave\n"); 4699 IWL_DEBUG_MAC80211(priv, "leave\n");
7213 4700
7214} 4701}
7215 4702
7216static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 4703static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
7217{ 4704{
7218 struct iwl3945_priv *priv = hw->priv; 4705 struct iwl_priv *priv = hw->priv;
7219 unsigned long flags; 4706 unsigned long flags;
7220 4707
7221 IWL_DEBUG_MAC80211("enter\n"); 4708 IWL_DEBUG_MAC80211(priv, "enter\n");
7222 4709
7223 if (!iwl3945_is_ready_rf(priv)) { 4710 if (!iwl_is_ready_rf(priv)) {
7224 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 4711 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
7225 return -EIO; 4712 return -EIO;
7226 } 4713 }
7227 4714
7228 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { 4715 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
7229 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 4716 IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
7230 return -EIO; 4717 return -EIO;
7231 } 4718 }
7232 4719
@@ -7239,10 +4726,10 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7239 4726
7240 priv->assoc_id = 0; 4727 priv->assoc_id = 0;
7241 4728
7242 IWL_DEBUG_MAC80211("leave\n"); 4729 IWL_DEBUG_MAC80211(priv, "leave\n");
7243 spin_unlock_irqrestore(&priv->lock, flags); 4730 spin_unlock_irqrestore(&priv->lock, flags);
7244 4731
7245 iwl3945_reset_qos(priv); 4732 iwl_reset_qos(priv);
7246 4733
7247 iwl3945_post_associate(priv); 4734 iwl3945_post_associate(priv);
7248 4735
@@ -7256,7 +4743,7 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7256 * 4743 *
7257 *****************************************************************************/ 4744 *****************************************************************************/
7258 4745
7259#ifdef CONFIG_IWL3945_DEBUG 4746#ifdef CONFIG_IWLWIFI_DEBUG
7260 4747
7261/* 4748/*
7262 * The following adds a new attribute to the sysfs representation 4749 * The following adds a new attribute to the sysfs representation
@@ -7265,38 +4752,41 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7265 * 4752 *
7266 * See the level definitions in iwl for details. 4753 * See the level definitions in iwl for details.
7267 */ 4754 */
7268 4755static ssize_t show_debug_level(struct device *d,
7269static ssize_t show_debug_level(struct device_driver *d, char *buf) 4756 struct device_attribute *attr, char *buf)
7270{ 4757{
7271 return sprintf(buf, "0x%08X\n", iwl3945_debug_level); 4758 struct iwl_priv *priv = d->driver_data;
4759
4760 return sprintf(buf, "0x%08X\n", priv->debug_level);
7272} 4761}
7273static ssize_t store_debug_level(struct device_driver *d, 4762static ssize_t store_debug_level(struct device *d,
4763 struct device_attribute *attr,
7274 const char *buf, size_t count) 4764 const char *buf, size_t count)
7275{ 4765{
7276 char *p = (char *)buf; 4766 struct iwl_priv *priv = d->driver_data;
7277 u32 val; 4767 unsigned long val;
4768 int ret;
7278 4769
7279 val = simple_strtoul(p, &p, 0); 4770 ret = strict_strtoul(buf, 0, &val);
7280 if (p == buf) 4771 if (ret)
7281 printk(KERN_INFO DRV_NAME 4772 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
7282 ": %s is not in hex or decimal form.\n", buf);
7283 else 4773 else
7284 iwl3945_debug_level = val; 4774 priv->debug_level = val;
7285 4775
7286 return strnlen(buf, count); 4776 return strnlen(buf, count);
7287} 4777}
7288 4778
7289static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, 4779static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
7290 show_debug_level, store_debug_level); 4780 show_debug_level, store_debug_level);
7291 4781
7292#endif /* CONFIG_IWL3945_DEBUG */ 4782#endif /* CONFIG_IWLWIFI_DEBUG */
7293 4783
7294static ssize_t show_temperature(struct device *d, 4784static ssize_t show_temperature(struct device *d,
7295 struct device_attribute *attr, char *buf) 4785 struct device_attribute *attr, char *buf)
7296{ 4786{
7297 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 4787 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7298 4788
7299 if (!iwl3945_is_alive(priv)) 4789 if (!iwl_is_alive(priv))
7300 return -EAGAIN; 4790 return -EAGAIN;
7301 4791
7302 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); 4792 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
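The store_debug_level() hunk above switches the sysfs write path from an unchecked simple_strtoul() call to strict_strtoul(), so malformed input is reported instead of being silently parsed. A minimal sketch of that parse-then-assign pattern follows; store_u32_attr() is an illustrative name, and unlike the driver (which logs via IWL_INFO() and still returns strnlen(buf, count)) the sketch returns the parse error directly:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Sketch only: strict_strtoul() was the checked-parse helper of this
     * kernel era (later replaced by kstrtoul()). */
    static ssize_t store_u32_attr(const char *buf, size_t count, u32 *dest)
    {
        unsigned long val;
        int ret;

        ret = strict_strtoul(buf, 0, &val);  /* base 0: decimal or 0x-prefixed hex */
        if (ret)
            return ret;                      /* reject garbage instead of ignoring it */

        *dest = val;
        return count;                        /* consume the whole sysfs write */
    }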
@@ -7307,22 +4797,21 @@ static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
7307static ssize_t show_tx_power(struct device *d, 4797static ssize_t show_tx_power(struct device *d,
7308 struct device_attribute *attr, char *buf) 4798 struct device_attribute *attr, char *buf)
7309{ 4799{
7310 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 4800 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7311 return sprintf(buf, "%d\n", priv->user_txpower_limit); 4801 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
7312} 4802}
7313 4803
7314static ssize_t store_tx_power(struct device *d, 4804static ssize_t store_tx_power(struct device *d,
7315 struct device_attribute *attr, 4805 struct device_attribute *attr,
7316 const char *buf, size_t count) 4806 const char *buf, size_t count)
7317{ 4807{
7318 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 4808 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7319 char *p = (char *)buf; 4809 char *p = (char *)buf;
7320 u32 val; 4810 u32 val;
7321 4811
7322 val = simple_strtoul(p, &p, 10); 4812 val = simple_strtoul(p, &p, 10);
7323 if (p == buf) 4813 if (p == buf)
7324 printk(KERN_INFO DRV_NAME 4814 IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
7325 ": %s is not in decimal form.\n", buf);
7326 else 4815 else
7327 iwl3945_hw_reg_set_txpower(priv, val); 4816 iwl3945_hw_reg_set_txpower(priv, val);
7328 4817
@@ -7334,7 +4823,7 @@ static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
7334static ssize_t show_flags(struct device *d, 4823static ssize_t show_flags(struct device *d,
7335 struct device_attribute *attr, char *buf) 4824 struct device_attribute *attr, char *buf)
7336{ 4825{
7337 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 4826 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7338 4827
7339 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); 4828 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
7340} 4829}
@@ -7343,16 +4832,16 @@ static ssize_t store_flags(struct device *d,
7343 struct device_attribute *attr, 4832 struct device_attribute *attr,
7344 const char *buf, size_t count) 4833 const char *buf, size_t count)
7345{ 4834{
7346 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 4835 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7347 u32 flags = simple_strtoul(buf, NULL, 0); 4836 u32 flags = simple_strtoul(buf, NULL, 0);
7348 4837
7349 mutex_lock(&priv->mutex); 4838 mutex_lock(&priv->mutex);
7350 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 4839 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
7351 /* Cancel any currently running scans... */ 4840 /* Cancel any currently running scans... */
7352 if (iwl3945_scan_cancel_timeout(priv, 100)) 4841 if (iwl_scan_cancel_timeout(priv, 100))
7353 IWL_WARNING("Could not cancel scan.\n"); 4842 IWL_WARN(priv, "Could not cancel scan.\n");
7354 else { 4843 else {
7355 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", 4844 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
7356 flags); 4845 flags);
7357 priv->staging_rxon.flags = cpu_to_le32(flags); 4846 priv->staging_rxon.flags = cpu_to_le32(flags);
7358 iwl3945_commit_rxon(priv); 4847 iwl3945_commit_rxon(priv);
@@ -7368,7 +4857,7 @@ static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
7368static ssize_t show_filter_flags(struct device *d, 4857static ssize_t show_filter_flags(struct device *d,
7369 struct device_attribute *attr, char *buf) 4858 struct device_attribute *attr, char *buf)
7370{ 4859{
7371 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 4860 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7372 4861
7373 return sprintf(buf, "0x%04X\n", 4862 return sprintf(buf, "0x%04X\n",
7374 le32_to_cpu(priv->active_rxon.filter_flags)); 4863 le32_to_cpu(priv->active_rxon.filter_flags));
@@ -7378,16 +4867,16 @@ static ssize_t store_filter_flags(struct device *d,
7378 struct device_attribute *attr, 4867 struct device_attribute *attr,
7379 const char *buf, size_t count) 4868 const char *buf, size_t count)
7380{ 4869{
7381 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 4870 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7382 u32 filter_flags = simple_strtoul(buf, NULL, 0); 4871 u32 filter_flags = simple_strtoul(buf, NULL, 0);
7383 4872
7384 mutex_lock(&priv->mutex); 4873 mutex_lock(&priv->mutex);
7385 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 4874 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
7386 /* Cancel any currently running scans... */ 4875 /* Cancel any currently running scans... */
7387 if (iwl3945_scan_cancel_timeout(priv, 100)) 4876 if (iwl_scan_cancel_timeout(priv, 100))
7388 IWL_WARNING("Could not cancel scan.\n"); 4877 IWL_WARN(priv, "Could not cancel scan.\n");
7389 else { 4878 else {
7390 IWL_DEBUG_INFO("Committing rxon.filter_flags = " 4879 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
7391 "0x%04X\n", filter_flags); 4880 "0x%04X\n", filter_flags);
7392 priv->staging_rxon.filter_flags = 4881 priv->staging_rxon.filter_flags =
7393 cpu_to_le32(filter_flags); 4882 cpu_to_le32(filter_flags);
@@ -7407,8 +4896,8 @@ static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
7407static ssize_t show_measurement(struct device *d, 4896static ssize_t show_measurement(struct device *d,
7408 struct device_attribute *attr, char *buf) 4897 struct device_attribute *attr, char *buf)
7409{ 4898{
7410 struct iwl3945_priv *priv = dev_get_drvdata(d); 4899 struct iwl_priv *priv = dev_get_drvdata(d);
7411 struct iwl3945_spectrum_notification measure_report; 4900 struct iwl_spectrum_notification measure_report;
7412 u32 size = sizeof(measure_report), len = 0, ofs = 0; 4901 u32 size = sizeof(measure_report), len = 0, ofs = 0;
7413 u8 *data = (u8 *)&measure_report; 4902 u8 *data = (u8 *)&measure_report;
7414 unsigned long flags; 4903 unsigned long flags;
@@ -7440,7 +4929,7 @@ static ssize_t store_measurement(struct device *d,
7440 struct device_attribute *attr, 4929 struct device_attribute *attr,
7441 const char *buf, size_t count) 4930 const char *buf, size_t count)
7442{ 4931{
7443 struct iwl3945_priv *priv = dev_get_drvdata(d); 4932 struct iwl_priv *priv = dev_get_drvdata(d);
7444 struct ieee80211_measurement_params params = { 4933 struct ieee80211_measurement_params params = {
7445 .channel = le16_to_cpu(priv->active_rxon.channel), 4934 .channel = le16_to_cpu(priv->active_rxon.channel),
7446 .start_time = cpu_to_le64(priv->last_tsf), 4935 .start_time = cpu_to_le64(priv->last_tsf),
@@ -7464,7 +4953,7 @@ static ssize_t store_measurement(struct device *d,
7464 type = simple_strtoul(p + 1, NULL, 0); 4953 type = simple_strtoul(p + 1, NULL, 0);
7465 } 4954 }
7466 4955
7467 IWL_DEBUG_INFO("Invoking measurement of type %d on " 4956 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
7468 "channel %d (for '%s')\n", type, params.channel, buf); 4957 "channel %d (for '%s')\n", type, params.channel, buf);
7469 iwl3945_get_measurement(priv, &params, type); 4958 iwl3945_get_measurement(priv, &params, type);
7470 4959
@@ -7479,7 +4968,7 @@ static ssize_t store_retry_rate(struct device *d,
7479 struct device_attribute *attr, 4968 struct device_attribute *attr,
7480 const char *buf, size_t count) 4969 const char *buf, size_t count)
7481{ 4970{
7482 struct iwl3945_priv *priv = dev_get_drvdata(d); 4971 struct iwl_priv *priv = dev_get_drvdata(d);
7483 4972
7484 priv->retry_rate = simple_strtoul(buf, NULL, 0); 4973 priv->retry_rate = simple_strtoul(buf, NULL, 0);
7485 if (priv->retry_rate <= 0) 4974 if (priv->retry_rate <= 0)
@@ -7491,50 +4980,77 @@ static ssize_t store_retry_rate(struct device *d,
7491static ssize_t show_retry_rate(struct device *d, 4980static ssize_t show_retry_rate(struct device *d,
7492 struct device_attribute *attr, char *buf) 4981 struct device_attribute *attr, char *buf)
7493{ 4982{
7494 struct iwl3945_priv *priv = dev_get_drvdata(d); 4983 struct iwl_priv *priv = dev_get_drvdata(d);
7495 return sprintf(buf, "%d", priv->retry_rate); 4984 return sprintf(buf, "%d", priv->retry_rate);
7496} 4985}
7497 4986
7498static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, 4987static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
7499 store_retry_rate); 4988 store_retry_rate);
7500 4989
4990
7501static ssize_t store_power_level(struct device *d, 4991static ssize_t store_power_level(struct device *d,
7502 struct device_attribute *attr, 4992 struct device_attribute *attr,
7503 const char *buf, size_t count) 4993 const char *buf, size_t count)
7504{ 4994{
7505 struct iwl3945_priv *priv = dev_get_drvdata(d); 4995 struct iwl_priv *priv = dev_get_drvdata(d);
7506 int rc; 4996 int ret;
7507 int mode; 4997 unsigned long mode;
4998
7508 4999
7509 mode = simple_strtoul(buf, NULL, 0);
7510 mutex_lock(&priv->mutex); 5000 mutex_lock(&priv->mutex);
7511 5001
7512 if (!iwl3945_is_ready(priv)) { 5002 if (!iwl_is_ready(priv)) {
7513 rc = -EAGAIN; 5003 ret = -EAGAIN;
7514 goto out; 5004 goto out;
7515 } 5005 }
7516 5006
7517 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) 5007 ret = strict_strtoul(buf, 10, &mode);
7518 mode = IWL_POWER_AC; 5008 if (ret)
7519 else 5009 goto out;
7520 mode |= IWL_POWER_ENABLED;
7521 5010
7522 if (mode != priv->power_mode) { 5011 ret = iwl_power_set_user_mode(priv, mode);
7523 rc = iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(mode)); 5012 if (ret) {
7524 if (rc) { 5013 IWL_DEBUG_MAC80211(priv, "failed setting power mode.\n");
7525 IWL_DEBUG_MAC80211("failed setting power mode.\n"); 5014 goto out;
7526 goto out;
7527 }
7528 priv->power_mode = mode;
7529 } 5015 }
7530 5016 ret = count;
7531 rc = count;
7532 5017
7533 out: 5018 out:
7534 mutex_unlock(&priv->mutex); 5019 mutex_unlock(&priv->mutex);
7535 return rc; 5020 return ret;
5021}
5022
5023static ssize_t show_power_level(struct device *d,
5024 struct device_attribute *attr, char *buf)
5025{
5026 struct iwl_priv *priv = dev_get_drvdata(d);
5027 int mode = priv->power_data.user_power_setting;
5028 int system = priv->power_data.system_power_setting;
5029 int level = priv->power_data.power_mode;
5030 char *p = buf;
5031
5032 switch (system) {
5033 case IWL_POWER_SYS_AUTO:
5034 p += sprintf(p, "SYSTEM:auto");
5035 break;
5036 case IWL_POWER_SYS_AC:
5037 p += sprintf(p, "SYSTEM:ac");
5038 break;
5039 case IWL_POWER_SYS_BATTERY:
5040 p += sprintf(p, "SYSTEM:battery");
5041 break;
5042 }
5043
5044 p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO) ?
5045 "fixed" : "auto");
5046 p += sprintf(p, "\tINDEX:%d", level);
5047 p += sprintf(p, "\n");
5048 return p - buf + 1;
7536} 5049}
7537 5050
5051static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR,
5052 show_power_level, store_power_level);
5053
7538#define MAX_WX_STRING 80 5054#define MAX_WX_STRING 80
7539 5055
7540/* Values are in microsecond */ 5056/* Values are in microsecond */
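The replacement show_power_level() in the hunk above builds its one-line "SYSTEM/MODE/INDEX" report with the pointer-advancing sprintf() idiom. A stripped-down sketch with hard-coded illustrative values (the real function reads them from priv->power_data):

    #include <linux/kernel.h>

    /* Sketch only: values are hard-coded for illustration. */
    static ssize_t example_show_power_level(char *buf)
    {
        char *p = buf;

        p += sprintf(p, "SYSTEM:%s", "auto");  /* sprintf() returns chars written */
        p += sprintf(p, "\tMODE:%s", "auto");
        p += sprintf(p, "\tINDEX:%d", 5);
        p += sprintf(p, "\n");

        return p - buf + 1;  /* the driver counts the trailing NUL as well */
    }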
@@ -7553,41 +5069,6 @@ static const s32 period_duration[] = {
7553 1000000 5069 1000000
7554}; 5070};
7555 5071
7556static ssize_t show_power_level(struct device *d,
7557 struct device_attribute *attr, char *buf)
7558{
7559 struct iwl3945_priv *priv = dev_get_drvdata(d);
7560 int level = IWL_POWER_LEVEL(priv->power_mode);
7561 char *p = buf;
7562
7563 p += sprintf(p, "%d ", level);
7564 switch (level) {
7565 case IWL_POWER_MODE_CAM:
7566 case IWL_POWER_AC:
7567 p += sprintf(p, "(AC)");
7568 break;
7569 case IWL_POWER_BATTERY:
7570 p += sprintf(p, "(BATTERY)");
7571 break;
7572 default:
7573 p += sprintf(p,
7574 "(Timeout %dms, Period %dms)",
7575 timeout_duration[level - 1] / 1000,
7576 period_duration[level - 1] / 1000);
7577 }
7578
7579 if (!(priv->power_mode & IWL_POWER_ENABLED))
7580 p += sprintf(p, " OFF\n");
7581 else
7582 p += sprintf(p, " \n");
7583
7584 return p - buf + 1;
7585
7586}
7587
7588static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
7589 store_power_level);
7590
7591static ssize_t show_channels(struct device *d, 5072static ssize_t show_channels(struct device *d,
7592 struct device_attribute *attr, char *buf) 5073 struct device_attribute *attr, char *buf)
7593{ 5074{
@@ -7600,17 +5081,17 @@ static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
7600static ssize_t show_statistics(struct device *d, 5081static ssize_t show_statistics(struct device *d,
7601 struct device_attribute *attr, char *buf) 5082 struct device_attribute *attr, char *buf)
7602{ 5083{
7603 struct iwl3945_priv *priv = dev_get_drvdata(d); 5084 struct iwl_priv *priv = dev_get_drvdata(d);
7604 u32 size = sizeof(struct iwl3945_notif_statistics); 5085 u32 size = sizeof(struct iwl3945_notif_statistics);
7605 u32 len = 0, ofs = 0; 5086 u32 len = 0, ofs = 0;
7606 u8 *data = (u8 *)&priv->statistics; 5087 u8 *data = (u8 *)&priv->statistics_39;
7607 int rc = 0; 5088 int rc = 0;
7608 5089
7609 if (!iwl3945_is_alive(priv)) 5090 if (!iwl_is_alive(priv))
7610 return -EAGAIN; 5091 return -EAGAIN;
7611 5092
7612 mutex_lock(&priv->mutex); 5093 mutex_lock(&priv->mutex);
7613 rc = iwl3945_send_statistics_request(priv); 5094 rc = iwl_send_statistics_request(priv, 0);
7614 mutex_unlock(&priv->mutex); 5095 mutex_unlock(&priv->mutex);
7615 5096
7616 if (rc) { 5097 if (rc) {
@@ -7638,34 +5119,34 @@ static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
7638static ssize_t show_antenna(struct device *d, 5119static ssize_t show_antenna(struct device *d,
7639 struct device_attribute *attr, char *buf) 5120 struct device_attribute *attr, char *buf)
7640{ 5121{
7641 struct iwl3945_priv *priv = dev_get_drvdata(d); 5122 struct iwl_priv *priv = dev_get_drvdata(d);
7642 5123
7643 if (!iwl3945_is_alive(priv)) 5124 if (!iwl_is_alive(priv))
7644 return -EAGAIN; 5125 return -EAGAIN;
7645 5126
7646 return sprintf(buf, "%d\n", priv->antenna); 5127 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
7647} 5128}
7648 5129
7649static ssize_t store_antenna(struct device *d, 5130static ssize_t store_antenna(struct device *d,
7650 struct device_attribute *attr, 5131 struct device_attribute *attr,
7651 const char *buf, size_t count) 5132 const char *buf, size_t count)
7652{ 5133{
5134 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
7653 int ant; 5135 int ant;
7654 struct iwl3945_priv *priv = dev_get_drvdata(d);
7655 5136
7656 if (count == 0) 5137 if (count == 0)
7657 return 0; 5138 return 0;
7658 5139
7659 if (sscanf(buf, "%1i", &ant) != 1) { 5140 if (sscanf(buf, "%1i", &ant) != 1) {
7660 IWL_DEBUG_INFO("not in hex or decimal form.\n"); 5141 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
7661 return count; 5142 return count;
7662 } 5143 }
7663 5144
7664 if ((ant >= 0) && (ant <= 2)) { 5145 if ((ant >= 0) && (ant <= 2)) {
7665 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant); 5146 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
7666 priv->antenna = (enum iwl3945_antenna)ant; 5147 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
7667 } else 5148 } else
7668 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant); 5149 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
7669 5150
7670 5151
7671 return count; 5152 return count;
@@ -7676,8 +5157,8 @@ static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
7676static ssize_t show_status(struct device *d, 5157static ssize_t show_status(struct device *d,
7677 struct device_attribute *attr, char *buf) 5158 struct device_attribute *attr, char *buf)
7678{ 5159{
7679 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data; 5160 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7680 if (!iwl3945_is_alive(priv)) 5161 if (!iwl_is_alive(priv))
7681 return -EAGAIN; 5162 return -EAGAIN;
7682 return sprintf(buf, "0x%08x\n", (int)priv->status); 5163 return sprintf(buf, "0x%08x\n", (int)priv->status);
7683} 5164}
@@ -7691,7 +5172,7 @@ static ssize_t dump_error_log(struct device *d,
7691 char *p = (char *)buf; 5172 char *p = (char *)buf;
7692 5173
7693 if (p[0] == '1') 5174 if (p[0] == '1')
7694 iwl3945_dump_nic_error_log((struct iwl3945_priv *)d->driver_data); 5175 iwl3945_dump_nic_error_log((struct iwl_priv *)d->driver_data);
7695 5176
7696 return strnlen(buf, count); 5177 return strnlen(buf, count);
7697} 5178}
@@ -7705,7 +5186,7 @@ static ssize_t dump_event_log(struct device *d,
7705 char *p = (char *)buf; 5186 char *p = (char *)buf;
7706 5187
7707 if (p[0] == '1') 5188 if (p[0] == '1')
7708 iwl3945_dump_nic_event_log((struct iwl3945_priv *)d->driver_data); 5189 iwl3945_dump_nic_event_log((struct iwl_priv *)d->driver_data);
7709 5190
7710 return strnlen(buf, count); 5191 return strnlen(buf, count);
7711} 5192}
@@ -7718,7 +5199,7 @@ static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
7718 * 5199 *
7719 *****************************************************************************/ 5200 *****************************************************************************/
7720 5201
7721static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv) 5202static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
7722{ 5203{
7723 priv->workqueue = create_workqueue(DRV_NAME); 5204 priv->workqueue = create_workqueue(DRV_NAME);
7724 5205
@@ -7727,14 +5208,15 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7727 INIT_WORK(&priv->up, iwl3945_bg_up); 5208 INIT_WORK(&priv->up, iwl3945_bg_up);
7728 INIT_WORK(&priv->restart, iwl3945_bg_restart); 5209 INIT_WORK(&priv->restart, iwl3945_bg_restart);
7729 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 5210 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
7730 INIT_WORK(&priv->scan_completed, iwl3945_bg_scan_completed); 5211 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
7731 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
7732 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
7733 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7734 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 5212 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
7735 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 5213 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7736 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 5214 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
7737 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check); 5215 INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
5216 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
5217 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
5218 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
5219 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
7738 5220
7739 iwl3945_hw_setup_deferred_work(priv); 5221 iwl3945_hw_setup_deferred_work(priv);
7740 5222
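The setup_deferred_work() hunk above hands the scan work items over to the shared iwl_bg_* handlers and adds a delayed rfkill_poll job, which the probe path later re-queues every two seconds. A self-contained sketch of that INIT_DELAYED_WORK / queue_delayed_work pairing, with illustrative names:

    #include <linux/kernel.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    /* Sketch only: example_priv and example_rfkill_poll() are illustrative. */
    struct example_priv {
        struct workqueue_struct *workqueue;
        struct delayed_work rfkill_poll;
    };

    static void example_rfkill_poll(struct work_struct *work)
    {
        struct example_priv *priv =
            container_of(work, struct example_priv, rfkill_poll.work);

        /* ... check the hardware switch here ... */

        /* re-arm, mirroring the 2 * HZ interval used in the probe path */
        queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2 * HZ);
    }

    static void example_setup_deferred_work(struct example_priv *priv)
    {
        priv->workqueue = create_workqueue("example");
        INIT_DELAYED_WORK(&priv->rfkill_poll, example_rfkill_poll);
    }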
@@ -7742,7 +5224,7 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7742 iwl3945_irq_tasklet, (unsigned long)priv); 5224 iwl3945_irq_tasklet, (unsigned long)priv);
7743} 5225}
7744 5226
7745static void iwl3945_cancel_deferred_work(struct iwl3945_priv *priv) 5227static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
7746{ 5228{
7747 iwl3945_hw_cancel_deferred_work(priv); 5229 iwl3945_hw_cancel_deferred_work(priv);
7748 5230
@@ -7768,7 +5250,9 @@ static struct attribute *iwl3945_sysfs_entries[] = {
7768 &dev_attr_status.attr, 5250 &dev_attr_status.attr,
7769 &dev_attr_temperature.attr, 5251 &dev_attr_temperature.attr,
7770 &dev_attr_tx_power.attr, 5252 &dev_attr_tx_power.attr,
7771 5253#ifdef CONFIG_IWLWIFI_DEBUG
5254 &dev_attr_debug_level.attr,
5255#endif
7772 NULL 5256 NULL
7773}; 5257};
7774 5258
@@ -7785,9 +5269,8 @@ static struct ieee80211_ops iwl3945_hw_ops = {
7785 .remove_interface = iwl3945_mac_remove_interface, 5269 .remove_interface = iwl3945_mac_remove_interface,
7786 .config = iwl3945_mac_config, 5270 .config = iwl3945_mac_config,
7787 .config_interface = iwl3945_mac_config_interface, 5271 .config_interface = iwl3945_mac_config_interface,
7788 .configure_filter = iwl3945_configure_filter, 5272 .configure_filter = iwl_configure_filter,
7789 .set_key = iwl3945_mac_set_key, 5273 .set_key = iwl3945_mac_set_key,
7790 .get_stats = iwl3945_mac_get_stats,
7791 .get_tx_stats = iwl3945_mac_get_tx_stats, 5274 .get_tx_stats = iwl3945_mac_get_tx_stats,
7792 .conf_tx = iwl3945_mac_conf_tx, 5275 .conf_tx = iwl3945_mac_conf_tx,
7793 .reset_tsf = iwl3945_mac_reset_tsf, 5276 .reset_tsf = iwl3945_mac_reset_tsf,
@@ -7795,59 +5278,136 @@ static struct ieee80211_ops iwl3945_hw_ops = {
7795 .hw_scan = iwl3945_mac_hw_scan 5278 .hw_scan = iwl3945_mac_hw_scan
7796}; 5279};
7797 5280
5281static int iwl3945_init_drv(struct iwl_priv *priv)
5282{
5283 int ret;
5284 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
5285
5286 priv->retry_rate = 1;
5287 priv->ibss_beacon = NULL;
5288
5289 spin_lock_init(&priv->lock);
5290 spin_lock_init(&priv->power_data.lock);
5291 spin_lock_init(&priv->sta_lock);
5292 spin_lock_init(&priv->hcmd_lock);
5293
5294 INIT_LIST_HEAD(&priv->free_frames);
5295
5296 mutex_init(&priv->mutex);
5297
5298 /* Clear the driver's (not device's) station table */
5299 iwl3945_clear_stations_table(priv);
5300
5301 priv->data_retry_limit = -1;
5302 priv->ieee_channels = NULL;
5303 priv->ieee_rates = NULL;
5304 priv->band = IEEE80211_BAND_2GHZ;
5305
5306 priv->iw_mode = NL80211_IFTYPE_STATION;
5307
5308 iwl_reset_qos(priv);
5309
5310 priv->qos_data.qos_active = 0;
5311 priv->qos_data.qos_cap.val = 0;
5312
5313 priv->rates_mask = IWL_RATES_MASK;
5314 /* If power management is turned on, default to CAM mode */
5315 priv->power_mode = IWL_POWER_MODE_CAM;
5316 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
5317
5318 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
5319 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
5320 eeprom->version);
5321 ret = -EINVAL;
5322 goto err;
5323 }
5324 ret = iwl_init_channel_map(priv);
5325 if (ret) {
5326 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
5327 goto err;
5328 }
5329
5330 /* Set up txpower settings in driver for all channels */
5331 if (iwl3945_txpower_set_from_eeprom(priv)) {
5332 ret = -EIO;
5333 goto err_free_channel_map;
5334 }
5335
5336 ret = iwlcore_init_geos(priv);
5337 if (ret) {
5338 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
5339 goto err_free_channel_map;
5340 }
5341 iwl3945_init_hw_rates(priv, priv->ieee_rates);
5342
5343 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
5344 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5345 &priv->bands[IEEE80211_BAND_2GHZ];
5346 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
5347 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5348 &priv->bands[IEEE80211_BAND_5GHZ];
5349
5350 return 0;
5351
5352err_free_channel_map:
5353 iwl_free_channel_map(priv);
5354err:
5355 return ret;
5356}
5357
7798static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 5358static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7799{ 5359{
7800 int err = 0; 5360 int err = 0;
7801 struct iwl3945_priv *priv; 5361 struct iwl_priv *priv;
7802 struct ieee80211_hw *hw; 5362 struct ieee80211_hw *hw;
7803 struct iwl_3945_cfg *cfg = (struct iwl_3945_cfg *)(ent->driver_data); 5363 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
5364 struct iwl3945_eeprom *eeprom;
7804 unsigned long flags; 5365 unsigned long flags;
7805 5366
7806 /*********************** 5367 /***********************
7807 * 1. Allocating HW data 5368 * 1. Allocating HW data
7808 * ********************/ 5369 * ********************/
7809 5370
7810 /* Disabling hardware scan means that mac80211 will perform scans 5371 /* mac80211 allocates memory for this device instance, including
7811 * "the hard way", rather than using device's scan. */ 5372 * space for this driver's private structure */
7812 if (iwl3945_param_disable_hw_scan) { 5373 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
7813 IWL_DEBUG_INFO("Disabling hw_scan\n"); 5374 if (hw == NULL) {
7814 iwl3945_hw_ops.hw_scan = NULL; 5375 printk(KERN_ERR DRV_NAME "Can not allocate network device\n");
5376 err = -ENOMEM;
5377 goto out;
7815 } 5378 }
5379 priv = hw->priv;
5380 SET_IEEE80211_DEV(hw, &pdev->dev);
7816 5381
7817 if ((iwl3945_param_queues_num > IWL39_MAX_NUM_QUEUES) || 5382 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
7818 (iwl3945_param_queues_num < IWL_MIN_NUM_QUEUES)) { 5383 (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) {
7819 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 5384 IWL_ERR(priv,
7820 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); 5385 "invalid queues_num, should be between %d and %d\n",
5386 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
7821 err = -EINVAL; 5387 err = -EINVAL;
7822 goto out; 5388 goto out;
7823 } 5389 }
7824 5390
7825 /* mac80211 allocates memory for this device instance, including 5391 /*
7826 * space for this driver's private structure */ 5392 * Disabling hardware scan means that mac80211 will perform scans
7827 hw = ieee80211_alloc_hw(sizeof(struct iwl3945_priv), &iwl3945_hw_ops); 5393 * "the hard way", rather than using device's scan.
7828 if (hw == NULL) { 5394 */
7829 IWL_ERROR("Can not allocate network device\n"); 5395 if (iwl3945_mod_params.disable_hw_scan) {
7830 err = -ENOMEM; 5396 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
7831 goto out; 5397 iwl3945_hw_ops.hw_scan = NULL;
7832 } 5398 }
7833 5399
7834 SET_IEEE80211_DEV(hw, &pdev->dev);
7835 5400
7836 priv = hw->priv; 5401 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
7837 priv->hw = hw;
7838 priv->pci_dev = pdev;
7839 priv->cfg = cfg; 5402 priv->cfg = cfg;
5403 priv->pci_dev = pdev;
7840 5404
7841 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); 5405#ifdef CONFIG_IWLWIFI_DEBUG
7842 hw->rate_control_algorithm = "iwl-3945-rs"; 5406 priv->debug_level = iwl3945_mod_params.debug;
7843 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
7844
7845 /* Select antenna (may be helpful if only one antenna is connected) */
7846 priv->antenna = (enum iwl3945_antenna)iwl3945_param_antenna;
7847#ifdef CONFIG_IWL3945_DEBUG
7848 iwl3945_debug_level = iwl3945_param_debug;
7849 atomic_set(&priv->restrict_refcnt, 0); 5407 atomic_set(&priv->restrict_refcnt, 0);
7850#endif 5408#endif
5409 hw->rate_control_algorithm = "iwl-3945-rs";
5410 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
7851 5411
7852 /* Tell mac80211 our characteristics */ 5412 /* Tell mac80211 our characteristics */
7853 hw->flags = IEEE80211_HW_SIGNAL_DBM | 5413 hw->flags = IEEE80211_HW_SIGNAL_DBM |
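The large hunk above folds the old inline probe-time setup into a new iwl3945_init_drv() helper, which unwinds with the usual goto-based cleanup: a failed txpower or geos step frees only the channel map that was already allocated. A compilable sketch of that error-unwinding shape; alloc_a/alloc_b/free_a are stand-ins for the real steps, not names from the patch:

    #include <linux/errno.h>

    static int alloc_a(void) { return 0; }     /* stand-in for iwl_init_channel_map() */
    static int alloc_b(void) { return -EIO; }  /* stand-in for iwl3945_txpower_set_from_eeprom() */
    static void free_a(void) { }               /* stand-in for iwl_free_channel_map() */

    static int example_init_drv(void)
    {
        int ret;

        ret = alloc_a();
        if (ret)
            goto err;          /* nothing to undo yet */

        ret = alloc_b();
        if (ret)
            goto err_free_a;   /* undo only the step that already succeeded */

        return 0;

    err_free_a:
        free_a();
    err:
        return ret;
    }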
@@ -7857,7 +5417,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7857 BIT(NL80211_IFTYPE_STATION) | 5417 BIT(NL80211_IFTYPE_STATION) |
7858 BIT(NL80211_IFTYPE_ADHOC); 5418 BIT(NL80211_IFTYPE_ADHOC);
7859 5419
7860 hw->wiphy->fw_handles_regulatory = true; 5420 hw->wiphy->custom_regulatory = true;
5421
5422 hw->wiphy->max_scan_ssids = 1;
7861 5423
7862 /* 4 EDCA QOS priorities */ 5424 /* 4 EDCA QOS priorities */
7863 hw->queues = 4; 5425 hw->queues = 4;
@@ -7876,7 +5438,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7876 if (!err) 5438 if (!err)
7877 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 5439 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
7878 if (err) { 5440 if (err) {
7879 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); 5441 IWL_WARN(priv, "No suitable DMA available.\n");
7880 goto out_pci_disable_device; 5442 goto out_pci_disable_device;
7881 } 5443 }
7882 5444
@@ -7894,98 +5456,58 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7894 goto out_pci_release_regions; 5456 goto out_pci_release_regions;
7895 } 5457 }
7896 5458
7897 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", 5459 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
7898 (unsigned long long) pci_resource_len(pdev, 0)); 5460 (unsigned long long) pci_resource_len(pdev, 0));
7899 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); 5461 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
7900 5462
7901 /* We disable the RETRY_TIMEOUT register (0x41) to keep 5463 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7902 * PCI Tx retries from interfering with C3 CPU state */ 5464 * PCI Tx retries from interfering with C3 CPU state */
7903 pci_write_config_byte(pdev, 0x41, 0x00); 5465 pci_write_config_byte(pdev, 0x41, 0x00);
7904 5466
7905 /* nic init */ 5467 /* amp init */
7906 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS, 5468 err = priv->cfg->ops->lib->apm_ops.init(priv);
7907 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
7908
7909 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7910 err = iwl3945_poll_direct_bit(priv, CSR_GP_CNTRL,
7911 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
7912 if (err < 0) { 5469 if (err < 0) {
7913 IWL_DEBUG_INFO("Failed to init the card\n"); 5470 IWL_DEBUG_INFO(priv, "Failed to init APMG\n");
7914 goto out_remove_sysfs; 5471 goto out_iounmap;
7915 } 5472 }
7916 5473
7917 /*********************** 5474 /***********************
7918 * 4. Read EEPROM 5475 * 4. Read EEPROM
7919 * ********************/ 5476 * ********************/
5477
7920 /* Read the EEPROM */ 5478 /* Read the EEPROM */
7921 err = iwl3945_eeprom_init(priv); 5479 err = iwl_eeprom_init(priv);
7922 if (err) { 5480 if (err) {
7923 IWL_ERROR("Unable to init EEPROM\n"); 5481 IWL_ERR(priv, "Unable to init EEPROM\n");
7924 goto out_remove_sysfs; 5482 goto out_remove_sysfs;
7925 } 5483 }
7926 /* MAC Address location in EEPROM same for 3945/4965 */ 5484 /* MAC Address location in EEPROM same for 3945/4965 */
7927 get_eeprom_mac(priv, priv->mac_addr); 5485 eeprom = (struct iwl3945_eeprom *)priv->eeprom;
7928 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr); 5486 memcpy(priv->mac_addr, eeprom->mac_address, ETH_ALEN);
5487 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr);
7929 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 5488 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
7930 5489
7931 /*********************** 5490 /***********************
7932 * 5. Setup HW Constants 5491 * 5. Setup HW Constants
7933 * ********************/ 5492 * ********************/
7934 /* Device-specific setup */ 5493 /* Device-specific setup */
7935 if (iwl3945_hw_set_hw_setting(priv)) { 5494 if (iwl3945_hw_set_hw_params(priv)) {
7936 IWL_ERROR("failed to set hw settings\n"); 5495 IWL_ERR(priv, "failed to set hw settings\n");
7937 goto out_iounmap; 5496 goto out_iounmap;
7938 } 5497 }
7939 5498
7940 /*********************** 5499 /***********************
7941 * 6. Setup priv 5500 * 6. Setup priv
7942 * ********************/ 5501 * ********************/
7943 priv->retry_rate = 1;
7944 priv->ibss_beacon = NULL;
7945
7946 spin_lock_init(&priv->lock);
7947 spin_lock_init(&priv->power_data.lock);
7948 spin_lock_init(&priv->sta_lock);
7949 spin_lock_init(&priv->hcmd_lock);
7950 5502
7951 INIT_LIST_HEAD(&priv->free_frames); 5503 err = iwl3945_init_drv(priv);
7952 mutex_init(&priv->mutex);
7953
7954 /* Clear the driver's (not device's) station table */
7955 iwl3945_clear_stations_table(priv);
7956
7957 priv->data_retry_limit = -1;
7958 priv->ieee_channels = NULL;
7959 priv->ieee_rates = NULL;
7960 priv->band = IEEE80211_BAND_2GHZ;
7961
7962 priv->iw_mode = NL80211_IFTYPE_STATION;
7963
7964 iwl3945_reset_qos(priv);
7965
7966 priv->qos_data.qos_active = 0;
7967 priv->qos_data.qos_cap.val = 0;
7968
7969
7970 priv->rates_mask = IWL_RATES_MASK;
7971 /* If power management is turned on, default to AC mode */
7972 priv->power_mode = IWL_POWER_AC;
7973 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
7974
7975 err = iwl3945_init_channel_map(priv);
7976 if (err) { 5504 if (err) {
7977 IWL_ERROR("initializing regulatory failed: %d\n", err); 5505 IWL_ERR(priv, "initializing driver failed\n");
7978 goto out_release_irq; 5506 goto out_free_geos;
7979 }
7980
7981 err = iwl3945_init_geos(priv);
7982 if (err) {
7983 IWL_ERROR("initializing geos failed: %d\n", err);
7984 goto out_free_channel_map;
7985 } 5507 }
7986 5508
7987 printk(KERN_INFO DRV_NAME 5509 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
7988 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name); 5510 priv->cfg->name);
7989 5511
7990 /*********************************** 5512 /***********************************
7991 * 7. Initialize Module Parameters 5513 * 7. Initialize Module Parameters
@@ -7993,9 +5515,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7993 5515
7994 /* Initialize module parameter values here */ 5516 /* Initialize module parameter values here */
7995 /* Disable radio (SW RF KILL) via parameter when loading driver */ 5517 /* Disable radio (SW RF KILL) via parameter when loading driver */
7996 if (iwl3945_param_disable) { 5518 if (iwl3945_mod_params.disable) {
7997 set_bit(STATUS_RF_KILL_SW, &priv->status); 5519 set_bit(STATUS_RF_KILL_SW, &priv->status);
7998 IWL_DEBUG_INFO("Radio disabled.\n"); 5520 IWL_DEBUG_INFO(priv, "Radio disabled.\n");
7999 } 5521 }
8000 5522
8001 5523
@@ -8007,56 +5529,62 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8007 iwl3945_disable_interrupts(priv); 5529 iwl3945_disable_interrupts(priv);
8008 spin_unlock_irqrestore(&priv->lock, flags); 5530 spin_unlock_irqrestore(&priv->lock, flags);
8009 5531
5532 pci_enable_msi(priv->pci_dev);
5533
5534 err = request_irq(priv->pci_dev->irq, iwl3945_isr, IRQF_SHARED,
5535 DRV_NAME, priv);
5536 if (err) {
5537 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
5538 goto out_disable_msi;
5539 }
5540
8010 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); 5541 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8011 if (err) { 5542 if (err) {
8012 IWL_ERROR("failed to create sysfs device attributes\n"); 5543 IWL_ERR(priv, "failed to create sysfs device attributes\n");
8013 goto out_free_geos; 5544 goto out_release_irq;
8014 } 5545 }
8015 5546
8016 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6); 5547 iwl_set_rxon_channel(priv,
5548 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
8017 iwl3945_setup_deferred_work(priv); 5549 iwl3945_setup_deferred_work(priv);
8018 iwl3945_setup_rx_handlers(priv); 5550 iwl3945_setup_rx_handlers(priv);
8019 5551
8020 /***********************
8021 * 9. Conclude
8022 * ********************/
8023 pci_save_state(pdev);
8024 pci_disable_device(pdev);
8025
8026 /********************************* 5552 /*********************************
8027 * 10. Setup and Register mac80211 5553 * 9. Setup and Register mac80211
8028 * *******************************/ 5554 * *******************************/
8029 5555
8030 err = ieee80211_register_hw(priv->hw); 5556 err = ieee80211_register_hw(priv->hw);
8031 if (err) { 5557 if (err) {
8032 IWL_ERROR("Failed to register network device (error %d)\n", err); 5558 IWL_ERR(priv, "Failed to register network device: %d\n", err);
8033 goto out_remove_sysfs; 5559 goto out_remove_sysfs;
8034 } 5560 }
8035 5561
8036 priv->hw->conf.beacon_int = 100; 5562 priv->hw->conf.beacon_int = 100;
8037 priv->mac80211_registered = 1; 5563 priv->mac80211_registered = 1;
8038 5564
8039 5565 err = iwl_rfkill_init(priv);
8040 err = iwl3945_rfkill_init(priv);
8041 if (err) 5566 if (err)
8042 IWL_ERROR("Unable to initialize RFKILL system. " 5567 IWL_ERR(priv, "Unable to initialize RFKILL system. "
8043 "Ignoring error: %d\n", err); 5568 "Ignoring error: %d\n", err);
8044 5569
5570 /* Start monitoring the killswitch */
5571 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
5572 2 * HZ);
5573
8045 return 0; 5574 return 0;
8046 5575
8047 out_remove_sysfs: 5576 out_remove_sysfs:
8048 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 5577 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8049 out_free_geos: 5578 out_free_geos:
8050 iwl3945_free_geos(priv); 5579 iwlcore_free_geos(priv);
8051 out_free_channel_map:
8052 iwl3945_free_channel_map(priv);
8053
8054 5580
8055 out_release_irq: 5581 out_release_irq:
5582 free_irq(priv->pci_dev->irq, priv);
8056 destroy_workqueue(priv->workqueue); 5583 destroy_workqueue(priv->workqueue);
8057 priv->workqueue = NULL; 5584 priv->workqueue = NULL;
8058 iwl3945_unset_hw_setting(priv); 5585 iwl3945_unset_hw_params(priv);
8059 5586 out_disable_msi:
5587 pci_disable_msi(priv->pci_dev);
8060 out_iounmap: 5588 out_iounmap:
8061 pci_iounmap(pdev, priv->hw_base); 5589 pci_iounmap(pdev, priv->hw_base);
8062 out_pci_release_regions: 5590 out_pci_release_regions:
@@ -8072,17 +5600,22 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8072 5600
8073static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) 5601static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8074{ 5602{
8075 struct iwl3945_priv *priv = pci_get_drvdata(pdev); 5603 struct iwl_priv *priv = pci_get_drvdata(pdev);
8076 unsigned long flags; 5604 unsigned long flags;
8077 5605
8078 if (!priv) 5606 if (!priv)
8079 return; 5607 return;
8080 5608
8081 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); 5609 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
8082 5610
8083 set_bit(STATUS_EXIT_PENDING, &priv->status); 5611 set_bit(STATUS_EXIT_PENDING, &priv->status);
8084 5612
8085 iwl3945_down(priv); 5613 if (priv->mac80211_registered) {
5614 ieee80211_unregister_hw(priv->hw);
5615 priv->mac80211_registered = 0;
5616 } else {
5617 iwl3945_down(priv);
5618 }
8086 5619
8087 /* make sure we flush any pending irq or 5620 /* make sure we flush any pending irq or
8088 * tasklet for the driver 5621 * tasklet for the driver
@@ -8095,19 +5628,18 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8095 5628
8096 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 5629 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8097 5630
8098 iwl3945_rfkill_unregister(priv); 5631 iwl_rfkill_unregister(priv);
5632 cancel_delayed_work(&priv->rfkill_poll);
5633
8099 iwl3945_dealloc_ucode_pci(priv); 5634 iwl3945_dealloc_ucode_pci(priv);
8100 5635
8101 if (priv->rxq.bd) 5636 if (priv->rxq.bd)
8102 iwl3945_rx_queue_free(priv, &priv->rxq); 5637 iwl_rx_queue_free(priv, &priv->rxq);
8103 iwl3945_hw_txq_ctx_free(priv); 5638 iwl3945_hw_txq_ctx_free(priv);
8104 5639
8105 iwl3945_unset_hw_setting(priv); 5640 iwl3945_unset_hw_params(priv);
8106 iwl3945_clear_stations_table(priv); 5641 iwl3945_clear_stations_table(priv);
8107 5642
8108 if (priv->mac80211_registered)
8109 ieee80211_unregister_hw(priv->hw);
8110
8111 /*netif_stop_queue(dev); */ 5643 /*netif_stop_queue(dev); */
8112 flush_workqueue(priv->workqueue); 5644 flush_workqueue(priv->workqueue);
8113 5645
@@ -8117,13 +5649,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8117 destroy_workqueue(priv->workqueue); 5649 destroy_workqueue(priv->workqueue);
8118 priv->workqueue = NULL; 5650 priv->workqueue = NULL;
8119 5651
5652 free_irq(pdev->irq, priv);
5653 pci_disable_msi(pdev);
5654
8120 pci_iounmap(pdev, priv->hw_base); 5655 pci_iounmap(pdev, priv->hw_base);
8121 pci_release_regions(pdev); 5656 pci_release_regions(pdev);
8122 pci_disable_device(pdev); 5657 pci_disable_device(pdev);
8123 pci_set_drvdata(pdev, NULL); 5658 pci_set_drvdata(pdev, NULL);
8124 5659
8125 iwl3945_free_channel_map(priv); 5660 iwl_free_channel_map(priv);
8126 iwl3945_free_geos(priv); 5661 iwlcore_free_geos(priv);
8127 kfree(priv->scan); 5662 kfree(priv->scan);
8128 if (priv->ibss_beacon) 5663 if (priv->ibss_beacon)
8129 dev_kfree_skb(priv->ibss_beacon); 5664 dev_kfree_skb(priv->ibss_beacon);
@@ -8135,27 +5670,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8135 5670
8136static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state) 5671static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
8137{ 5672{
8138 struct iwl3945_priv *priv = pci_get_drvdata(pdev); 5673 struct iwl_priv *priv = pci_get_drvdata(pdev);
8139 5674
8140 if (priv->is_open) { 5675 if (priv->is_open) {
8141 set_bit(STATUS_IN_SUSPEND, &priv->status); 5676 set_bit(STATUS_IN_SUSPEND, &priv->status);
8142 iwl3945_mac_stop(priv->hw); 5677 iwl3945_mac_stop(priv->hw);
8143 priv->is_open = 1; 5678 priv->is_open = 1;
8144 } 5679 }
8145 5680 pci_save_state(pdev);
8146 /* pci driver assumes state will be saved in this function. 5681 pci_disable_device(pdev);
8147 * pci state is saved and device disabled when interface is
8148 * stopped, so at this time pci device will always be disabled -
8149 * whether interface was started or not. saving pci state now will
8150 * cause saved state be that of a disabled device, which will cause
8151 * problems during resume in that we will end up with a disabled device.
8152 *
8153 * indicate that the current saved state (from when interface was
8154 * stopped) is valid. if interface was never up at time of suspend
8155 * then the saved state will still be valid as it was saved during
8156 * .probe. */
8157 pdev->state_saved = true;
8158
8159 pci_set_power_state(pdev, PCI_D3hot); 5682 pci_set_power_state(pdev, PCI_D3hot);
8160 5683
8161 return 0; 5684 return 0;
@@ -8163,9 +5686,14 @@ static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
8163 5686
8164static int iwl3945_pci_resume(struct pci_dev *pdev) 5687static int iwl3945_pci_resume(struct pci_dev *pdev)
8165{ 5688{
8166 struct iwl3945_priv *priv = pci_get_drvdata(pdev); 5689 struct iwl_priv *priv = pci_get_drvdata(pdev);
5690 int ret;
8167 5691
8168 pci_set_power_state(pdev, PCI_D0); 5692 pci_set_power_state(pdev, PCI_D0);
5693 ret = pci_enable_device(pdev);
5694 if (ret)
5695 return ret;
5696 pci_restore_state(pdev);
8169 5697
8170 if (priv->is_open) 5698 if (priv->is_open)
8171 iwl3945_mac_start(priv->hw); 5699 iwl3945_mac_start(priv->hw);
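The suspend/resume hunks above make the two PM paths symmetric: suspend now saves config space and disables the device itself (dropping the old pdev->state_saved workaround), and resume re-enables and restores before restarting the interface. A minimal sketch of that pairing; example_suspend()/example_resume() are illustrative names and the driver's mac stop/start calls are elided:

    #include <linux/pci.h>

    static int example_suspend(struct pci_dev *pdev, pm_message_t state)
    {
        pci_save_state(pdev);              /* snapshot config space while still live */
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
    }

    static int example_resume(struct pci_dev *pdev)
    {
        int ret;

        pci_set_power_state(pdev, PCI_D0);
        ret = pci_enable_device(pdev);     /* must succeed before touching the device */
        if (ret)
            return ret;
        pci_restore_state(pdev);           /* undo what example_suspend() saved */
        return 0;
    }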
@@ -8176,114 +5704,6 @@ static int iwl3945_pci_resume(struct pci_dev *pdev)
8176 5704
8177#endif /* CONFIG_PM */ 5705#endif /* CONFIG_PM */
8178 5706
8179/*************** RFKILL FUNCTIONS **********/
8180#ifdef CONFIG_IWL3945_RFKILL
8181/* software rf-kill from user */
8182static int iwl3945_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
8183{
8184 struct iwl3945_priv *priv = data;
8185 int err = 0;
8186
8187 if (!priv->rfkill)
8188 return 0;
8189
8190 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
8191 return 0;
8192
8193 IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state);
8194 mutex_lock(&priv->mutex);
8195
8196 switch (state) {
8197 case RFKILL_STATE_UNBLOCKED:
8198 if (iwl3945_is_rfkill_hw(priv)) {
8199 err = -EBUSY;
8200 goto out_unlock;
8201 }
8202 iwl3945_radio_kill_sw(priv, 0);
8203 break;
8204 case RFKILL_STATE_SOFT_BLOCKED:
8205 iwl3945_radio_kill_sw(priv, 1);
8206 break;
8207 default:
8208 IWL_WARNING("we received unexpected RFKILL state %d\n", state);
8209 break;
8210 }
8211out_unlock:
8212 mutex_unlock(&priv->mutex);
8213
8214 return err;
8215}
8216
8217int iwl3945_rfkill_init(struct iwl3945_priv *priv)
8218{
8219 struct device *device = wiphy_dev(priv->hw->wiphy);
8220 int ret = 0;
8221
8222 BUG_ON(device == NULL);
8223
8224 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n");
8225 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
8226 if (!priv->rfkill) {
8227 IWL_ERROR("Unable to allocate rfkill device.\n");
8228 ret = -ENOMEM;
8229 goto error;
8230 }
8231
8232 priv->rfkill->name = priv->cfg->name;
8233 priv->rfkill->data = priv;
8234 priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
8235 priv->rfkill->toggle_radio = iwl3945_rfkill_soft_rf_kill;
8236 priv->rfkill->user_claim_unsupported = 1;
8237
8238 priv->rfkill->dev.class->suspend = NULL;
8239 priv->rfkill->dev.class->resume = NULL;
8240
8241 ret = rfkill_register(priv->rfkill);
8242 if (ret) {
8243 IWL_ERROR("Unable to register rfkill: %d\n", ret);
8244 goto freed_rfkill;
8245 }
8246
8247 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
8248 return ret;
8249
8250freed_rfkill:
8251 if (priv->rfkill != NULL)
8252 rfkill_free(priv->rfkill);
8253 priv->rfkill = NULL;
8254
8255error:
8256 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
8257 return ret;
8258}
8259
8260void iwl3945_rfkill_unregister(struct iwl3945_priv *priv)
8261{
8262 if (priv->rfkill)
8263 rfkill_unregister(priv->rfkill);
8264
8265 priv->rfkill = NULL;
8266}
8267
8268/* set rf-kill to the right state. */
8269void iwl3945_rfkill_set_hw_state(struct iwl3945_priv *priv)
8270{
8271
8272 if (!priv->rfkill)
8273 return;
8274
8275 if (iwl3945_is_rfkill_hw(priv)) {
8276 rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
8277 return;
8278 }
8279
8280 if (!iwl3945_is_rfkill_sw(priv))
8281 rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
8282 else
8283 rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
8284}
8285#endif
8286
8287/***************************************************************************** 5707/*****************************************************************************
8288 * 5708 *
8289 * driver and module entry point 5709 * driver and module entry point
@@ -8310,29 +5730,19 @@ static int __init iwl3945_init(void)
8310 5730
8311 ret = iwl3945_rate_control_register(); 5731 ret = iwl3945_rate_control_register();
8312 if (ret) { 5732 if (ret) {
8313 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret); 5733 printk(KERN_ERR DRV_NAME
5734 "Unable to register rate control algorithm: %d\n", ret);
8314 return ret; 5735 return ret;
8315 } 5736 }
8316 5737
8317 ret = pci_register_driver(&iwl3945_driver); 5738 ret = pci_register_driver(&iwl3945_driver);
8318 if (ret) { 5739 if (ret) {
8319 IWL_ERROR("Unable to initialize PCI module\n"); 5740 printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n");
8320 goto error_register; 5741 goto error_register;
8321 } 5742 }
8322#ifdef CONFIG_IWL3945_DEBUG
8323 ret = driver_create_file(&iwl3945_driver.driver, &driver_attr_debug_level);
8324 if (ret) {
8325 IWL_ERROR("Unable to create driver sysfs file\n");
8326 goto error_debug;
8327 }
8328#endif
8329 5743
8330 return ret; 5744 return ret;
8331 5745
8332#ifdef CONFIG_IWL3945_DEBUG
8333error_debug:
8334 pci_unregister_driver(&iwl3945_driver);
8335#endif
8336error_register: 5746error_register:
8337 iwl3945_rate_control_unregister(); 5747 iwl3945_rate_control_unregister();
8338 return ret; 5748 return ret;
@@ -8340,29 +5750,29 @@ error_register:
8340 5750
8341static void __exit iwl3945_exit(void) 5751static void __exit iwl3945_exit(void)
8342{ 5752{
8343#ifdef CONFIG_IWL3945_DEBUG
8344 driver_remove_file(&iwl3945_driver.driver, &driver_attr_debug_level);
8345#endif
8346 pci_unregister_driver(&iwl3945_driver); 5753 pci_unregister_driver(&iwl3945_driver);
8347 iwl3945_rate_control_unregister(); 5754 iwl3945_rate_control_unregister();
8348} 5755}
8349 5756
8350MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); 5757MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
8351 5758
8352module_param_named(antenna, iwl3945_param_antenna, int, 0444); 5759module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
8353MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 5760MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
8354module_param_named(disable, iwl3945_param_disable, int, 0444); 5761module_param_named(disable, iwl3945_mod_params.disable, int, 0444);
8355MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); 5762MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
8356module_param_named(hwcrypto, iwl3945_param_hwcrypto, int, 0444); 5763module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
8357MODULE_PARM_DESC(hwcrypto, 5764MODULE_PARM_DESC(swcrypto,
8358 "using hardware crypto engine (default 0 [software])\n"); 5765 "using software crypto (default 1 [software])\n");
8359module_param_named(debug, iwl3945_param_debug, uint, 0444); 5766module_param_named(debug, iwl3945_mod_params.debug, uint, 0444);
8360MODULE_PARM_DESC(debug, "debug output mask"); 5767MODULE_PARM_DESC(debug, "debug output mask");
8361module_param_named(disable_hw_scan, iwl3945_param_disable_hw_scan, int, 0444); 5768module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444);
8362MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 5769MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
8363 5770
8364module_param_named(queues_num, iwl3945_param_queues_num, int, 0444); 5771module_param_named(queues_num, iwl3945_mod_params.num_of_queues, int, 0444);
8365MODULE_PARM_DESC(queues_num, "number of hw queues."); 5772MODULE_PARM_DESC(queues_num, "number of hw queues.");
8366 5773
5774module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, 0444);
5775MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
5776
8367module_exit(iwl3945_exit); 5777module_exit(iwl3945_exit);
8368module_init(iwl3945_init); 5778module_init(iwl3945_init);
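The module-parameter block above now points every option at a field of the shared iwl3945_mod_params structure, and renames hwcrypto to swcrypto with software crypto as the default. A short sketch of that struct-backed module_param_named() style; the struct and field names here are illustrative, not the driver's exact layout:

    #include <linux/module.h>

    struct example_mod_params {
        int antenna;
        int disable;
        int sw_crypto;
        unsigned int debug;
    };

    static struct example_mod_params example_mod_params = {
        .sw_crypto = 1,  /* default: software crypto, matching the new help text */
    };

    module_param_named(swcrypto, example_mod_params.sw_crypto, int, 0444);
    MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
    module_param_named(debug, example_mod_params.debug, uint, 0444);
    MODULE_PARM_DESC(debug, "debug output mask");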
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 02080a3682a..0b691858450 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -4,8 +4,10 @@ libertas-objs := main.o wext.o rx.o tx.o cmd.o cmdresp.o scan.o 11d.o \
4usb8xxx-objs += if_usb.o 4usb8xxx-objs += if_usb.o
5libertas_cs-objs += if_cs.o 5libertas_cs-objs += if_cs.o
6libertas_sdio-objs += if_sdio.o 6libertas_sdio-objs += if_sdio.o
7libertas_spi-objs += if_spi.o
7 8
8obj-$(CONFIG_LIBERTAS) += libertas.o 9obj-$(CONFIG_LIBERTAS) += libertas.o
9obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o 10obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o
10obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o 11obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o
11obj-$(CONFIG_LIBERTAS_SDIO) += libertas_sdio.o 12obj-$(CONFIG_LIBERTAS_SDIO) += libertas_sdio.o
13obj-$(CONFIG_LIBERTAS_SPI) += libertas_spi.o
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index ec4efd7ff3c..50e28a0cdfe 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -629,7 +629,7 @@ static ssize_t lbs_rdrf_write(struct file *file,
629 res = -EFAULT; 629 res = -EFAULT;
630 goto out_unlock; 630 goto out_unlock;
631 } 631 }
632 priv->rf_offset = simple_strtoul((char *)buf, NULL, 16); 632 priv->rf_offset = simple_strtoul(buf, NULL, 16);
633 res = count; 633 res = count;
634out_unlock: 634out_unlock:
635 free_page(addr); 635 free_page(addr);
@@ -680,12 +680,12 @@ out_unlock:
680} 680}
681 681
682struct lbs_debugfs_files { 682struct lbs_debugfs_files {
683 char *name; 683 const char *name;
684 int perm; 684 int perm;
685 struct file_operations fops; 685 struct file_operations fops;
686}; 686};
687 687
688static struct lbs_debugfs_files debugfs_files[] = { 688static const struct lbs_debugfs_files debugfs_files[] = {
689 { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, 689 { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
690 { "getscantable", 0444, FOPS(lbs_getscantable, 690 { "getscantable", 0444, FOPS(lbs_getscantable,
691 write_file_dummy), }, 691 write_file_dummy), },
@@ -693,7 +693,7 @@ static struct lbs_debugfs_files debugfs_files[] = {
693 lbs_sleepparams_write), }, 693 lbs_sleepparams_write), },
694}; 694};
695 695
696static struct lbs_debugfs_files debugfs_events_files[] = { 696static const struct lbs_debugfs_files debugfs_events_files[] = {
697 {"low_rssi", 0644, FOPS(lbs_lowrssi_read, 697 {"low_rssi", 0644, FOPS(lbs_lowrssi_read,
698 lbs_lowrssi_write), }, 698 lbs_lowrssi_write), },
699 {"low_snr", 0644, FOPS(lbs_lowsnr_read, 699 {"low_snr", 0644, FOPS(lbs_lowsnr_read,
@@ -708,7 +708,7 @@ static struct lbs_debugfs_files debugfs_events_files[] = {
708 lbs_highsnr_write), }, 708 lbs_highsnr_write), },
709}; 709};
710 710
711static struct lbs_debugfs_files debugfs_regs_files[] = { 711static const struct lbs_debugfs_files debugfs_regs_files[] = {
712 {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), }, 712 {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), },
713 {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), }, 713 {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), },
714 {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), }, 714 {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), },
@@ -735,7 +735,7 @@ void lbs_debugfs_remove(void)
735void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) 735void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
736{ 736{
737 int i; 737 int i;
738 struct lbs_debugfs_files *files; 738 const struct lbs_debugfs_files *files;
739 if (!lbs_dir) 739 if (!lbs_dir)
740 goto exit; 740 goto exit;
741 741
@@ -938,7 +938,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
938 return (ssize_t)cnt; 938 return (ssize_t)cnt;
939} 939}
940 940
941static struct file_operations lbs_debug_fops = { 941static const struct file_operations lbs_debug_fops = {
942 .owner = THIS_MODULE, 942 .owner = THIS_MODULE,
943 .open = open_file_generic, 943 .open = open_file_generic,
944 .write = lbs_debugfs_write, 944 .write = lbs_debugfs_write,
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index c364e4c01d1..6388b05df4f 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -41,6 +41,7 @@
41#define LBS_DEB_HEX 0x00200000 41#define LBS_DEB_HEX 0x00200000
42#define LBS_DEB_SDIO 0x00400000 42#define LBS_DEB_SDIO 0x00400000
43#define LBS_DEB_SYSFS 0x00800000 43#define LBS_DEB_SYSFS 0x00800000
44#define LBS_DEB_SPI 0x01000000
44 45
45extern unsigned int lbs_debug; 46extern unsigned int lbs_debug;
46 47
@@ -84,6 +85,7 @@ do { if ((lbs_debug & (grp)) == (grp)) \
84#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) 85#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args)
85#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args) 86#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
86#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args) 87#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args)
88#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args)
87 89
88#define lbs_pr_info(format, args...) \ 90#define lbs_pr_info(format, args...) \
89 printk(KERN_INFO DRV_NAME": " format, ## args) 91 printk(KERN_INFO DRV_NAME": " format, ## args)
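The defs.h hunk above adds an LBS_DEB_SPI bit (0x01000000) and a matching lbs_deb_spi() macro for the new SPI interface code. Illustrative use, assuming defs.h from this hunk is included; the message prints only when the lbs_debug mask has LBS_DEB_SPI set:

    #include "defs.h"  /* lbs_deb_spi(), LBS_DEB_SPI */

    static void example_spi_trace(int nbytes)
    {
        /* illustrative only: nbytes and the message are made up */
        lbs_deb_spi("wrote %d bytes to the card\n", nbytes);
    }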
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 277ff1975bd..d4457ef808a 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -66,6 +66,7 @@
66#define CMD_802_11_LED_GPIO_CTRL 0x004e 66#define CMD_802_11_LED_GPIO_CTRL 0x004e
67#define CMD_802_11_EEPROM_ACCESS 0x0059 67#define CMD_802_11_EEPROM_ACCESS 0x0059
68#define CMD_802_11_BAND_CONFIG 0x0058 68#define CMD_802_11_BAND_CONFIG 0x0058
69#define CMD_GSPI_BUS_CONFIG 0x005a
69#define CMD_802_11D_DOMAIN_INFO 0x005b 70#define CMD_802_11D_DOMAIN_INFO 0x005b
70#define CMD_802_11_KEY_MATERIAL 0x005e 71#define CMD_802_11_KEY_MATERIAL 0x005e
71#define CMD_802_11_SLEEP_PARAMS 0x0066 72#define CMD_802_11_SLEEP_PARAMS 0x0066
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index f6a79a653b7..a899aeb676b 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -221,6 +221,14 @@ struct cmd_ds_mac_multicast_adr {
221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; 221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
222} __attribute__ ((packed)); 222} __attribute__ ((packed));
223 223
224struct cmd_ds_gspi_bus_config {
225 struct cmd_header hdr;
226 __le16 action;
227 __le16 bus_delay_mode;
228 __le16 host_time_delay_to_read_port;
229 __le16 host_time_delay_to_read_register;
230} __attribute__ ((packed));
231
224struct cmd_ds_802_11_authenticate { 232struct cmd_ds_802_11_authenticate {
225 u8 macaddr[ETH_ALEN]; 233 u8 macaddr[ETH_ALEN];
226 u8 authtype; 234 u8 authtype;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 842a08d1f10..8f8934a5ba3 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -151,7 +151,7 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
151 for (i = 0; i < 100000; i++) { 151 for (i = 0; i < 100000; i++) {
152 u8 val = if_cs_read8(card, addr); 152 u8 val = if_cs_read8(card, addr);
153 if (val == reg) 153 if (val == reg)
154 return i; 154 return 0;
155 udelay(5); 155 udelay(5);
156 } 156 }
157 return -ETIME; 157 return -ETIME;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
new file mode 100644
index 00000000000..07311e71af9
--- /dev/null
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -0,0 +1,1218 @@
1/*
2 * linux/drivers/net/wireless/libertas/if_spi.c
3 *
4 * Driver for Marvell SPI WLAN cards.
5 *
6 * Copyright 2008 Analog Devices Inc.
7 *
8 * Authors:
9 * Andrey Yurovsky <andrey@cozybit.com>
10 * Colin McCabe <colin@cozybit.com>
11 *
12 * Inspired by if_sdio.c, Copyright 2007-2008 Pierre Ossman
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 */
19
20#include <linux/moduleparam.h>
21#include <linux/firmware.h>
22#include <linux/gpio.h>
23#include <linux/jiffies.h>
24#include <linux/kthread.h>
25#include <linux/list.h>
26#include <linux/netdevice.h>
27#include <linux/spi/libertas_spi.h>
28#include <linux/spi/spi.h>
29
30#include "host.h"
31#include "decl.h"
32#include "defs.h"
33#include "dev.h"
34#include "if_spi.h"
35
36struct if_spi_packet {
37 struct list_head list;
38 u16 blen;
39 u8 buffer[0] __attribute__((aligned(4)));
40};
41
42struct if_spi_card {
43 struct spi_device *spi;
44 struct lbs_private *priv;
45 struct libertas_spi_platform_data *pdata;
46
47 char helper_fw_name[FIRMWARE_NAME_MAX];
48 char main_fw_name[FIRMWARE_NAME_MAX];
49
50 /* The card ID and card revision, as reported by the hardware. */
51 u16 card_id;
52 u8 card_rev;
53
54 /* Pin number for our GPIO chip-select. */
55 /* TODO: Once the generic SPI layer has some additional features, we
56 * should take this out and use the normal chip select here.
57 * We need support for chip select delays, and not dropping chipselect
58 * after each word. */
59 int gpio_cs;
60
61 /* The last time that we initiated an SPU operation */
62 unsigned long prev_xfer_time;
63
64 int use_dummy_writes;
65 unsigned long spu_port_delay;
66 unsigned long spu_reg_delay;
67
68 /* Handles all SPI communication (except for FW load) */
69 struct task_struct *spi_thread;
70 int run_thread;
71
72 /* Used to wake up the spi_thread */
73 struct semaphore spi_ready;
74 struct semaphore spi_thread_terminated;
75
76 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
77
78 /* A buffer of incoming packets from libertas core.
79 * Since we can't sleep in hw_host_to_card, we have to buffer
80 * them. */
81 struct list_head cmd_packet_list;
82 struct list_head data_packet_list;
83
84 /* Protects cmd_packet_list and data_packet_list */
85 spinlock_t buffer_lock;
86};
87
88static void free_if_spi_card(struct if_spi_card *card)
89{
90 struct list_head *cursor, *next;
91 struct if_spi_packet *packet;
92
93 BUG_ON(card->run_thread);
94 list_for_each_safe(cursor, next, &card->cmd_packet_list) {
95 packet = container_of(cursor, struct if_spi_packet, list);
96 list_del(&packet->list);
97 kfree(packet);
98 }
99 list_for_each_safe(cursor, next, &card->data_packet_list) {
100 packet = container_of(cursor, struct if_spi_packet, list);
101 list_del(&packet->list);
102 kfree(packet);
103 }
104 spi_set_drvdata(card->spi, NULL);
105 kfree(card);
106}
107
108static struct chip_ident chip_id_to_device_name[] = {
109 { .chip_id = 0x04, .name = 8385 },
110 { .chip_id = 0x0b, .name = 8686 },
111};
112
113/*
114 * SPI Interface Unit Routines
115 *
116 * The SPU sits between the host and the WLAN module.
117 * All communication with the firmware is through SPU transactions.
118 *
119 * First we have to put a SPU register name on the bus. Then we can
120 * either read from or write to that register.
121 *
122 * For 16-bit transactions, byte order on the bus is big-endian.
123 * We don't have to worry about that here, though.
124 * The translation takes place in the SPI routines.
125 */
126
127static void spu_transaction_init(struct if_spi_card *card)
128{
129 if (!time_after(jiffies, card->prev_xfer_time + 1)) {
130 /* Unfortunately, the SPU requires a delay between successive
131 * transactions. If our last transaction was more than a jiffy
132 * ago, we have obviously already delayed enough.
133 * If not, we have to busy-wait to be on the safe side. */
134 ndelay(400);
135 }
136 gpio_set_value(card->gpio_cs, 0); /* assert CS */
137}
138
139static void spu_transaction_finish(struct if_spi_card *card)
140{
141 gpio_set_value(card->gpio_cs, 1); /* drop CS */
142 card->prev_xfer_time = jiffies;
143}
144
145/* Write out a byte buffer to an SPI register,
146 * using a series of 16-bit transfers. */
147static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
148{
149 int err = 0;
150 u16 reg_out = reg | IF_SPI_WRITE_OPERATION_MASK;
151
152 /* You must give an even number of bytes to the SPU, even if it
153 * doesn't care about the last one. */
154 BUG_ON(len & 0x1);
155
156 spu_transaction_init(card);
157
158 /* write SPU register index */
159 err = spi_write(card->spi, (u8 *)&reg_out, sizeof(u16));
160 if (err)
161 goto out;
162
163 err = spi_write(card->spi, buf, len);
164
165out:
166 spu_transaction_finish(card);
167 return err;
168}
169
170static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
171{
172 return spu_write(card, reg, (u8 *)&val, sizeof(u16));
173}
174
175static inline int spu_write_u32(struct if_spi_card *card, u16 reg, u32 val)
176{
177 /* The lower 16 bits are written first. */
178 u16 out[2];
179 out[0] = val & 0xffff;
180 out[1] = (val & 0xffff0000) >> 16;
181 return spu_write(card, reg, (u8 *)&out, sizeof(u32));
182}
183
184static inline int spu_reg_is_port_reg(u16 reg)
185{
186 switch (reg) {
187 case IF_SPI_IO_RDWRPORT_REG:
188 case IF_SPI_CMD_RDWRPORT_REG:
189 case IF_SPI_DATA_RDWRPORT_REG:
190 return 1;
191 default:
192 return 0;
193 }
194}
195
196static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
197{
198 unsigned int i, delay;
199 int err = 0;
200 u16 zero = 0;
201 u16 reg_out = reg | IF_SPI_READ_OPERATION_MASK;
202
203 /* You must take an even number of bytes from the SPU, even if you
204 * don't care about the last one. */
205 BUG_ON(len & 0x1);
206
207 spu_transaction_init(card);
208
209 /* write SPU register index */
210 err = spi_write(card->spi, (u8 *)&reg_out, sizeof(u16));
211 if (err)
212 goto out;
213
214 delay = spu_reg_is_port_reg(reg) ? card->spu_port_delay :
215 card->spu_reg_delay;
216 if (card->use_dummy_writes) {
217 /* Clock in dummy cycles while the SPU fills the FIFO */
218 for (i = 0; i < delay / 16; ++i) {
219 err = spi_write(card->spi, (u8 *)&zero, sizeof(u16));
220 if (err)
221 return err;
222 }
223 } else {
224 /* Busy-wait while the SPU fills the FIFO */
225 ndelay(100 + (delay * 10));
226 }
227
228 /* read in data */
229 err = spi_read(card->spi, buf, len);
230
231out:
232 spu_transaction_finish(card);
233 return err;
234}
235
236/* Read 16 bits from an SPI register */
237static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
238{
239 return spu_read(card, reg, (u8 *)val, sizeof(u16));
240}
241
242/* Read 32 bits from an SPI register.
243 * The low 16 bits are read first. */
244static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
245{
246 u16 buf[2];
247 int err;
248 err = spu_read(card, reg, (u8 *)buf, sizeof(u32));
249 if (!err)
250 *val = buf[0] | (buf[1] << 16);
251 return err;
252}
253
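/*
 * Worked example (not part of the patch): spu_write_u32(card, reg, 0x12345678)
 * sends the halfwords 0x5678 then 0x1234, and spu_read_u32() reassembles them
 * as buf[0] | (buf[1] << 16), recovering 0x12345678.
 */
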
254/* Keep reading 16 bits from an SPI register until you get the correct result.
255 *
256 * If mask = 0, the correct result is any non-zero number.
257 * If mask != 0, the correct result is any number where
258 * number & target_mask == target
259 *
 260 * Returns -ETIMEDOUT if five seconds pass without the correct result. */
261static int spu_wait_for_u16(struct if_spi_card *card, u16 reg,
262 u16 target_mask, u16 target)
263{
264 int err;
265 unsigned long timeout = jiffies + 5*HZ;
266 while (1) {
267 u16 val;
268 err = spu_read_u16(card, reg, &val);
269 if (err)
270 return err;
271 if (target_mask) {
272 if ((val & target_mask) == target)
273 return 0;
274 } else {
275 if (val)
276 return 0;
277 }
278 udelay(100);
279 if (time_after(jiffies, timeout)) {
280 lbs_pr_err("%s: timeout with val=%02x, "
281 "target_mask=%02x, target=%02x\n",
282 __func__, val, target_mask, target);
283 return -ETIMEDOUT;
284 }
285 }
286}
287
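/*
 * Usage sketch (not part of the patch).  With a zero mask the poll above
 * succeeds on any non-zero value:
 *
 *	err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0);
 *
 * With a non-zero mask it succeeds once the masked bits equal the target,
 * e.g. waiting for command-download-ready:
 *
 *	err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
 *			       IF_SPI_HIST_CMD_DOWNLOAD_RDY,
 *			       IF_SPI_HIST_CMD_DOWNLOAD_RDY);
 */
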
 288/* Read 32 bits from an SPI register until you receive a specific value.
 289 * Returns -ETIMEDOUT if four tries pass without success. */
290static int spu_wait_for_u32(struct if_spi_card *card, u32 reg, u32 target)
291{
292 int err, try;
293 for (try = 0; try < 4; ++try) {
294 u32 val = 0;
295 err = spu_read_u32(card, reg, &val);
296 if (err)
297 return err;
298 if (val == target)
299 return 0;
300 mdelay(100);
301 }
302 return -ETIMEDOUT;
303}
304
305static int spu_set_interrupt_mode(struct if_spi_card *card,
306 int suppress_host_int,
307 int auto_int)
308{
309 int err = 0;
310
311 /* We can suppress a host interrupt by clearing the appropriate
312 * bit in the "host interrupt status mask" register */
313 if (suppress_host_int) {
314 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0);
315 if (err)
316 return err;
317 } else {
318 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG,
319 IF_SPI_HISM_TX_DOWNLOAD_RDY |
320 IF_SPI_HISM_RX_UPLOAD_RDY |
321 IF_SPI_HISM_CMD_DOWNLOAD_RDY |
322 IF_SPI_HISM_CARDEVENT |
323 IF_SPI_HISM_CMD_UPLOAD_RDY);
324 if (err)
325 return err;
326 }
327
328 /* If auto-interrupts are on, the completion of certain transactions
329 * will trigger an interrupt automatically. If auto-interrupts
330 * are off, we need to set the "Card Interrupt Cause" register to
331 * trigger a card interrupt. */
332 if (auto_int) {
333 err = spu_write_u16(card, IF_SPI_HOST_INT_CTRL_REG,
334 IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO |
335 IF_SPI_HICT_RX_UPLOAD_OVER_AUTO |
336 IF_SPI_HICT_CMD_DOWNLOAD_OVER_AUTO |
337 IF_SPI_HICT_CMD_UPLOAD_OVER_AUTO);
338 if (err)
339 return err;
340 } else {
341 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0);
342 if (err)
343 return err;
344 }
345 return err;
346}
347
348static int spu_get_chip_revision(struct if_spi_card *card,
349 u16 *card_id, u8 *card_rev)
350{
351 int err = 0;
352 u32 dev_ctrl;
353 err = spu_read_u32(card, IF_SPI_DEVICEID_CTRL_REG, &dev_ctrl);
354 if (err)
355 return err;
356 *card_id = IF_SPI_DEVICEID_CTRL_REG_TO_CARD_ID(dev_ctrl);
357 *card_rev = IF_SPI_DEVICEID_CTRL_REG_TO_CARD_REV(dev_ctrl);
358 return err;
359}
360
361static int spu_set_bus_mode(struct if_spi_card *card, u16 mode)
362{
363 int err = 0;
364 u16 rval;
365 /* set bus mode */
366 err = spu_write_u16(card, IF_SPI_SPU_BUS_MODE_REG, mode);
367 if (err)
368 return err;
369 /* Check that we were able to read back what we just wrote. */
370 err = spu_read_u16(card, IF_SPI_SPU_BUS_MODE_REG, &rval);
371 if (err)
372 return err;
373 if (rval != mode) {
374 lbs_pr_err("Can't read bus mode register.\n");
375 return -EIO;
376 }
377 return 0;
378}
379
380static int spu_init(struct if_spi_card *card, int use_dummy_writes)
381{
382 int err = 0;
383 u32 delay;
384
385 /* We have to start up in timed delay mode so that we can safely
386 * read the Delay Read Register. */
387 card->use_dummy_writes = 0;
388 err = spu_set_bus_mode(card,
389 IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING |
390 IF_SPI_BUS_MODE_DELAY_METHOD_TIMED |
391 IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA);
392 if (err)
393 return err;
394 card->spu_port_delay = 1000;
395 card->spu_reg_delay = 1000;
396 err = spu_read_u32(card, IF_SPI_DELAY_READ_REG, &delay);
397 if (err)
398 return err;
399 card->spu_port_delay = delay & 0x0000ffff;
400 card->spu_reg_delay = (delay & 0xffff0000) >> 16;
401
402 /* If dummy clock delay mode has been requested, switch to it now */
403 if (use_dummy_writes) {
404 card->use_dummy_writes = 1;
405 err = spu_set_bus_mode(card,
406 IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING |
407 IF_SPI_BUS_MODE_DELAY_METHOD_DUMMY_CLOCK |
408 IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA);
409 if (err)
410 return err;
411 }
412
413 lbs_deb_spi("Initialized SPU unit. "
414 "spu_port_delay=0x%04lx, spu_reg_delay=0x%04lx\n",
415 card->spu_port_delay, card->spu_reg_delay);
416 return err;
417}
418
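/*
 * Worked example (not part of the patch): if the delay read register returned
 * the hypothetical value 0x001e0046, spu_init() would set
 * spu_port_delay = 0x0046 (low halfword) and spu_reg_delay = 0x001e
 * (high halfword).
 */
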
419/*
420 * Firmware Loading
421 */
422
423static int if_spi_prog_helper_firmware(struct if_spi_card *card)
424{
425 int err = 0;
426 const struct firmware *firmware = NULL;
427 int bytes_remaining;
428 const u8 *fw;
429 u8 temp[HELPER_FW_LOAD_CHUNK_SZ];
430 struct spi_device *spi = card->spi;
431
432 lbs_deb_enter(LBS_DEB_SPI);
433
434 err = spu_set_interrupt_mode(card, 1, 0);
435 if (err)
436 goto out;
437 /* Get helper firmware image */
438 err = request_firmware(&firmware, card->helper_fw_name, &spi->dev);
439 if (err) {
440 lbs_pr_err("request_firmware failed with err = %d\n", err);
441 goto out;
442 }
443 bytes_remaining = firmware->size;
444 fw = firmware->data;
445
446 /* Load helper firmware image */
447 while (bytes_remaining > 0) {
448 /* Scratch pad 1 should contain the number of bytes we
449 * want to download to the firmware */
450 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG,
451 HELPER_FW_LOAD_CHUNK_SZ);
452 if (err)
453 goto release_firmware;
454
455 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
456 IF_SPI_HIST_CMD_DOWNLOAD_RDY,
457 IF_SPI_HIST_CMD_DOWNLOAD_RDY);
458 if (err)
459 goto release_firmware;
460
461 /* Feed the data into the command read/write port reg
462 * in chunks of 64 bytes */
463 memset(temp, 0, sizeof(temp));
464 memcpy(temp, fw,
465 min(bytes_remaining, HELPER_FW_LOAD_CHUNK_SZ));
466 mdelay(10);
467 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
468 temp, HELPER_FW_LOAD_CHUNK_SZ);
469 if (err)
470 goto release_firmware;
471
472 /* Interrupt the boot code */
473 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
474 if (err)
475 goto release_firmware;
476 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
477 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
478 if (err)
479 goto release_firmware;
480 bytes_remaining -= HELPER_FW_LOAD_CHUNK_SZ;
481 fw += HELPER_FW_LOAD_CHUNK_SZ;
482 }
483
484 /* Once the helper / single stage firmware download is complete,
485 * write 0 to scratch pad 1 and interrupt the
486 * bootloader. This completes the helper download. */
487 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK);
488 if (err)
489 goto release_firmware;
490 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
491 if (err)
492 goto release_firmware;
493 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
494 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
495 goto release_firmware;
496
497 lbs_deb_spi("waiting for helper to boot...\n");
498
499release_firmware:
500 release_firmware(firmware);
501out:
502 if (err)
503 lbs_pr_err("failed to load helper firmware (err=%d)\n", err);
504 lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
505 return err;
506}
507
508/* Returns the length of the next packet the firmware expects us to send
509 * Sets crc_err if the previous transfer had a CRC error. */
510static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card,
511 int *crc_err)
512{
513 u16 len;
514 int err = 0;
515
516 /* wait until the host interrupt status register indicates
517 * that we are ready to download */
518 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
519 IF_SPI_HIST_CMD_DOWNLOAD_RDY,
520 IF_SPI_HIST_CMD_DOWNLOAD_RDY);
521 if (err) {
522 lbs_pr_err("timed out waiting for host_int_status\n");
523 return err;
524 }
525
526 /* Ask the device how many bytes of firmware it wants. */
527 err = spu_read_u16(card, IF_SPI_SCRATCH_1_REG, &len);
528 if (err)
529 return err;
530
531 if (len > IF_SPI_CMD_BUF_SIZE) {
532 lbs_pr_err("firmware load device requested a larger "
533 "tranfer than we are prepared to "
534 "handle. (len = %d)\n", len);
535 return -EIO;
536 }
537 if (len & 0x1) {
538 lbs_deb_spi("%s: crc error\n", __func__);
539 len &= ~0x1;
540 *crc_err = 1;
541 } else
542 *crc_err = 0;
543
544 return len;
545}
546
547static int if_spi_prog_main_firmware(struct if_spi_card *card)
548{
549 int len, prev_len;
550 int bytes, crc_err = 0, err = 0;
551 const struct firmware *firmware = NULL;
552 const u8 *fw;
553 struct spi_device *spi = card->spi;
554 u16 num_crc_errs;
555
556 lbs_deb_enter(LBS_DEB_SPI);
557
558 err = spu_set_interrupt_mode(card, 1, 0);
559 if (err)
560 goto out;
561
562 /* Get firmware image */
563 err = request_firmware(&firmware, card->main_fw_name, &spi->dev);
564 if (err) {
565 lbs_pr_err("%s: can't get firmware '%s' from kernel. "
566 "err = %d\n", __func__, card->main_fw_name, err);
567 goto out;
568 }
569
570 err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0);
571 if (err) {
572 lbs_pr_err("%s: timed out waiting for initial "
573 "scratch reg = 0\n", __func__);
574 goto release_firmware;
575 }
576
577 num_crc_errs = 0;
578 prev_len = 0;
579 bytes = firmware->size;
580 fw = firmware->data;
581 while ((len = if_spi_prog_main_firmware_check_len(card, &crc_err))) {
582 if (len < 0) {
583 err = len;
584 goto release_firmware;
585 }
586 if (bytes < 0) {
587 /* If there are no more bytes left, we would normally
588 * expect to have terminated with len = 0 */
589 lbs_pr_err("Firmware load wants more bytes "
590 "than we have to offer.\n");
591 break;
592 }
593 if (crc_err) {
594 /* Previous transfer failed. */
595 if (++num_crc_errs > MAX_MAIN_FW_LOAD_CRC_ERR) {
596 lbs_pr_err("Too many CRC errors encountered "
597 "in firmware load.\n");
598 err = -EIO;
599 goto release_firmware;
600 }
601 } else {
602 /* Previous transfer succeeded. Advance counters. */
603 bytes -= prev_len;
604 fw += prev_len;
605 }
606 if (bytes < len) {
607 memset(card->cmd_buffer, 0, len);
608 memcpy(card->cmd_buffer, fw, bytes);
609 } else
610 memcpy(card->cmd_buffer, fw, len);
611
612 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
613 if (err)
614 goto release_firmware;
615 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
616 card->cmd_buffer, len);
617 if (err)
618 goto release_firmware;
619 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG ,
620 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
621 if (err)
622 goto release_firmware;
623 prev_len = len;
624 }
625 if (bytes > prev_len) {
626 lbs_pr_err("firmware load wants fewer bytes than "
627 "we have to offer.\n");
628 }
629
630 /* Confirm firmware download */
631 err = spu_wait_for_u32(card, IF_SPI_SCRATCH_4_REG,
632 SUCCESSFUL_FW_DOWNLOAD_MAGIC);
633 if (err) {
634 lbs_pr_err("failed to confirm the firmware download\n");
635 goto release_firmware;
636 }
637
638release_firmware:
639 release_firmware(firmware);
640
641out:
642 if (err)
643 lbs_pr_err("failed to load firmware (err=%d)\n", err);
644 lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
645 return err;
646}
647
648/*
649 * SPI Transfer Thread
650 *
651 * The SPI thread handles all SPI transfers, so there is no need for a lock.
652 */
653
654/* Move a command from the card to the host */
655static int if_spi_c2h_cmd(struct if_spi_card *card)
656{
657 struct lbs_private *priv = card->priv;
658 unsigned long flags;
659 int err = 0;
660 u16 len;
661 u8 i;
662
663 /* We need a buffer big enough to handle whatever people send to
664 * hw_host_to_card */
665 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_CMD_BUFFER_SIZE);
666 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_UPLD_SIZE);
667
668 /* It's just annoying if the buffer size isn't a multiple of 4, because
669 * then we might have len < IF_SPI_CMD_BUF_SIZE but
670 * ALIGN(len, 4) > IF_SPI_CMD_BUF_SIZE */
671 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE % 4 != 0);
672
673 lbs_deb_enter(LBS_DEB_SPI);
674
675 /* How many bytes are there to read? */
676 err = spu_read_u16(card, IF_SPI_SCRATCH_2_REG, &len);
677 if (err)
678 goto out;
679 if (!len) {
680 lbs_pr_err("%s: error: card has no data for host\n",
681 __func__);
682 err = -EINVAL;
683 goto out;
684 } else if (len > IF_SPI_CMD_BUF_SIZE) {
685 lbs_pr_err("%s: error: response packet too large: "
686 "%d bytes, but maximum is %d\n",
687 __func__, len, IF_SPI_CMD_BUF_SIZE);
688 err = -EINVAL;
689 goto out;
690 }
691
692 /* Read the data from the WLAN module into our command buffer */
693 err = spu_read(card, IF_SPI_CMD_RDWRPORT_REG,
694 card->cmd_buffer, ALIGN(len, 4));
695 if (err)
696 goto out;
697
698 spin_lock_irqsave(&priv->driver_lock, flags);
699 i = (priv->resp_idx == 0) ? 1 : 0;
700 BUG_ON(priv->resp_len[i]);
701 priv->resp_len[i] = len;
702 memcpy(priv->resp_buf[i], card->cmd_buffer, len);
703 lbs_notify_command_response(priv, i);
704 spin_unlock_irqrestore(&priv->driver_lock, flags);
705
706out:
707 if (err)
708 lbs_pr_err("%s: err=%d\n", __func__, err);
709 lbs_deb_leave(LBS_DEB_SPI);
710 return err;
711}
712
713/* Move data from the card to the host */
714static int if_spi_c2h_data(struct if_spi_card *card)
715{
716 struct sk_buff *skb;
717 char *data;
718 u16 len;
719 int err = 0;
720
721 lbs_deb_enter(LBS_DEB_SPI);
722
723 /* How many bytes are there to read? */
724 err = spu_read_u16(card, IF_SPI_SCRATCH_1_REG, &len);
725 if (err)
726 goto out;
727 if (!len) {
728 lbs_pr_err("%s: error: card has no data for host\n",
729 __func__);
730 err = -EINVAL;
731 goto out;
732 } else if (len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
733 lbs_pr_err("%s: error: card has %d bytes of data, but "
734 "our maximum skb size is %u\n",
735 __func__, len, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
736 err = -EINVAL;
737 goto out;
738 }
739
740 /* TODO: should we allocate a smaller skb if we have less data? */
741 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
742 if (!skb) {
743 err = -ENOBUFS;
744 goto out;
745 }
746 skb_reserve(skb, IPFIELD_ALIGN_OFFSET);
747 data = skb_put(skb, len);
748
749 /* Read the data from the WLAN module into our skb... */
750 err = spu_read(card, IF_SPI_DATA_RDWRPORT_REG, data, ALIGN(len, 4));
751 if (err)
752 goto free_skb;
753
754 /* pass the SKB to libertas */
755 err = lbs_process_rxed_packet(card->priv, skb);
756 if (err)
757 goto free_skb;
758
759 /* success */
760 goto out;
761
762free_skb:
763 dev_kfree_skb(skb);
764out:
765 if (err)
766 lbs_pr_err("%s: err=%d\n", __func__, err);
767 lbs_deb_leave(LBS_DEB_SPI);
768 return err;
769}
770
771/* Move data or a command from the host to the card. */
772static void if_spi_h2c(struct if_spi_card *card,
773 struct if_spi_packet *packet, int type)
774{
775 int err = 0;
776 u16 int_type, port_reg;
777
778 switch (type) {
779 case MVMS_DAT:
780 int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
781 port_reg = IF_SPI_DATA_RDWRPORT_REG;
782 break;
783 case MVMS_CMD:
784 int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
785 port_reg = IF_SPI_CMD_RDWRPORT_REG;
786 break;
787 default:
788 lbs_pr_err("can't transfer buffer of type %d\n", type);
789 err = -EINVAL;
790 goto out;
791 }
792
793 /* Write the data to the card */
794 err = spu_write(card, port_reg, packet->buffer, packet->blen);
795 if (err)
796 goto out;
797
798out:
799 kfree(packet);
800
801 if (err)
802 lbs_pr_err("%s: error %d\n", __func__, err);
803}
804
805/* Inform the host about a card event */
806static void if_spi_e2h(struct if_spi_card *card)
807{
808 int err = 0;
809 unsigned long flags;
810 u32 cause;
811 struct lbs_private *priv = card->priv;
812
813 err = spu_read_u32(card, IF_SPI_SCRATCH_3_REG, &cause);
814 if (err)
815 goto out;
816
817 spin_lock_irqsave(&priv->driver_lock, flags);
818 lbs_queue_event(priv, cause & 0xff);
819 spin_unlock_irqrestore(&priv->driver_lock, flags);
820
821out:
822 if (err)
823 lbs_pr_err("%s: error %d\n", __func__, err);
824}
825
826static int lbs_spi_thread(void *data)
827{
828 int err;
829 struct if_spi_card *card = data;
830 u16 hiStatus;
831 unsigned long flags;
832 struct if_spi_packet *packet;
833
834 while (1) {
835 /* Wait to be woken up by one of two things. First, our ISR
836 * could tell us that something happened on the WLAN.
837 * Secondly, libertas could call hw_host_to_card with more
838 * data, which we might be able to send.
839 */
840 do {
841 err = down_interruptible(&card->spi_ready);
842 if (!card->run_thread) {
843 up(&card->spi_thread_terminated);
844 do_exit(0);
845 }
846 } while (err == EINTR);
847
848 /* Read the host interrupt status register to see what we
849 * can do. */
850 err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
851 &hiStatus);
852 if (err) {
853 lbs_pr_err("I/O error\n");
854 goto err;
855 }
856
857 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY)
858 err = if_spi_c2h_cmd(card);
859 if (err)
860 goto err;
861 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY)
862 err = if_spi_c2h_data(card);
863 if (err)
864 goto err;
865 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY) {
866 /* This means two things. First of all,
867 * if there was a previous command sent, the card has
868 * successfully received it.
869 * Secondly, it is now ready to download another
870 * command.
871 */
872 lbs_host_to_card_done(card->priv);
873
874 /* Do we have any command packets from the host to
875 * send? */
876 packet = NULL;
877 spin_lock_irqsave(&card->buffer_lock, flags);
878 if (!list_empty(&card->cmd_packet_list)) {
879 packet = (struct if_spi_packet *)(card->
880 cmd_packet_list.next);
881 list_del(&packet->list);
882 }
883 spin_unlock_irqrestore(&card->buffer_lock, flags);
884
885 if (packet)
886 if_spi_h2c(card, packet, MVMS_CMD);
887 }
888 if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
889 /* Do we have any data packets from the host to
890 * send? */
891 packet = NULL;
892 spin_lock_irqsave(&card->buffer_lock, flags);
893 if (!list_empty(&card->data_packet_list)) {
894 packet = (struct if_spi_packet *)(card->
895 data_packet_list.next);
896 list_del(&packet->list);
897 }
898 spin_unlock_irqrestore(&card->buffer_lock, flags);
899
900 if (packet)
901 if_spi_h2c(card, packet, MVMS_DAT);
902 }
903 if (hiStatus & IF_SPI_HIST_CARD_EVENT)
904 if_spi_e2h(card);
905
906err:
907 if (err)
908 lbs_pr_err("%s: got error %d\n", __func__, err);
909 }
910}
911
912/* Block until lbs_spi_thread thread has terminated */
913static void if_spi_terminate_spi_thread(struct if_spi_card *card)
914{
915 /* It would be nice to use kthread_stop here, but that function
916 * can't wake threads waiting for a semaphore. */
917 card->run_thread = 0;
918 up(&card->spi_ready);
919 down(&card->spi_thread_terminated);
920}
921
922/*
923 * Host to Card
924 *
925 * Called from Libertas to transfer some data to the WLAN device
926 * We can't sleep here. */
927static int if_spi_host_to_card(struct lbs_private *priv,
928 u8 type, u8 *buf, u16 nb)
929{
930 int err = 0;
931 unsigned long flags;
932 struct if_spi_card *card = priv->card;
933 struct if_spi_packet *packet;
934 u16 blen;
935
936 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
937
938 if (nb == 0) {
939 lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
940 err = -EINVAL;
941 goto out;
942 }
943 blen = ALIGN(nb, 4);
944 packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
945 if (!packet) {
946 err = -ENOMEM;
947 goto out;
948 }
949 packet->blen = blen;
950 memcpy(packet->buffer, buf, nb);
951 memset(packet->buffer + nb, 0, blen - nb);
952
953 switch (type) {
954 case MVMS_CMD:
955 priv->dnld_sent = DNLD_CMD_SENT;
956 spin_lock_irqsave(&card->buffer_lock, flags);
957 list_add_tail(&packet->list, &card->cmd_packet_list);
958 spin_unlock_irqrestore(&card->buffer_lock, flags);
959 break;
960 case MVMS_DAT:
961 priv->dnld_sent = DNLD_DATA_SENT;
962 spin_lock_irqsave(&card->buffer_lock, flags);
963 list_add_tail(&packet->list, &card->data_packet_list);
964 spin_unlock_irqrestore(&card->buffer_lock, flags);
965 break;
966 default:
967 lbs_pr_err("can't transfer buffer of type %d", type);
968 err = -EINVAL;
969 break;
970 }
971
972 /* Wake up the spi thread */
973 up(&card->spi_ready);
974out:
975 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
976 return err;
977}
978
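/*
 * Note on the padding above (not part of the patch): spu_write() insists on
 * an even transfer length, so the packet is sized blen = ALIGN(nb, 4) and the
 * tail is zeroed; e.g. nb = 23 gives blen = 24 with one byte of zero padding.
 */
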
979/*
980 * Host Interrupts
981 *
982 * Service incoming interrupts from the WLAN device. We can't sleep here, so
983 * don't try to talk on the SPI bus, just wake up the SPI thread.
984 */
985static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
986{
987 struct if_spi_card *card = dev_id;
988
989 up(&card->spi_ready);
990 return IRQ_HANDLED;
991}
992
993/*
994 * SPI callbacks
995 */
996
997static int if_spi_calculate_fw_names(u16 card_id,
998 char *helper_fw, char *main_fw)
999{
1000 int i;
1001 for (i = 0; i < ARRAY_SIZE(chip_id_to_device_name); ++i) {
1002 if (card_id == chip_id_to_device_name[i].chip_id)
1003 break;
1004 }
1005 if (i == ARRAY_SIZE(chip_id_to_device_name)) {
1006 lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
1007 return -EAFNOSUPPORT;
1008 }
1009 snprintf(helper_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d_hlp.bin",
1010 chip_id_to_device_name[i].name);
1011 snprintf(main_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d.bin",
1012 chip_id_to_device_name[i].name);
1013 return 0;
1014}
1015
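/*
 * Worked example (not part of the patch): for the two entries in
 * chip_id_to_device_name, the names generated above are
 *
 *	chip_id 0x04:	libertas/gspi8385_hlp.bin and libertas/gspi8385.bin
 *	chip_id 0x0b:	libertas/gspi8686_hlp.bin and libertas/gspi8686.bin
 *
 * so those files must be visible to request_firmware() at probe time.
 */
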
1016static int __devinit if_spi_probe(struct spi_device *spi)
1017{
1018 struct if_spi_card *card;
1019 struct lbs_private *priv = NULL;
1020 struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
1021 int err = 0;
1022 u32 scratch;
1023
1024 lbs_deb_enter(LBS_DEB_SPI);
1025
1026 if (!pdata) {
1027 err = -EINVAL;
1028 goto out;
1029 }
1030
1031 if (pdata->setup) {
1032 err = pdata->setup(spi);
1033 if (err)
1034 goto out;
1035 }
1036
1037 /* Allocate card structure to represent this specific device */
1038 card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
1039 if (!card) {
1040 err = -ENOMEM;
1041 goto out;
1042 }
1043 spi_set_drvdata(spi, card);
1044 card->pdata = pdata;
1045 card->spi = spi;
1046 card->gpio_cs = pdata->gpio_cs;
1047 card->prev_xfer_time = jiffies;
1048
1049 sema_init(&card->spi_ready, 0);
1050 sema_init(&card->spi_thread_terminated, 0);
1051 INIT_LIST_HEAD(&card->cmd_packet_list);
1052 INIT_LIST_HEAD(&card->data_packet_list);
1053 spin_lock_init(&card->buffer_lock);
1054
1055 /* set up GPIO CS line. TODO: use regular CS line */
1056 err = gpio_request(card->gpio_cs, "if_spi_gpio_chip_select");
1057 if (err)
1058 goto free_card;
1059 err = gpio_direction_output(card->gpio_cs, 1);
1060 if (err)
1061 goto free_gpio;
1062
1063 /* Initialize the SPI Interface Unit */
1064 err = spu_init(card, pdata->use_dummy_writes);
1065 if (err)
1066 goto free_gpio;
1067 err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
1068 if (err)
1069 goto free_gpio;
1070
1071 /* Firmware load */
1072 err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
1073 if (err)
1074 goto free_gpio;
1075 if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
1076 lbs_deb_spi("Firmware is already loaded for "
1077 "Marvell WLAN 802.11 adapter\n");
1078 else {
1079 err = if_spi_calculate_fw_names(card->card_id,
1080 card->helper_fw_name, card->main_fw_name);
1081 if (err)
1082 goto free_gpio;
1083
1084 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
1085 "(chip_id = 0x%04x, chip_rev = 0x%02x) "
1086 "attached to SPI bus_num %d, chip_select %d. "
1087 "spi->max_speed_hz=%d\n",
1088 card->card_id, card->card_rev,
1089 spi->master->bus_num, spi->chip_select,
1090 spi->max_speed_hz);
1091 err = if_spi_prog_helper_firmware(card);
1092 if (err)
1093 goto free_gpio;
1094 err = if_spi_prog_main_firmware(card);
1095 if (err)
1096 goto free_gpio;
1097 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
1098 }
1099
1100 err = spu_set_interrupt_mode(card, 0, 1);
1101 if (err)
1102 goto free_gpio;
1103
1104 /* Register our card with libertas.
1105 * This will call alloc_etherdev */
1106 priv = lbs_add_card(card, &spi->dev);
1107 if (!priv) {
1108 err = -ENOMEM;
1109 goto free_gpio;
1110 }
1111 card->priv = priv;
1112 priv->card = card;
1113 priv->hw_host_to_card = if_spi_host_to_card;
1114 priv->fw_ready = 1;
1115 priv->ps_supported = 1;
1116
1117 /* Initialize interrupt handling stuff. */
1118 card->run_thread = 1;
1119 card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread");
1120 if (IS_ERR(card->spi_thread)) {
1121 card->run_thread = 0;
1122 err = PTR_ERR(card->spi_thread);
1123 lbs_pr_err("error creating SPI thread: err=%d\n", err);
1124 goto remove_card;
1125 }
1126 err = request_irq(spi->irq, if_spi_host_interrupt,
1127 IRQF_TRIGGER_FALLING, "libertas_spi", card);
1128 if (err) {
1129 lbs_pr_err("can't get host irq line-- request_irq failed\n");
1130 goto terminate_thread;
1131 }
1132
1133 /* Start the card.
1134 * This will call register_netdev, and we'll start
1135 * getting interrupts... */
1136 err = lbs_start_card(priv);
1137 if (err)
1138 goto release_irq;
1139
1140 lbs_deb_spi("Finished initializing WLAN module.\n");
1141
1142 /* successful exit */
1143 goto out;
1144
1145release_irq:
1146 free_irq(spi->irq, card);
1147terminate_thread:
1148 if_spi_terminate_spi_thread(card);
1149remove_card:
1150 lbs_remove_card(priv); /* will call free_netdev */
1151free_gpio:
1152 gpio_free(card->gpio_cs);
1153free_card:
1154 free_if_spi_card(card);
1155out:
1156 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1157 return err;
1158}
1159
1160static int __devexit libertas_spi_remove(struct spi_device *spi)
1161{
1162 struct if_spi_card *card = spi_get_drvdata(spi);
1163 struct lbs_private *priv = card->priv;
1164
1165 lbs_deb_spi("libertas_spi_remove\n");
1166 lbs_deb_enter(LBS_DEB_SPI);
1167 priv->surpriseremoved = 1;
1168
1169 lbs_stop_card(priv);
1170 free_irq(spi->irq, card);
1171 if_spi_terminate_spi_thread(card);
1172 lbs_remove_card(priv); /* will call free_netdev */
1173 gpio_free(card->gpio_cs);
1174 if (card->pdata->teardown)
1175 card->pdata->teardown(spi);
1176 free_if_spi_card(card);
1177 lbs_deb_leave(LBS_DEB_SPI);
1178 return 0;
1179}
1180
1181static struct spi_driver libertas_spi_driver = {
1182 .probe = if_spi_probe,
1183 .remove = __devexit_p(libertas_spi_remove),
1184 .driver = {
1185 .name = "libertas_spi",
1186 .bus = &spi_bus_type,
1187 .owner = THIS_MODULE,
1188 },
1189};
1190
1191/*
1192 * Module functions
1193 */
1194
1195static int __init if_spi_init_module(void)
1196{
1197 int ret = 0;
1198 lbs_deb_enter(LBS_DEB_SPI);
1199 printk(KERN_INFO "libertas_spi: Libertas SPI driver\n");
1200 ret = spi_register_driver(&libertas_spi_driver);
1201 lbs_deb_leave(LBS_DEB_SPI);
1202 return ret;
1203}
1204
1205static void __exit if_spi_exit_module(void)
1206{
1207 lbs_deb_enter(LBS_DEB_SPI);
1208 spi_unregister_driver(&libertas_spi_driver);
1209 lbs_deb_leave(LBS_DEB_SPI);
1210}
1211
1212module_init(if_spi_init_module);
1213module_exit(if_spi_exit_module);
1214
1215MODULE_DESCRIPTION("Libertas SPI WLAN Driver");
1216MODULE_AUTHOR("Andrey Yurovsky <andrey@cozybit.com>, "
1217 "Colin McCabe <colin@cozybit.com>");
1218MODULE_LICENSE("GPL");
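
A minimal board-level hookup sketch for the new driver (illustrative only: the GPIO number, SPI bus/chip-select numbers, IRQ and clock rate are hypothetical, and only the platform-data fields dereferenced by the probe/remove paths above are assumed to exist):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/libertas_spi.h>

static struct libertas_spi_platform_data board_lbs_pdata = {
	.gpio_cs          = 42,	/* hypothetical GPIO used as chip select */
	.use_dummy_writes = 1,	/* request dummy-clock delay mode in spu_init() */
	/* .setup / .teardown board hooks are optional */
};

static struct spi_board_info board_lbs_spi_info[] __initdata = {
	{
		.modalias      = "libertas_spi",
		.platform_data = &board_lbs_pdata,
		.max_speed_hz  = 13000000,	/* hypothetical */
		.bus_num       = 1,		/* hypothetical */
		.chip_select   = 0,
		.irq           = 17,		/* hypothetical WLAN host IRQ */
	},
};

static int __init board_lbs_register(void)
{
	return spi_register_board_info(board_lbs_spi_info,
				       ARRAY_SIZE(board_lbs_spi_info));
}
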
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
new file mode 100644
index 00000000000..2103869cc5b
--- /dev/null
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -0,0 +1,208 @@
1/*
 2 * linux/drivers/net/wireless/libertas/if_spi.h
3 *
4 * Driver for Marvell SPI WLAN cards.
5 *
6 * Copyright 2008 Analog Devices Inc.
7 *
8 * Authors:
9 * Andrey Yurovsky <andrey@cozybit.com>
10 * Colin McCabe <colin@cozybit.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or (at
15 * your option) any later version.
16 */
17
18#ifndef _LBS_IF_SPI_H_
19#define _LBS_IF_SPI_H_
20
21#define IPFIELD_ALIGN_OFFSET 2
22#define IF_SPI_CMD_BUF_SIZE 2400
23
24/***************** Firmware *****************/
25struct chip_ident {
26 u16 chip_id;
27 u16 name;
28};
29
30#define MAX_MAIN_FW_LOAD_CRC_ERR 10
31
32/* Chunk size when loading the helper firmware */
33#define HELPER_FW_LOAD_CHUNK_SZ 64
34
35/* Value to write to indicate end of helper firmware dnld */
36#define FIRMWARE_DNLD_OK 0x0000
37
38/* Value to check once the main firmware is downloaded */
39#define SUCCESSFUL_FW_DOWNLOAD_MAGIC 0x88888888
40
41/***************** SPI Interface Unit *****************/
42/* Masks used in SPI register read/write operations */
43#define IF_SPI_READ_OPERATION_MASK 0x0
44#define IF_SPI_WRITE_OPERATION_MASK 0x8000
45
46/* SPI register offsets. 4-byte aligned. */
47#define IF_SPI_DEVICEID_CTRL_REG 0x00 /* DeviceID controller reg */
48#define IF_SPI_IO_READBASE_REG 0x04 /* Read I/O base reg */
49#define IF_SPI_IO_WRITEBASE_REG 0x08 /* Write I/O base reg */
50#define IF_SPI_IO_RDWRPORT_REG 0x0C /* Read/Write I/O port reg */
51
52#define IF_SPI_CMD_READBASE_REG 0x10 /* Read command base reg */
53#define IF_SPI_CMD_WRITEBASE_REG 0x14 /* Write command base reg */
54#define IF_SPI_CMD_RDWRPORT_REG 0x18 /* Read/Write command port reg */
55
56#define IF_SPI_DATA_READBASE_REG 0x1C /* Read data base reg */
57#define IF_SPI_DATA_WRITEBASE_REG 0x20 /* Write data base reg */
58#define IF_SPI_DATA_RDWRPORT_REG 0x24 /* Read/Write data port reg */
59
60#define IF_SPI_SCRATCH_1_REG 0x28 /* Scratch reg 1 */
61#define IF_SPI_SCRATCH_2_REG 0x2C /* Scratch reg 2 */
62#define IF_SPI_SCRATCH_3_REG 0x30 /* Scratch reg 3 */
63#define IF_SPI_SCRATCH_4_REG 0x34 /* Scratch reg 4 */
64
65#define IF_SPI_TX_FRAME_SEQ_NUM_REG 0x38 /* Tx frame sequence number reg */
66#define IF_SPI_TX_FRAME_STATUS_REG 0x3C /* Tx frame status reg */
67
68#define IF_SPI_HOST_INT_CTRL_REG 0x40 /* Host interrupt controller reg */
69
70#define IF_SPI_CARD_INT_CAUSE_REG 0x44 /* Card interrupt cause reg */
71#define IF_SPI_CARD_INT_STATUS_REG 0x48 /* Card interrupt status reg */
72#define IF_SPI_CARD_INT_EVENT_MASK_REG 0x4C /* Card interrupt event mask */
73#define IF_SPI_CARD_INT_STATUS_MASK_REG 0x50 /* Card interrupt status mask */
74
75#define IF_SPI_CARD_INT_RESET_SELECT_REG 0x54 /* Card interrupt reset select */
76
77#define IF_SPI_HOST_INT_CAUSE_REG 0x58 /* Host interrupt cause reg */
78#define IF_SPI_HOST_INT_STATUS_REG 0x5C /* Host interrupt status reg */
79#define IF_SPI_HOST_INT_EVENT_MASK_REG 0x60 /* Host interrupt event mask */
80#define IF_SPI_HOST_INT_STATUS_MASK_REG 0x64 /* Host interrupt status mask */
81#define IF_SPI_HOST_INT_RESET_SELECT_REG 0x68 /* Host interrupt reset select */
82
83#define IF_SPI_DELAY_READ_REG 0x6C /* Delay read reg */
84#define IF_SPI_SPU_BUS_MODE_REG 0x70 /* SPU BUS mode reg */
85
86/***************** IF_SPI_DEVICEID_CTRL_REG *****************/
87#define IF_SPI_DEVICEID_CTRL_REG_TO_CARD_ID(dc) ((dc & 0xffff0000)>>16)
88#define IF_SPI_DEVICEID_CTRL_REG_TO_CARD_REV(dc) (dc & 0x000000ff)
89
90/***************** IF_SPI_HOST_INT_CTRL_REG *****************/
91/** Host Interrupt Control bit : Wake up */
92#define IF_SPI_HICT_WAKE_UP (1<<0)
93/** Host Interrupt Control bit : WLAN ready */
94#define IF_SPI_HICT_WLAN_READY (1<<1)
95/*#define IF_SPI_HICT_FIFO_FIRST_HALF_EMPTY (1<<2) */
96/*#define IF_SPI_HICT_FIFO_SECOND_HALF_EMPTY (1<<3) */
97/*#define IF_SPI_HICT_IRQSRC_WLAN (1<<4) */
98/** Host Interrupt Control bit : Tx auto download */
99#define IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO (1<<5)
100/** Host Interrupt Control bit : Rx auto upload */
101#define IF_SPI_HICT_RX_UPLOAD_OVER_AUTO (1<<6)
102/** Host Interrupt Control bit : Command auto download */
103#define IF_SPI_HICT_CMD_DOWNLOAD_OVER_AUTO (1<<7)
104/** Host Interrupt Control bit : Command auto upload */
105#define IF_SPI_HICT_CMD_UPLOAD_OVER_AUTO (1<<8)
106
107/***************** IF_SPI_CARD_INT_CAUSE_REG *****************/
108/** Card Interrupt Cause bit : Tx download over */
109#define IF_SPI_CIC_TX_DOWNLOAD_OVER (1<<0)
110/** Card Interrupt Cause bit : Rx upload over */
111#define IF_SPI_CIC_RX_UPLOAD_OVER (1<<1)
112/** Card Interrupt Cause bit : Command download over */
113#define IF_SPI_CIC_CMD_DOWNLOAD_OVER (1<<2)
114/** Card Interrupt Cause bit : Host event */
115#define IF_SPI_CIC_HOST_EVENT (1<<3)
116/** Card Interrupt Cause bit : Command upload over */
117#define IF_SPI_CIC_CMD_UPLOAD_OVER (1<<4)
118/** Card Interrupt Cause bit : Power down */
119#define IF_SPI_CIC_POWER_DOWN (1<<5)
120
121/***************** IF_SPI_CARD_INT_STATUS_REG *****************/
122#define IF_SPI_CIS_TX_DOWNLOAD_OVER (1<<0)
123#define IF_SPI_CIS_RX_UPLOAD_OVER (1<<1)
124#define IF_SPI_CIS_CMD_DOWNLOAD_OVER (1<<2)
125#define IF_SPI_CIS_HOST_EVENT (1<<3)
126#define IF_SPI_CIS_CMD_UPLOAD_OVER (1<<4)
127#define IF_SPI_CIS_POWER_DOWN (1<<5)
128
129/***************** IF_SPI_HOST_INT_CAUSE_REG *****************/
130#define IF_SPI_HICU_TX_DOWNLOAD_RDY (1<<0)
131#define IF_SPI_HICU_RX_UPLOAD_RDY (1<<1)
132#define IF_SPI_HICU_CMD_DOWNLOAD_RDY (1<<2)
133#define IF_SPI_HICU_CARD_EVENT (1<<3)
134#define IF_SPI_HICU_CMD_UPLOAD_RDY (1<<4)
135#define IF_SPI_HICU_IO_WR_FIFO_OVERFLOW (1<<5)
136#define IF_SPI_HICU_IO_RD_FIFO_UNDERFLOW (1<<6)
137#define IF_SPI_HICU_DATA_WR_FIFO_OVERFLOW (1<<7)
138#define IF_SPI_HICU_DATA_RD_FIFO_UNDERFLOW (1<<8)
139#define IF_SPI_HICU_CMD_WR_FIFO_OVERFLOW (1<<9)
140#define IF_SPI_HICU_CMD_RD_FIFO_UNDERFLOW (1<<10)
141
142/***************** IF_SPI_HOST_INT_STATUS_REG *****************/
143/** Host Interrupt Status bit : Tx download ready */
144#define IF_SPI_HIST_TX_DOWNLOAD_RDY (1<<0)
145/** Host Interrupt Status bit : Rx upload ready */
146#define IF_SPI_HIST_RX_UPLOAD_RDY (1<<1)
147/** Host Interrupt Status bit : Command download ready */
148#define IF_SPI_HIST_CMD_DOWNLOAD_RDY (1<<2)
149/** Host Interrupt Status bit : Card event */
150#define IF_SPI_HIST_CARD_EVENT (1<<3)
151/** Host Interrupt Status bit : Command upload ready */
152#define IF_SPI_HIST_CMD_UPLOAD_RDY (1<<4)
153/** Host Interrupt Status bit : I/O write FIFO overflow */
154#define IF_SPI_HIST_IO_WR_FIFO_OVERFLOW (1<<5)
155/** Host Interrupt Status bit : I/O read FIFO underflow */
156#define IF_SPI_HIST_IO_RD_FIFO_UNDRFLOW (1<<6)
157/** Host Interrupt Status bit : Data write FIFO overflow */
158#define IF_SPI_HIST_DATA_WR_FIFO_OVERFLOW (1<<7)
159/** Host Interrupt Status bit : Data read FIFO underflow */
160#define IF_SPI_HIST_DATA_RD_FIFO_UNDERFLOW (1<<8)
161/** Host Interrupt Status bit : Command write FIFO overflow */
162#define IF_SPI_HIST_CMD_WR_FIFO_OVERFLOW (1<<9)
163/** Host Interrupt Status bit : Command read FIFO underflow */
164#define IF_SPI_HIST_CMD_RD_FIFO_UNDERFLOW (1<<10)
165
166/***************** IF_SPI_HOST_INT_STATUS_MASK_REG *****************/
167/** Host Interrupt Status Mask bit : Tx download ready */
168#define IF_SPI_HISM_TX_DOWNLOAD_RDY (1<<0)
169/** Host Interrupt Status Mask bit : Rx upload ready */
170#define IF_SPI_HISM_RX_UPLOAD_RDY (1<<1)
171/** Host Interrupt Status Mask bit : Command download ready */
172#define IF_SPI_HISM_CMD_DOWNLOAD_RDY (1<<2)
173/** Host Interrupt Status Mask bit : Card event */
174#define IF_SPI_HISM_CARDEVENT (1<<3)
175/** Host Interrupt Status Mask bit : Command upload ready */
176#define IF_SPI_HISM_CMD_UPLOAD_RDY (1<<4)
177/** Host Interrupt Status Mask bit : I/O write FIFO overflow */
178#define IF_SPI_HISM_IO_WR_FIFO_OVERFLOW (1<<5)
179/** Host Interrupt Status Mask bit : I/O read FIFO underflow */
180#define IF_SPI_HISM_IO_RD_FIFO_UNDERFLOW (1<<6)
181/** Host Interrupt Status Mask bit : Data write FIFO overflow */
182#define IF_SPI_HISM_DATA_WR_FIFO_OVERFLOW (1<<7)
183/** Host Interrupt Status Mask bit : Data read FIFO underflow */
184#define IF_SPI_HISM_DATA_RD_FIFO_UNDERFLOW (1<<8)
185/** Host Interrupt Status Mask bit : Command write FIFO overflow */
186#define IF_SPI_HISM_CMD_WR_FIFO_OVERFLOW (1<<9)
187/** Host Interrupt Status Mask bit : Command read FIFO underflow */
188#define IF_SPI_HISM_CMD_RD_FIFO_UNDERFLOW (1<<10)
189
190/***************** IF_SPI_SPU_BUS_MODE_REG *****************/
191/* SCK edge on which the WLAN module outputs data on MISO */
192#define IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_FALLING 0x8
193#define IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING 0x0
194
195/* In a SPU read operation, there is a delay between writing the SPU
196 * register name and getting back data from the WLAN module.
197 * This can be specified in terms of nanoseconds or in terms of dummy
198 * clock cycles which the master must output before receiving a response. */
199#define IF_SPI_BUS_MODE_DELAY_METHOD_DUMMY_CLOCK 0x4
200#define IF_SPI_BUS_MODE_DELAY_METHOD_TIMED 0x0
201
202/* Some different modes of SPI operation */
203#define IF_SPI_BUS_MODE_8_BIT_ADDRESS_16_BIT_DATA 0x00
204#define IF_SPI_BUS_MODE_8_BIT_ADDRESS_32_BIT_DATA 0x01
205#define IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA 0x02
206#define IF_SPI_BUS_MODE_16_BIT_ADDRESS_32_BIT_DATA 0x03
207
208#endif
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4e0007d2003..8a7eb2778eb 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1006,9 +1006,8 @@ void lbs_resume(struct lbs_private *priv)
1006EXPORT_SYMBOL_GPL(lbs_resume); 1006EXPORT_SYMBOL_GPL(lbs_resume);
1007 1007
1008/** 1008/**
1009 * @brief This function downloads firmware image, gets 1009 * @brief This function gets the HW spec from the firmware and sets
1010 * HW spec from firmware and set basic parameters to 1010 * some basic parameters.
1011 * firmware.
1012 * 1011 *
1013 * @param priv A pointer to struct lbs_private structure 1012 * @param priv A pointer to struct lbs_private structure
1014 * @return 0 or -1 1013 * @return 0 or -1
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 57f6c12cda2..00a57ed78af 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -692,7 +692,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
692 bss->wpa_ie_len); 692 bss->wpa_ie_len);
693 } else if (pos[1] >= MARVELL_MESH_IE_LENGTH && 693 } else if (pos[1] >= MARVELL_MESH_IE_LENGTH &&
694 pos[2] == 0x00 && pos[3] == 0x50 && 694 pos[2] == 0x00 && pos[3] == 0x50 &&
695 pos[4] == 0x43 && pos[4] == 0x04) { 695 pos[4] == 0x43 && pos[5] == 0x04) {
696 lbs_deb_scan("got mesh IE\n"); 696 lbs_deb_scan("got mesh IE\n");
697 bss->mesh = 1; 697 bss->mesh = 1;
698 } else { 698 } else {
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index 3d3914c83b1..28790e03dc4 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -286,7 +286,7 @@ void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
286 lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd)); 286 lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
287} 287}
288 288
289void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid) 289void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
290{ 290{
291 struct cmd_ds_set_bssid cmd; 291 struct cmd_ds_set_bssid cmd;
292 292
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index 8995cd7c29b..4cc42dd5a00 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -463,7 +463,7 @@ int lbtf_set_radio_control(struct lbtf_private *priv);
463int lbtf_update_hw_spec(struct lbtf_private *priv); 463int lbtf_update_hw_spec(struct lbtf_private *priv);
464int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv); 464int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv);
465void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode); 465void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode);
466void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid); 466void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid);
467int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr); 467int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr);
468 468
469int lbtf_set_channel(struct lbtf_private *priv, u8 channel); 469int lbtf_set_channel(struct lbtf_private *priv, u8 channel);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index f83d69e813d..fce49ba061d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -779,6 +779,8 @@ static int __init init_mac80211_hwsim(void)
779 BIT(NL80211_IFTYPE_MESH_POINT); 779 BIT(NL80211_IFTYPE_MESH_POINT);
780 hw->ampdu_queues = 1; 780 hw->ampdu_queues = 1;
781 781
782 hw->flags = IEEE80211_HW_MFP_CAPABLE;
783
782 /* ask mac80211 to reserve space for magic */ 784 /* ask mac80211 to reserve space for magic */
783 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 785 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
784 hw->sta_data_size = sizeof(struct hwsim_sta_priv); 786 hw->sta_data_size = sizeof(struct hwsim_sta_priv);
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
new file mode 100644
index 00000000000..44411eb4e91
--- /dev/null
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -0,0 +1,120 @@
1config HERMES
2 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
3 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
4 select WIRELESS_EXT
5 select FW_LOADER
6 select CRYPTO
7 select CRYPTO_MICHAEL_MIC
8 ---help---
9 A driver for 802.11b wireless cards based on the "Hermes" or
10 Intersil HFA384x (Prism 2) MAC controller. This includes the vast
11 majority of the PCMCIA 802.11b cards (which are nearly all rebadges)
12 - except for the Cisco/Aironet cards. Cards supported include the
13 Apple Airport (not a PCMCIA card), WavelanIEEE/Orinoco,
14 Cabletron/EnteraSys Roamabout, ELSA AirLancer, MELCO Buffalo, Avaya,
 15 IBM High Rate Wireless, Farallon Skyline, Samsung MagicLAN, Netgear
16 MA401, LinkSys WPC-11, D-Link DWL-650, 3Com AirConnect, Intel
17 IPW2011, and Symbol Spectrum24 High Rate amongst others.
18
19 This option includes the guts of the driver, but in order to
20 actually use a card you will also need to enable support for PCMCIA
21 Hermes cards, PLX9052 based PCI adaptors or the Apple Airport below.
22
 23 You will also very likely need the Wireless Tools in order to
 24 configure your card and to make sure that /etc/pcmcia/wireless.opts works:
25 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
26
27config HERMES_CACHE_FW_ON_INIT
28 bool "Cache Hermes firmware on driver initialisation"
29 depends on HERMES
30 default y
31 ---help---
32 Say Y to cache any firmware required by the Hermes drivers
33 on startup. The firmware will remain cached until the
34 driver is unloaded. The cache uses 64K of RAM.
35
36 Otherwise load the firmware from userspace as required. In
37 this case the driver should be unloaded and restarted
38 whenever the firmware is changed.
39
40 If you are not sure, say Y.
41
42config APPLE_AIRPORT
43 tristate "Apple Airport support (built-in)"
44 depends on PPC_PMAC && HERMES
45 help
46 Say Y here to support the Airport 802.11b wireless Ethernet hardware
47 built into the Macintosh iBook and other recent PowerPC-based
48 Macintosh machines. This is essentially a Lucent Orinoco card with
49 a non-standard interface.
50
51 This driver does not support the Airport Extreme (802.11b/g). Use
52 the BCM43xx driver for Airport Extreme cards.
53
54config PLX_HERMES
55 tristate "Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.)"
56 depends on PCI && HERMES
57 help
58 Enable support for PCMCIA cards supported by the "Hermes" (aka
59 orinoco) driver when used in PLX9052 based PCI adaptors. These
60 adaptors are not a full PCMCIA controller but act as a more limited
61 PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
62 802.11b PCMCIA cards can be used in desktop machines. The Netgear
63 MA301 is such an adaptor.
64
65config TMD_HERMES
66 tristate "Hermes in TMD7160 based PCI adaptor support"
67 depends on PCI && HERMES
68 help
69 Enable support for PCMCIA cards supported by the "Hermes" (aka
70 orinoco) driver when used in TMD7160 based PCI adaptors. These
71 adaptors are not a full PCMCIA controller but act as a more limited
72 PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
73 802.11b PCMCIA cards can be used in desktop machines.
74
75config NORTEL_HERMES
76 tristate "Nortel emobility PCI adaptor support"
77 depends on PCI && HERMES
78 help
79 Enable support for PCMCIA cards supported by the "Hermes" (aka
80 orinoco) driver when used in Nortel emobility PCI adaptors. These
81 adaptors are not full PCMCIA controllers, but act as a more limited
82 PCI <-> PCMCIA bridge.
83
84config PCI_HERMES
85 tristate "Prism 2.5 PCI 802.11b adaptor support"
86 depends on PCI && HERMES
87 help
88 Enable support for PCI and mini-PCI 802.11b wireless NICs based on
89 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
90 PCMCIA cards bundled with PCI<->PCMCIA adaptors which are also
91 common. Some of the built-in wireless adaptors in laptops are of
92 this variety.
93
94config PCMCIA_HERMES
95 tristate "Hermes PCMCIA card support"
96 depends on PCMCIA && HERMES
97 ---help---
98 A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
99 as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
100 EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and
101 others). It should also be usable on various Prism II based cards
102 such as the Linksys, D-Link and Farallon Skyline. It should also
103 work on Symbol cards such as the 3Com AirConnect and Ericsson WLAN.
104
105 You will very likely need the Wireless Tools in order to
 106 configure your card and to make sure that /etc/pcmcia/wireless.opts works:
107 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
108
109config PCMCIA_SPECTRUM
110 tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
111 depends on PCMCIA && HERMES
112 ---help---
113
114 This is a driver for 802.11b cards using RAM-loadable Symbol
115 firmware, such as Symbol Wireless Networker LA4100, CompactFlash
116 cards by Socket Communications and Intel PRO/Wireless 2011B.
117
118 This driver requires firmware download on startup. Utilities
119 for downloading Symbol firmware are available at
120 <http://sourceforge.net/projects/orinoco/>
diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/orinoco/Makefile
index 791366e08c5..1fc7409d669 100644
--- a/drivers/net/wireless/orinoco/Makefile
+++ b/drivers/net/wireless/orinoco/Makefile
@@ -1,8 +1,9 @@
1# 1#
2# Makefile for the orinoco wireless device drivers. 2# Makefile for the orinoco wireless device drivers.
3# 3#
4orinoco-objs := main.o fw.o hw.o mic.o scan.o wext.o hermes_dld.o hermes.o
4 5
5obj-$(CONFIG_HERMES) += orinoco.o hermes.o hermes_dld.o 6obj-$(CONFIG_HERMES) += orinoco.o
6obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o 7obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o
7obj-$(CONFIG_APPLE_AIRPORT) += airport.o 8obj-$(CONFIG_APPLE_AIRPORT) += airport.o
8obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o 9obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index 28f1cae4843..8c4065f1b0d 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -3,10 +3,10 @@
3 * A driver for "Hermes" chipset based Apple Airport wireless 3 * A driver for "Hermes" chipset based Apple Airport wireless
4 * card. 4 * card.
5 * 5 *
6 * Copyright notice & release notes in file orinoco.c 6 * Copyright notice & release notes in file main.c
7 * 7 *
8 * Note specific to airport stub: 8 * Note specific to airport stub:
9 * 9 *
10 * 0.05 : first version of the new split driver 10 * 0.05 : first version of the new split driver
11 * 0.06 : fix possible hang on powerup, add sleep support 11 * 0.06 : fix possible hang on powerup, add sleep support
12 */ 12 */
@@ -60,7 +60,8 @@ airport_suspend(struct macio_dev *mdev, pm_message_t state)
60 orinoco_unlock(priv, &flags); 60 orinoco_unlock(priv, &flags);
61 61
62 disable_irq(dev->irq); 62 disable_irq(dev->irq);
63 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0); 63 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
64 macio_get_of_node(mdev), 0, 0);
64 65
65 return 0; 66 return 0;
66} 67}
@@ -75,7 +76,8 @@ airport_resume(struct macio_dev *mdev)
75 76
76 printk(KERN_DEBUG "%s: Airport waking up\n", dev->name); 77 printk(KERN_DEBUG "%s: Airport waking up\n", dev->name);
77 78
78 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1); 79 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
80 macio_get_of_node(mdev), 0, 1);
79 msleep(200); 81 msleep(200);
80 82
81 enable_irq(dev->irq); 83 enable_irq(dev->irq);
@@ -93,7 +95,7 @@ airport_resume(struct macio_dev *mdev)
93 95
94 priv->hw_unavailable--; 96 priv->hw_unavailable--;
95 97
96 if (priv->open && (! priv->hw_unavailable)) { 98 if (priv->open && (!priv->hw_unavailable)) {
97 err = __orinoco_up(dev); 99 err = __orinoco_up(dev);
98 if (err) 100 if (err)
99 printk(KERN_ERR "%s: Error %d restarting card on PBOOK_WAKE\n", 101 printk(KERN_ERR "%s: Error %d restarting card on PBOOK_WAKE\n",
@@ -127,7 +129,8 @@ airport_detach(struct macio_dev *mdev)
127 129
128 macio_release_resource(mdev, 0); 130 macio_release_resource(mdev, 0);
129 131
130 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0); 132 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
133 macio_get_of_node(mdev), 0, 0);
131 ssleep(1); 134 ssleep(1);
132 135
133 macio_set_drvdata(mdev, NULL); 136 macio_set_drvdata(mdev, NULL);
@@ -153,9 +156,11 @@ static int airport_hard_reset(struct orinoco_private *priv)
153 * off. */ 156 * off. */
154 disable_irq(dev->irq); 157 disable_irq(dev->irq);
155 158
156 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 0); 159 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
160 macio_get_of_node(card->mdev), 0, 0);
157 ssleep(1); 161 ssleep(1);
158 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 1); 162 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
163 macio_get_of_node(card->mdev), 0, 1);
159 ssleep(1); 164 ssleep(1);
160 165
161 enable_irq(dev->irq); 166 enable_irq(dev->irq);
@@ -182,7 +187,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
182 /* Allocate space for private device-specific data */ 187 /* Allocate space for private device-specific data */
183 dev = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev, 188 dev = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
184 airport_hard_reset, NULL); 189 airport_hard_reset, NULL);
185 if (! dev) { 190 if (!dev) {
186 printk(KERN_ERR PFX "Cannot allocate network device\n"); 191 printk(KERN_ERR PFX "Cannot allocate network device\n");
187 return -ENODEV; 192 return -ENODEV;
188 } 193 }
@@ -214,9 +219,10 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
214 } 219 }
215 220
216 hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING); 221 hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING);
217 222
218 /* Power up card */ 223 /* Power up card */
219 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1); 224 pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
225 macio_get_of_node(mdev), 0, 1);
220 ssleep(1); 226 ssleep(1);
221 227
222 /* Reset it before we get the interrupt */ 228 /* Reset it before we get the interrupt */
@@ -248,7 +254,7 @@ MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
248MODULE_DESCRIPTION("Driver for the Apple Airport wireless card."); 254MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
249MODULE_LICENSE("Dual MPL/GPL"); 255MODULE_LICENSE("Dual MPL/GPL");
250 256
251static struct of_device_id airport_match[] = 257static struct of_device_id airport_match[] =
252{ 258{
253 { 259 {
254 .name = "radio", 260 .name = "radio",
@@ -256,10 +262,9 @@ static struct of_device_id airport_match[] =
256 {}, 262 {},
257}; 263};
258 264
259MODULE_DEVICE_TABLE (of, airport_match); 265MODULE_DEVICE_TABLE(of, airport_match);
260 266
261static struct macio_driver airport_driver = 267static struct macio_driver airport_driver = {
262{
263 .name = DRIVER_NAME, 268 .name = DRIVER_NAME,
264 .match_table = airport_match, 269 .match_table = airport_match,
265 .probe = airport_attach, 270 .probe = airport_attach,
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
new file mode 100644
index 00000000000..7d2292d6ce0
--- /dev/null
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -0,0 +1,340 @@
1/* Firmware file reading and download helpers
2 *
3 * See copyright notice in main.c
4 */
5#include <linux/kernel.h>
6#include <linux/firmware.h>
7
8#include "hermes.h"
9#include "hermes_dld.h"
10#include "orinoco.h"
11
12#include "fw.h"
13
14/* End markers (for Symbol firmware only) */
15#define TEXT_END 0x1A /* End of text header */
16
17struct fw_info {
18 char *pri_fw;
19 char *sta_fw;
20 char *ap_fw;
21 u32 pda_addr;
22 u16 pda_size;
23};
24
25static const struct fw_info orinoco_fw[] = {
26 { NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
27 { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
28 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
29};
30
31/* Structure used to access fields in FW
32 * Make sure LE decoding macros are used
33 */
34struct orinoco_fw_header {
35 char hdr_vers[6]; /* ASCII string for header version */
36 __le16 headersize; /* Total length of header */
37 __le32 entry_point; /* NIC entry point */
38 __le32 blocks; /* Number of blocks to program */
39 __le32 block_offset; /* Offset of block data from eof header */
40 __le32 pdr_offset; /* Offset to PDR data from eof header */
41 __le32 pri_offset; /* Offset to primary plug data */
42 __le32 compat_offset; /* Offset to compatibility data*/
43 char signature[0]; /* FW signature length headersize-20 */
44} __attribute__ ((packed));
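All multi-byte fields in the header above are little-endian on disk, so they must be decoded with the LE helpers before use. The following is a hedged sketch of resolving and bounds-checking the block offset; the helper name and the length checks are illustrative, and the field arithmetic mirrors what orinoco_dl_firmware() does below (it assumes the includes already present at the top of fw.c).

/* Illustrative helper, not part of the patch: locate the first
 * programming block of an Agere image using the header fields. */
static const u8 *example_fw_first_block(const struct firmware *fw_entry)
{
	const struct orinoco_fw_header *hdr =
		(const struct orinoco_fw_header *) fw_entry->data;
	size_t off;

	if (fw_entry->size < sizeof(*hdr))
		return NULL;			/* truncated image */

	off = le16_to_cpu(hdr->headersize) + le32_to_cpu(hdr->block_offset);
	if (off >= fw_entry->size)
		return NULL;			/* corrupt offsets */

	return fw_entry->data + off;
}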
45
46/* Download either STA or AP firmware into the card. */
47static int
48orinoco_dl_firmware(struct orinoco_private *priv,
49 const struct fw_info *fw,
50 int ap)
51{
52 /* Plug Data Area (PDA) */
53 __le16 *pda;
54
55 hermes_t *hw = &priv->hw;
56 const struct firmware *fw_entry;
57 const struct orinoco_fw_header *hdr;
58 const unsigned char *first_block;
59 const unsigned char *end;
60 const char *firmware;
61 struct net_device *dev = priv->ndev;
62 int err = 0;
63
64 pda = kzalloc(fw->pda_size, GFP_KERNEL);
65 if (!pda)
66 return -ENOMEM;
67
68 if (ap)
69 firmware = fw->ap_fw;
70 else
71 firmware = fw->sta_fw;
72
73 printk(KERN_DEBUG "%s: Attempting to download firmware %s\n",
74 dev->name, firmware);
75
76 /* Read current plug data */
77 err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
78 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
79 if (err)
80 goto free;
81
82 if (!priv->cached_fw) {
83 err = request_firmware(&fw_entry, firmware, priv->dev);
84
85 if (err) {
86 printk(KERN_ERR "%s: Cannot find firmware %s\n",
87 dev->name, firmware);
88 err = -ENOENT;
89 goto free;
90 }
91 } else
92 fw_entry = priv->cached_fw;
93
94 hdr = (const struct orinoco_fw_header *) fw_entry->data;
95
96 /* Enable aux port to allow programming */
97 err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
98 printk(KERN_DEBUG "%s: Program init returned %d\n", dev->name, err);
99 if (err != 0)
100 goto abort;
101
102 /* Program data */
103 first_block = (fw_entry->data +
104 le16_to_cpu(hdr->headersize) +
105 le32_to_cpu(hdr->block_offset));
106 end = fw_entry->data + fw_entry->size;
107
108 err = hermes_program(hw, first_block, end);
109 printk(KERN_DEBUG "%s: Program returned %d\n", dev->name, err);
110 if (err != 0)
111 goto abort;
112
113 /* Update production data */
114 first_block = (fw_entry->data +
115 le16_to_cpu(hdr->headersize) +
116 le32_to_cpu(hdr->pdr_offset));
117
118 err = hermes_apply_pda_with_defaults(hw, first_block, pda);
119 printk(KERN_DEBUG "%s: Apply PDA returned %d\n", dev->name, err);
120 if (err)
121 goto abort;
122
123 /* Tell card we've finished */
124 err = hermesi_program_end(hw);
125 printk(KERN_DEBUG "%s: Program end returned %d\n", dev->name, err);
126 if (err != 0)
127 goto abort;
128
129 /* Check if we're running */
130 printk(KERN_DEBUG "%s: hermes_present returned %d\n",
131 dev->name, hermes_present(hw));
132
133abort:
134 /* If we requested the firmware, release it. */
135 if (!priv->cached_fw)
136 release_firmware(fw_entry);
137
138free:
139 kfree(pda);
140 return err;
141}
142
143/*
144 * Process a firmware image - stop the card, load the firmware, reset
145 * the card and make sure it responds. For the secondary firmware take
146 * care of the PDA - read it and then write it on top of the firmware.
147 */
148static int
149symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
150 const unsigned char *image, const unsigned char *end,
151 int secondary)
152{
153 hermes_t *hw = &priv->hw;
154 int ret = 0;
155 const unsigned char *ptr;
156 const unsigned char *first_block;
157
158 /* Plug Data Area (PDA) */
159 __le16 *pda = NULL;
160
161 /* Binary block begins after the 0x1A marker */
162 ptr = image;
163 while (*ptr++ != TEXT_END);
164 first_block = ptr;
165
166 /* Read the PDA from EEPROM */
167 if (secondary) {
168 pda = kzalloc(fw->pda_size, GFP_KERNEL);
169 if (!pda)
170 return -ENOMEM;
171
172 ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
173 if (ret)
174 goto free;
175 }
176
177 /* Stop the firmware, so that it can be safely rewritten */
178 if (priv->stop_fw) {
179 ret = priv->stop_fw(priv, 1);
180 if (ret)
181 goto free;
182 }
183
184 /* Program the adapter with new firmware */
185 ret = hermes_program(hw, first_block, end);
186 if (ret)
187 goto free;
188
189 /* Write the PDA to the adapter */
190 if (secondary) {
191 size_t len = hermes_blocks_length(first_block);
192 ptr = first_block + len;
193 ret = hermes_apply_pda(hw, ptr, pda);
194 kfree(pda);
195 if (ret)
196 return ret;
197 }
198
199 /* Run the firmware */
200 if (priv->stop_fw) {
201 ret = priv->stop_fw(priv, 0);
202 if (ret)
203 return ret;
204 }
205
206 /* Reset hermes chip and make sure it responds */
207 ret = hermes_init(hw);
208
209	/* hermes_init() should return 0 with the secondary firmware */
210 if (secondary && ret != 0)
211 return -ENODEV;
212
213 /* And this should work with any firmware */
214 if (!hermes_present(hw))
215 return -ENODEV;
216
217 return 0;
218
219free:
220 kfree(pda);
221 return ret;
222}
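symbol_dl_image() above skips the ASCII text header by scanning for the 0x1A TEXT_END marker with an unbounded loop. Purely as an illustration, assuming a malformed image could lack the marker, a bounded variant would look like this:

/* Illustration only: a bounds-checked version of the TEXT_END scan. */
static const unsigned char *
example_skip_text_header(const unsigned char *image, const unsigned char *end)
{
	const unsigned char *p = image;

	while (p < end && *p != TEXT_END)
		p++;

	if (p == end)
		return NULL;	/* no 0x1A marker: reject the image */

	return p + 1;		/* binary blocks start right after the marker */
}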
223
224
225/*
226 * Download the firmware into the card, this also does a PCMCIA soft
227 * reset on the card, to make sure it's in a sane state.
228 */
229static int
230symbol_dl_firmware(struct orinoco_private *priv,
231 const struct fw_info *fw)
232{
233 struct net_device *dev = priv->ndev;
234 int ret;
235 const struct firmware *fw_entry;
236
237 if (!priv->cached_pri_fw) {
238 if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) {
239 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
240 dev->name, fw->pri_fw);
241 return -ENOENT;
242 }
243 } else
244 fw_entry = priv->cached_pri_fw;
245
246 /* Load primary firmware */
247 ret = symbol_dl_image(priv, fw, fw_entry->data,
248 fw_entry->data + fw_entry->size, 0);
249
250 if (!priv->cached_pri_fw)
251 release_firmware(fw_entry);
252 if (ret) {
253 printk(KERN_ERR "%s: Primary firmware download failed\n",
254 dev->name);
255 return ret;
256 }
257
258 if (!priv->cached_fw) {
259 if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) {
260 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
261 dev->name, fw->sta_fw);
262 return -ENOENT;
263 }
264 } else
265 fw_entry = priv->cached_fw;
266
267 /* Load secondary firmware */
268 ret = symbol_dl_image(priv, fw, fw_entry->data,
269 fw_entry->data + fw_entry->size, 1);
270 if (!priv->cached_fw)
271 release_firmware(fw_entry);
272 if (ret) {
273 printk(KERN_ERR "%s: Secondary firmware download failed\n",
274 dev->name);
275 }
276
277 return ret;
278}
279
280int orinoco_download(struct orinoco_private *priv)
281{
282 int err = 0;
283 /* Reload firmware */
284 switch (priv->firmware_type) {
285 case FIRMWARE_TYPE_AGERE:
286 /* case FIRMWARE_TYPE_INTERSIL: */
287 err = orinoco_dl_firmware(priv,
288 &orinoco_fw[priv->firmware_type], 0);
289 break;
290
291 case FIRMWARE_TYPE_SYMBOL:
292 err = symbol_dl_firmware(priv,
293 &orinoco_fw[priv->firmware_type]);
294 break;
295 case FIRMWARE_TYPE_INTERSIL:
296 break;
297 }
298 /* TODO: if we fail we probably need to reinitialise
299 * the driver */
300
301 return err;
302}
303
304void orinoco_cache_fw(struct orinoco_private *priv, int ap)
305{
306#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
307 const struct firmware *fw_entry = NULL;
308 const char *pri_fw;
309 const char *fw;
310
311 pri_fw = orinoco_fw[priv->firmware_type].pri_fw;
312 if (ap)
313 fw = orinoco_fw[priv->firmware_type].ap_fw;
314 else
315 fw = orinoco_fw[priv->firmware_type].sta_fw;
316
317 if (pri_fw) {
318 if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0)
319 priv->cached_pri_fw = fw_entry;
320 }
321
322 if (fw) {
323 if (request_firmware(&fw_entry, fw, priv->dev) == 0)
324 priv->cached_fw = fw_entry;
325 }
326#endif
327}
328
329void orinoco_uncache_fw(struct orinoco_private *priv)
330{
331#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
332 if (priv->cached_pri_fw)
333 release_firmware(priv->cached_pri_fw);
334 if (priv->cached_fw)
335 release_firmware(priv->cached_fw);
336
337 priv->cached_pri_fw = NULL;
338 priv->cached_fw = NULL;
339#endif
340}
diff --git a/drivers/net/wireless/orinoco/fw.h b/drivers/net/wireless/orinoco/fw.h
new file mode 100644
index 00000000000..2290f0845d5
--- /dev/null
+++ b/drivers/net/wireless/orinoco/fw.h
@@ -0,0 +1,16 @@
1/* Firmware file reading and download helpers
2 *
3 * See copyright notice in main.c
4 */
5#ifndef _ORINOCO_FW_H_
6#define _ORINOCO_FW_H_
7
8/* Forward declarations */
9struct orinoco_private;
10
11int orinoco_download(struct orinoco_private *priv);
12
13void orinoco_cache_fw(struct orinoco_private *priv, int ap);
14void orinoco_uncache_fw(struct orinoco_private *priv);
15
16#endif /* _ORINOCO_FW_H_ */
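fw.h therefore exposes three entry points: orinoco_download() to (re)program the card, and orinoco_cache_fw()/orinoco_uncache_fw() to keep the images pinned in memory so a resume path cannot fail because the firmware files are unreachable. A hedged usage sketch with hypothetical call-site names (the real call sites live in main.c and the bus attach code) follows:

/* Hypothetical call sites, for illustration only. */
static int example_attach(struct orinoco_private *priv)
{
	/* Keep the images in memory so a later resume cannot fail because
	 * the filesystem holding them is not mounted yet; this is a no-op
	 * unless the caching Kconfig/PM options are enabled. */
	orinoco_cache_fw(priv, 0);		/* 0 = station firmware */

	return orinoco_download(priv);		/* program the adapter */
}

static void example_detach(struct orinoco_private *priv)
{
	orinoco_uncache_fw(priv);		/* drop the cached images */
}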
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index bfa375369df..f2c918c2572 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -15,7 +15,7 @@
15 * 15 *
16 * Copyright (C) 2000, David Gibson, Linuxcare Australia. 16 * Copyright (C) 2000, David Gibson, Linuxcare Australia.
17 * (C) Copyright David Gibson, IBM Corp. 2001-2003. 17 * (C) Copyright David Gibson, IBM Corp. 2001-2003.
18 * 18 *
19 * The contents of this file are subject to the Mozilla Public License 19 * The contents of this file are subject to the Mozilla Public License
20 * Version 1.1 (the "License"); you may not use this file except in 20 * Version 1.1 (the "License"); you may not use this file except in
21 * compliance with the License. You may obtain a copy of the License 21 * compliance with the License. You may obtain a copy of the License
@@ -45,11 +45,6 @@
45 45
46#include "hermes.h" 46#include "hermes.h"
47 47
48MODULE_DESCRIPTION("Low-level driver helper for Lucent Hermes chipset and Prism II HFA384x wireless MAC controller");
49MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>"
50 " & David Gibson <hermes@gibson.dropbear.id.au>");
51MODULE_LICENSE("Dual MPL/GPL");
52
53/* These are maximum timeouts. Most often, card will react much faster */ 48/* These are maximum timeouts. Most often, card will react much faster */
54#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */ 49#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */
55#define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */ 50#define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */
@@ -61,13 +56,13 @@ MODULE_LICENSE("Dual MPL/GPL");
61 */ 56 */
62 57
63#define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %p: " , hw->iobase); \ 58#define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %p: " , hw->iobase); \
64 printk(stuff);} while (0) 59 printk(stuff); } while (0)
65 60
66#undef HERMES_DEBUG 61#undef HERMES_DEBUG
67#ifdef HERMES_DEBUG 62#ifdef HERMES_DEBUG
68#include <stdarg.h> 63#include <stdarg.h>
69 64
70#define DEBUG(lvl, stuff...) if ( (lvl) <= HERMES_DEBUG) DMSG(stuff) 65#define DEBUG(lvl, stuff...) if ((lvl) <= HERMES_DEBUG) DMSG(stuff)
71 66
72#else /* ! HERMES_DEBUG */ 67#else /* ! HERMES_DEBUG */
73 68
@@ -95,20 +90,19 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
95 90
96 /* First wait for the command register to unbusy */ 91 /* First wait for the command register to unbusy */
97 reg = hermes_read_regn(hw, CMD); 92 reg = hermes_read_regn(hw, CMD);
98 while ( (reg & HERMES_CMD_BUSY) && k ) { 93 while ((reg & HERMES_CMD_BUSY) && k) {
99 k--; 94 k--;
100 udelay(1); 95 udelay(1);
101 reg = hermes_read_regn(hw, CMD); 96 reg = hermes_read_regn(hw, CMD);
102 } 97 }
103 if (reg & HERMES_CMD_BUSY) { 98 if (reg & HERMES_CMD_BUSY)
104 return -EBUSY; 99 return -EBUSY;
105 }
106 100
107 hermes_write_regn(hw, PARAM2, param2); 101 hermes_write_regn(hw, PARAM2, param2);
108 hermes_write_regn(hw, PARAM1, param1); 102 hermes_write_regn(hw, PARAM1, param1);
109 hermes_write_regn(hw, PARAM0, param0); 103 hermes_write_regn(hw, PARAM0, param0);
110 hermes_write_regn(hw, CMD, cmd); 104 hermes_write_regn(hw, CMD, cmd);
111 105
112 return 0; 106 return 0;
113} 107}
114 108
@@ -191,23 +185,23 @@ int hermes_init(hermes_t *hw)
191 hermes_write_regn(hw, EVACK, 0xffff); 185 hermes_write_regn(hw, EVACK, 0xffff);
192 186
193 /* Normally it's a "can't happen" for the command register to 187 /* Normally it's a "can't happen" for the command register to
194 be busy when we go to issue a command because we are 188 be busy when we go to issue a command because we are
195 serializing all commands. However we want to have some 189 serializing all commands. However we want to have some
196 chance of resetting the card even if it gets into a stupid 190 chance of resetting the card even if it gets into a stupid
197 state, so we actually wait to see if the command register 191 state, so we actually wait to see if the command register
198 will unbusy itself here. */ 192 will unbusy itself here. */
199 k = CMD_BUSY_TIMEOUT; 193 k = CMD_BUSY_TIMEOUT;
200 reg = hermes_read_regn(hw, CMD); 194 reg = hermes_read_regn(hw, CMD);
201 while (k && (reg & HERMES_CMD_BUSY)) { 195 while (k && (reg & HERMES_CMD_BUSY)) {
202 if (reg == 0xffff) /* Special case - the card has probably been removed, 196 if (reg == 0xffff) /* Special case - the card has probably been
203 so don't wait for the timeout */ 197 removed, so don't wait for the timeout */
204 return -ENODEV; 198 return -ENODEV;
205 199
206 k--; 200 k--;
207 udelay(1); 201 udelay(1);
208 reg = hermes_read_regn(hw, CMD); 202 reg = hermes_read_regn(hw, CMD);
209 } 203 }
210 204
211 /* No need to explicitly handle the timeout - if we've timed 205 /* No need to explicitly handle the timeout - if we've timed
212 out hermes_issue_cmd() will probably return -EBUSY below */ 206 out hermes_issue_cmd() will probably return -EBUSY below */
213 207
@@ -228,7 +222,10 @@ EXPORT_SYMBOL(hermes_init);
228/* Issue a command to the chip, and (busy!) wait for it to 222/* Issue a command to the chip, and (busy!) wait for it to
229 * complete. 223 * complete.
230 * 224 *
231 * Returns: < 0 on internal error, 0 on success, > 0 on error returned by the firmware 225 * Returns:
226 * < 0 on internal error
227 * 0 on success
228 * > 0 on error returned by the firmware
232 * 229 *
233 * Callable from any context, but locking is your problem. */ 230 * Callable from any context, but locking is your problem. */
234int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, 231int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
@@ -241,13 +238,13 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
241 238
242 err = hermes_issue_cmd(hw, cmd, parm0, 0, 0); 239 err = hermes_issue_cmd(hw, cmd, parm0, 0, 0);
243 if (err) { 240 if (err) {
244 if (! hermes_present(hw)) { 241 if (!hermes_present(hw)) {
245 if (net_ratelimit()) 242 if (net_ratelimit())
246 printk(KERN_WARNING "hermes @ %p: " 243 printk(KERN_WARNING "hermes @ %p: "
247 "Card removed while issuing command " 244 "Card removed while issuing command "
248 "0x%04x.\n", hw->iobase, cmd); 245 "0x%04x.\n", hw->iobase, cmd);
249 err = -ENODEV; 246 err = -ENODEV;
250 } else 247 } else
251 if (net_ratelimit()) 248 if (net_ratelimit())
252 printk(KERN_ERR "hermes @ %p: " 249 printk(KERN_ERR "hermes @ %p: "
253 "Error %d issuing command 0x%04x.\n", 250 "Error %d issuing command 0x%04x.\n",
@@ -257,21 +254,21 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
257 254
258 reg = hermes_read_regn(hw, EVSTAT); 255 reg = hermes_read_regn(hw, EVSTAT);
259 k = CMD_COMPL_TIMEOUT; 256 k = CMD_COMPL_TIMEOUT;
260 while ( (! (reg & HERMES_EV_CMD)) && k) { 257 while ((!(reg & HERMES_EV_CMD)) && k) {
261 k--; 258 k--;
262 udelay(10); 259 udelay(10);
263 reg = hermes_read_regn(hw, EVSTAT); 260 reg = hermes_read_regn(hw, EVSTAT);
264 } 261 }
265 262
266 if (! hermes_present(hw)) { 263 if (!hermes_present(hw)) {
267 printk(KERN_WARNING "hermes @ %p: Card removed " 264 printk(KERN_WARNING "hermes @ %p: Card removed "
268 "while waiting for command 0x%04x completion.\n", 265 "while waiting for command 0x%04x completion.\n",
269 hw->iobase, cmd); 266 hw->iobase, cmd);
270 err = -ENODEV; 267 err = -ENODEV;
271 goto out; 268 goto out;
272 } 269 }
273 270
274 if (! (reg & HERMES_EV_CMD)) { 271 if (!(reg & HERMES_EV_CMD)) {
275 printk(KERN_ERR "hermes @ %p: Timeout waiting for " 272 printk(KERN_ERR "hermes @ %p: Timeout waiting for "
276 "command 0x%04x completion.\n", hw->iobase, cmd); 273 "command 0x%04x completion.\n", hw->iobase, cmd);
277 err = -ETIMEDOUT; 274 err = -ETIMEDOUT;
@@ -301,31 +298,30 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
301 int err = 0; 298 int err = 0;
302 int k; 299 int k;
303 u16 reg; 300 u16 reg;
304 301
305 if ( (size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX) ) 302 if ((size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX))
306 return -EINVAL; 303 return -EINVAL;
307 304
308 err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL); 305 err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL);
309 if (err) { 306 if (err)
310 return err; 307 return err;
311 }
312 308
313 reg = hermes_read_regn(hw, EVSTAT); 309 reg = hermes_read_regn(hw, EVSTAT);
314 k = ALLOC_COMPL_TIMEOUT; 310 k = ALLOC_COMPL_TIMEOUT;
315 while ( (! (reg & HERMES_EV_ALLOC)) && k) { 311 while ((!(reg & HERMES_EV_ALLOC)) && k) {
316 k--; 312 k--;
317 udelay(10); 313 udelay(10);
318 reg = hermes_read_regn(hw, EVSTAT); 314 reg = hermes_read_regn(hw, EVSTAT);
319 } 315 }
320 316
321 if (! hermes_present(hw)) { 317 if (!hermes_present(hw)) {
322 printk(KERN_WARNING "hermes @ %p: " 318 printk(KERN_WARNING "hermes @ %p: "
323 "Card removed waiting for frame allocation.\n", 319 "Card removed waiting for frame allocation.\n",
324 hw->iobase); 320 hw->iobase);
325 return -ENODEV; 321 return -ENODEV;
326 } 322 }
327 323
328 if (! (reg & HERMES_EV_ALLOC)) { 324 if (!(reg & HERMES_EV_ALLOC)) {
329 printk(KERN_ERR "hermes @ %p: " 325 printk(KERN_ERR "hermes @ %p: "
330 "Timeout waiting for frame allocation\n", 326 "Timeout waiting for frame allocation\n",
331 hw->iobase); 327 hw->iobase);
@@ -334,14 +330,17 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
334 330
335 *fid = hermes_read_regn(hw, ALLOCFID); 331 *fid = hermes_read_regn(hw, ALLOCFID);
336 hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC); 332 hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC);
337 333
338 return 0; 334 return 0;
339} 335}
340EXPORT_SYMBOL(hermes_allocate); 336EXPORT_SYMBOL(hermes_allocate);
341 337
342/* Set up a BAP to read a particular chunk of data from card's internal buffer. 338/* Set up a BAP to read a particular chunk of data from card's internal buffer.
343 * 339 *
344 * Returns: < 0 on internal failure (errno), 0 on success, >0 on error 340 * Returns:
341 * < 0 on internal failure (errno)
342 * 0 on success
343 * > 0 on error
345 * from firmware 344 * from firmware
346 * 345 *
347 * Callable from any context */ 346 * Callable from any context */
@@ -353,7 +352,7 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
353 u16 reg; 352 u16 reg;
354 353
355 /* Paranoia.. */ 354 /* Paranoia.. */
356 if ( (offset > HERMES_BAP_OFFSET_MAX) || (offset % 2) ) 355 if ((offset > HERMES_BAP_OFFSET_MAX) || (offset % 2))
357 return -EINVAL; 356 return -EINVAL;
358 357
359 k = HERMES_BAP_BUSY_TIMEOUT; 358 k = HERMES_BAP_BUSY_TIMEOUT;
@@ -374,7 +373,7 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
374 /* Wait for the BAP to be ready */ 373 /* Wait for the BAP to be ready */
375 k = HERMES_BAP_BUSY_TIMEOUT; 374 k = HERMES_BAP_BUSY_TIMEOUT;
376 reg = hermes_read_reg(hw, oreg); 375 reg = hermes_read_reg(hw, oreg);
377 while ( (reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) { 376 while ((reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) {
378 k--; 377 k--;
379 udelay(1); 378 udelay(1);
380 reg = hermes_read_reg(hw, oreg); 379 reg = hermes_read_reg(hw, oreg);
@@ -386,9 +385,8 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
386 (reg & HERMES_OFFSET_BUSY) ? "timeout" : "error", 385 (reg & HERMES_OFFSET_BUSY) ? "timeout" : "error",
387 reg, id, offset); 386 reg, id, offset);
388 387
389 if (reg & HERMES_OFFSET_BUSY) { 388 if (reg & HERMES_OFFSET_BUSY)
390 return -ETIMEDOUT; 389 return -ETIMEDOUT;
391 }
392 390
393 return -EIO; /* error or wrong offset */ 391 return -EIO; /* error or wrong offset */
394 } 392 }
@@ -400,7 +398,10 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
400 * BAP. Synchronization/serialization is the caller's problem. len 398 * BAP. Synchronization/serialization is the caller's problem. len
401 * must be even. 399 * must be even.
402 * 400 *
403 * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware 401 * Returns:
402 * < 0 on internal failure (errno)
403 * 0 on success
404 * > 0 on error from firmware
404 */ 405 */
405int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len, 406int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
406 u16 id, u16 offset) 407 u16 id, u16 offset)
@@ -408,7 +409,7 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
408 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 409 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
409 int err = 0; 410 int err = 0;
410 411
411 if ( (len < 0) || (len % 2) ) 412 if ((len < 0) || (len % 2))
412 return -EINVAL; 413 return -EINVAL;
413 414
414 err = hermes_bap_seek(hw, bap, id, offset); 415 err = hermes_bap_seek(hw, bap, id, offset);
@@ -426,7 +427,10 @@ EXPORT_SYMBOL(hermes_bap_pread);
426/* Write a block of data to the chip's buffer, via the 427/* Write a block of data to the chip's buffer, via the
427 * BAP. Synchronization/serialization is the caller's problem. 428 * BAP. Synchronization/serialization is the caller's problem.
428 * 429 *
429 * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware 430 * Returns:
431 * < 0 on internal failure (errno)
432 * 0 on success
433 * > 0 on error from firmware
430 */ 434 */
431int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len, 435int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
432 u16 id, u16 offset) 436 u16 id, u16 offset)
@@ -440,11 +444,11 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
440 err = hermes_bap_seek(hw, bap, id, offset); 444 err = hermes_bap_seek(hw, bap, id, offset);
441 if (err) 445 if (err)
442 goto out; 446 goto out;
443 447
444 /* Actually do the transfer */ 448 /* Actually do the transfer */
445 hermes_write_bytes(hw, dreg, buf, len); 449 hermes_write_bytes(hw, dreg, buf, len);
446 450
447 out: 451 out:
448 return err; 452 return err;
449} 453}
450EXPORT_SYMBOL(hermes_bap_pwrite); 454EXPORT_SYMBOL(hermes_bap_pwrite);
@@ -465,7 +469,7 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
465 u16 rlength, rtype; 469 u16 rlength, rtype;
466 unsigned nwords; 470 unsigned nwords;
467 471
468 if ( (bufsize < 0) || (bufsize % 2) ) 472 if ((bufsize < 0) || (bufsize % 2))
469 return -EINVAL; 473 return -EINVAL;
470 474
471 err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL); 475 err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL);
@@ -478,7 +482,7 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
478 482
479 rlength = hermes_read_reg(hw, dreg); 483 rlength = hermes_read_reg(hw, dreg);
480 484
481 if (! rlength) 485 if (!rlength)
482 return -ENODATA; 486 return -ENODATA;
483 487
484 rtype = hermes_read_reg(hw, dreg); 488 rtype = hermes_read_reg(hw, dreg);
@@ -503,7 +507,7 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
503} 507}
504EXPORT_SYMBOL(hermes_read_ltv); 508EXPORT_SYMBOL(hermes_read_ltv);
505 509
506int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 510int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
507 u16 length, const void *value) 511 u16 length, const void *value)
508{ 512{
509 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 513 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -530,15 +534,3 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
530 return err; 534 return err;
531} 535}
532EXPORT_SYMBOL(hermes_write_ltv); 536EXPORT_SYMBOL(hermes_write_ltv);
533
534static int __init init_hermes(void)
535{
536 return 0;
537}
538
539static void __exit exit_hermes(void)
540{
541}
542
543module_init(init_hermes);
544module_exit(exit_hermes);
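hermes_docmd_wait(), hermes_bap_pread() and hermes_bap_pwrite() share the three-way return convention spelled out in the reformatted comments above: a negative errno for internal failures, zero on success, and a positive firmware status otherwise. Callers have to fold the positive case into an errno of their own choosing; a sketch (the -EIO mapping is an assumption, not something the patch mandates):

/* Illustrative caller of the three-way return convention. */
static int example_issue(hermes_t *hw, u16 cmd)
{
	int err = hermes_docmd_wait(hw, cmd, 0, NULL);

	if (err < 0)
		return err;	/* already a negative errno */
	if (err > 0)
		return -EIO;	/* firmware reported an error for this command */

	return 0;
}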
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 8b13c8fef3d..c78c442a02c 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -15,7 +15,8 @@
15 * Copyright (C) 2000, David Gibson, Linuxcare Australia. 15 * Copyright (C) 2000, David Gibson, Linuxcare Australia.
16 * (C) Copyright David Gibson, IBM Corp. 2001-2003. 16 * (C) Copyright David Gibson, IBM Corp. 2001-2003.
17 * 17 *
18 * Portions taken from hfa384x.h, Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 18 * Portions taken from hfa384x.h.
19 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
19 * 20 *
20 * This file distributed under the GPL, version 2. 21 * This file distributed under the GPL, version 2.
21 */ 22 */
@@ -31,7 +32,7 @@
31*/ 32*/
32 33
33#include <linux/if_ether.h> 34#include <linux/if_ether.h>
34#include <asm/io.h> 35#include <linux/io.h>
35 36
36/* 37/*
37 * Limits and constants 38 * Limits and constants
@@ -203,7 +204,7 @@ struct hermes_tx_descriptor {
203 __le32 sw_support; 204 __le32 sw_support;
204 u8 retry_count; 205 u8 retry_count;
205 u8 tx_rate; 206 u8 tx_rate;
206 __le16 tx_control; 207 __le16 tx_control;
207} __attribute__ ((packed)); 208} __attribute__ ((packed));
208 209
209#define HERMES_TXSTAT_RETRYERR (0x0001) 210#define HERMES_TXSTAT_RETRYERR (0x0001)
@@ -298,7 +299,7 @@ struct symbol_scan_apinfo {
298 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ 299 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
299 __le16 essid_len; /* ESSID length */ 300 __le16 essid_len; /* ESSID length */
300 u8 essid[32]; /* ESSID of the network */ 301 u8 essid[32]; /* ESSID of the network */
301 __le16 rates[5]; /* Bit rate supported */ 302 __le16 rates[5]; /* Bit rate supported */
302 __le16 basic_rates; /* Basic rates bitmask */ 303 __le16 basic_rates; /* Basic rates bitmask */
303 u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */ 304 u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
304 u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */ 305 u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
@@ -344,14 +345,14 @@ struct agere_ext_scan_info {
344 u8 data[316]; 345 u8 data[316];
345} __attribute__ ((packed)); 346} __attribute__ ((packed));
346 347
347#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000) 348#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
348#define HERMES_LINKSTATUS_CONNECTED (0x0001) 349#define HERMES_LINKSTATUS_CONNECTED (0x0001)
349#define HERMES_LINKSTATUS_DISCONNECTED (0x0002) 350#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
350#define HERMES_LINKSTATUS_AP_CHANGE (0x0003) 351#define HERMES_LINKSTATUS_AP_CHANGE (0x0003)
351#define HERMES_LINKSTATUS_AP_OUT_OF_RANGE (0x0004) 352#define HERMES_LINKSTATUS_AP_OUT_OF_RANGE (0x0004)
352#define HERMES_LINKSTATUS_AP_IN_RANGE (0x0005) 353#define HERMES_LINKSTATUS_AP_IN_RANGE (0x0005)
353#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006) 354#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006)
354 355
355struct hermes_linkstatus { 356struct hermes_linkstatus {
356 __le16 linkstatus; /* Link status */ 357 __le16 linkstatus; /* Link status */
357} __attribute__ ((packed)); 358} __attribute__ ((packed));
@@ -384,11 +385,12 @@ typedef struct hermes {
384 385
385/* Register access convenience macros */ 386/* Register access convenience macros */
386#define hermes_read_reg(hw, off) \ 387#define hermes_read_reg(hw, off) \
387 (ioread16((hw)->iobase + ( (off) << (hw)->reg_spacing ))) 388 (ioread16((hw)->iobase + ((off) << (hw)->reg_spacing)))
388#define hermes_write_reg(hw, off, val) \ 389#define hermes_write_reg(hw, off, val) \
389 (iowrite16((val), (hw)->iobase + ((off) << (hw)->reg_spacing))) 390 (iowrite16((val), (hw)->iobase + ((off) << (hw)->reg_spacing)))
390#define hermes_read_regn(hw, name) hermes_read_reg((hw), HERMES_##name) 391#define hermes_read_regn(hw, name) hermes_read_reg((hw), HERMES_##name)
391#define hermes_write_regn(hw, name, val) hermes_write_reg((hw), HERMES_##name, (val)) 392#define hermes_write_regn(hw, name, val) \
393 hermes_write_reg((hw), HERMES_##name, (val))
392 394
393/* Function prototypes */ 395/* Function prototypes */
394void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing); 396void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
@@ -430,7 +432,7 @@ static inline int hermes_enable_port(hermes_t *hw, int port)
430 432
431static inline int hermes_disable_port(hermes_t *hw, int port) 433static inline int hermes_disable_port(hermes_t *hw, int port)
432{ 434{
433 return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8), 435 return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
434 0, NULL); 436 0, NULL);
435} 437}
436 438
@@ -441,11 +443,12 @@ static inline int hermes_inquire(hermes_t *hw, u16 rid)
441 return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL); 443 return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
442} 444}
443 445
444#define HERMES_BYTES_TO_RECLEN(n) ( (((n)+1)/2) + 1 ) 446#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1)
445#define HERMES_RECLEN_TO_BYTES(n) ( ((n)-1) * 2 ) 447#define HERMES_RECLEN_TO_BYTES(n) (((n)-1) * 2)
446 448
447/* Note that for the next two, the count is in 16-bit words, not bytes */ 449/* Note that for the next two, the count is in 16-bit words, not bytes */
448static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsigned count) 450static inline void hermes_read_words(struct hermes *hw, int off,
451 void *buf, unsigned count)
449{ 452{
450 off = off << hw->reg_spacing; 453 off = off << hw->reg_spacing;
451 ioread16_rep(hw->iobase + off, buf, count); 454 ioread16_rep(hw->iobase + off, buf, count);
@@ -460,7 +463,8 @@ static inline void hermes_write_bytes(struct hermes *hw, int off,
460 iowrite8(buf[count - 1], hw->iobase + off); 463 iowrite8(buf[count - 1], hw->iobase + off);
461} 464}
462 465
463static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count) 466static inline void hermes_clear_words(struct hermes *hw, int off,
467 unsigned count)
464{ 468{
465 unsigned i; 469 unsigned i;
466 470
@@ -471,9 +475,10 @@ static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count
471} 475}
472 476
473#define HERMES_READ_RECORD(hw, bap, rid, buf) \ 477#define HERMES_READ_RECORD(hw, bap, rid, buf) \
474 (hermes_read_ltv((hw),(bap),(rid), sizeof(*buf), NULL, (buf))) 478 (hermes_read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
475#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \ 479#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
476 (hermes_write_ltv((hw),(bap),(rid),HERMES_BYTES_TO_RECLEN(sizeof(*buf)),(buf))) 480 (hermes_write_ltv((hw), (bap), (rid), \
481 HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
477 482
478static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word) 483static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
479{ 484{
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index d8c626e61a3..5260ceb5cfe 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -1,13 +1,7 @@
1/* 1/*
2 * Hermes download helper driver. 2 * Hermes download helper.
3 * 3 *
4 * This could be entirely merged into hermes.c. 4 * This helper:
5 *
6 * I'm keeping it separate to minimise the amount of merging between
7 * kernel upgrades. It also means the memory overhead for drivers that
8 * don't need firmware download low.
9 *
10 * This driver:
11 * - is capable of writing to the volatile area of the hermes device 5 * - is capable of writing to the volatile area of the hermes device
12 * - is currently not capable of writing to non-volatile areas 6 * - is currently not capable of writing to non-volatile areas
13 * - provide helpers to identify and update plugin data 7 * - provide helpers to identify and update plugin data
@@ -50,10 +44,6 @@
50#include "hermes.h" 44#include "hermes.h"
51#include "hermes_dld.h" 45#include "hermes_dld.h"
52 46
53MODULE_DESCRIPTION("Download helper for Lucent Hermes chipset");
54MODULE_AUTHOR("David Kilroy <kilroyd@gmail.com>");
55MODULE_LICENSE("Dual MPL/GPL");
56
57#define PFX "hermes_dld: " 47#define PFX "hermes_dld: "
58 48
59/* 49/*
@@ -347,7 +337,6 @@ int hermes_read_pda(hermes_t *hw,
347 337
348 return 0; 338 return 0;
349} 339}
350EXPORT_SYMBOL(hermes_read_pda);
351 340
352/* Parse PDA and write the records into the adapter 341/* Parse PDA and write the records into the adapter
353 * 342 *
@@ -376,7 +365,6 @@ int hermes_apply_pda(hermes_t *hw,
376 } 365 }
377 return 0; 366 return 0;
378} 367}
379EXPORT_SYMBOL(hermes_apply_pda);
380 368
381/* Identify the total number of bytes in all blocks 369/* Identify the total number of bytes in all blocks
382 * including the header data. 370 * including the header data.
@@ -398,7 +386,6 @@ hermes_blocks_length(const char *first_block)
398 386
399 return total_len; 387 return total_len;
400} 388}
401EXPORT_SYMBOL(hermes_blocks_length);
402 389
403/*** Hermes programming ***/ 390/*** Hermes programming ***/
404 391
@@ -452,7 +439,6 @@ int hermesi_program_init(hermes_t *hw, u32 offset)
452 439
453 return err; 440 return err;
454} 441}
455EXPORT_SYMBOL(hermesi_program_init);
456 442
457/* Done programming data (Hermes I) 443/* Done programming data (Hermes I)
458 * 444 *
@@ -488,7 +474,6 @@ int hermesi_program_end(hermes_t *hw)
488 474
489 return rc ? rc : err; 475 return rc ? rc : err;
490} 476}
491EXPORT_SYMBOL(hermesi_program_end);
492 477
493/* Program the data blocks */ 478/* Program the data blocks */
494int hermes_program(hermes_t *hw, const char *first_block, const char *end) 479int hermes_program(hermes_t *hw, const char *first_block, const char *end)
@@ -550,19 +535,6 @@ int hermes_program(hermes_t *hw, const char *first_block, const char *end)
550 } 535 }
551 return 0; 536 return 0;
552} 537}
553EXPORT_SYMBOL(hermes_program);
554
555static int __init init_hermes_dld(void)
556{
557 return 0;
558}
559
560static void __exit exit_hermes_dld(void)
561{
562}
563
564module_init(init_hermes_dld);
565module_exit(exit_hermes_dld);
566 538
567/*** Default plugging data for Hermes I ***/ 539/*** Default plugging data for Hermes I ***/
568/* Values from wl_lkm_718/hcf/dhf.c */ 540/* Values from wl_lkm_718/hcf/dhf.c */
@@ -573,9 +545,9 @@ static const struct { \
573 __le16 id; \ 545 __le16 id; \
574 u8 val[length]; \ 546 u8 val[length]; \
575} __attribute__ ((packed)) default_pdr_data_##pid = { \ 547} __attribute__ ((packed)) default_pdr_data_##pid = { \
576 __constant_cpu_to_le16((sizeof(default_pdr_data_##pid)/ \ 548 cpu_to_le16((sizeof(default_pdr_data_##pid)/ \
577 sizeof(__le16)) - 1), \ 549 sizeof(__le16)) - 1), \
578 __constant_cpu_to_le16(pid), \ 550 cpu_to_le16(pid), \
579 data \ 551 data \
580} 552}
581 553
@@ -727,4 +699,3 @@ int hermes_apply_pda_with_defaults(hermes_t *hw,
727 } 699 }
728 return 0; 700 return 0;
729} 701}
730EXPORT_SYMBOL(hermes_apply_pda_with_defaults);
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
new file mode 100644
index 00000000000..081428d9409
--- /dev/null
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -0,0 +1,586 @@
1/* Encapsulate basic setting changes and retrieval on Hermes hardware
2 *
3 * See copyright notice in main.c
4 */
5#include <linux/kernel.h>
6#include <linux/if_arp.h>
7#include <linux/ieee80211.h>
8#include <linux/wireless.h>
9
10#include "hermes.h"
11#include "hermes_rid.h"
12#include "orinoco.h"
13
14#include "hw.h"
15
16/********************************************************************/
17/* Data tables */
18/********************************************************************/
19
20/* This table gives the actual meanings of the bitrate IDs returned
21 * by the firmware. */
22static const struct {
23 int bitrate; /* in 100s of kilobits */
24 int automatic;
25 u16 agere_txratectrl;
26 u16 intersil_txratectrl;
27} bitrate_table[] = {
28 {110, 1, 3, 15}, /* Entry 0 is the default */
29 {10, 0, 1, 1},
30 {10, 1, 1, 1},
31 {20, 0, 2, 2},
32 {20, 1, 6, 3},
33 {55, 0, 4, 4},
34 {55, 1, 7, 7},
35 {110, 0, 5, 8},
36};
37#define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table)
38
39int orinoco_get_bitratemode(int bitrate, int automatic)
40{
41 int ratemode = -1;
42 int i;
43
44 if ((bitrate != 10) && (bitrate != 20) &&
45 (bitrate != 55) && (bitrate != 110))
46 return ratemode;
47
48 for (i = 0; i < BITRATE_TABLE_SIZE; i++) {
49 if ((bitrate_table[i].bitrate == bitrate) &&
50 (bitrate_table[i].automatic == automatic)) {
51 ratemode = i;
52 break;
53 }
54 }
55 return ratemode;
56}
57
58void orinoco_get_ratemode_cfg(int ratemode, int *bitrate, int *automatic)
59{
60 BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE));
61
62 *bitrate = bitrate_table[ratemode].bitrate * 100000;
63 *automatic = bitrate_table[ratemode].automatic;
64}
65
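To make the table concrete: 5.5 Mb/s with rate adaptation enabled is entry 6 of bitrate_table[], and converting back recovers the bit rate in bit/s and the automatic flag. A small illustrative sketch:

/* Illustration of the two lookups above. */
static void example_bitrate_lookup(void)
{
	int bps, automatic;
	int mode = orinoco_get_bitratemode(55, 1);	/* 5.5 Mb/s, auto */

	if (mode >= 0)					/* mode == 6 here */
		orinoco_get_ratemode_cfg(mode, &bps, &automatic);
	/* now bps == 5500000 and automatic == 1 */
}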
66/* Get tsc from the firmware */
67int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
68{
69 hermes_t *hw = &priv->hw;
70 int err = 0;
71 u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
72
73	if ((key < 0) || (key >= 4))
74 return -EINVAL;
75
76 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
77 sizeof(tsc_arr), NULL, &tsc_arr);
78 if (!err)
79 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
80
81 return err;
82}
83
84int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
85{
86 hermes_t *hw = &priv->hw;
87 int ratemode = priv->bitratemode;
88 int err = 0;
89
90 if (ratemode >= BITRATE_TABLE_SIZE) {
91 printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
92 priv->ndev->name, ratemode);
93 return -EINVAL;
94 }
95
96 switch (priv->firmware_type) {
97 case FIRMWARE_TYPE_AGERE:
98 err = hermes_write_wordrec(hw, USER_BAP,
99 HERMES_RID_CNFTXRATECONTROL,
100 bitrate_table[ratemode].agere_txratectrl);
101 break;
102 case FIRMWARE_TYPE_INTERSIL:
103 case FIRMWARE_TYPE_SYMBOL:
104 err = hermes_write_wordrec(hw, USER_BAP,
105 HERMES_RID_CNFTXRATECONTROL,
106 bitrate_table[ratemode].intersil_txratectrl);
107 break;
108 default:
109 BUG();
110 }
111
112 return err;
113}
114
115int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate)
116{
117 hermes_t *hw = &priv->hw;
118 int i;
119 int err = 0;
120 u16 val;
121
122 err = hermes_read_wordrec(hw, USER_BAP,
123 HERMES_RID_CURRENTTXRATE, &val);
124 if (err)
125 return err;
126
127 switch (priv->firmware_type) {
128 case FIRMWARE_TYPE_AGERE: /* Lucent style rate */
129 /* Note : in Lucent firmware, the return value of
130 * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s,
131 * and therefore is totally different from the
132 * encoding of HERMES_RID_CNFTXRATECONTROL.
133 * Don't forget that 6Mb/s is really 5.5Mb/s */
134 if (val == 6)
135 *bitrate = 5500000;
136 else
137 *bitrate = val * 1000000;
138 break;
139 case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
140 case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
141 for (i = 0; i < BITRATE_TABLE_SIZE; i++)
142 if (bitrate_table[i].intersil_txratectrl == val)
143 break;
144
145 if (i >= BITRATE_TABLE_SIZE)
146 printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
147 priv->ndev->name, val);
148
149 *bitrate = bitrate_table[i].bitrate * 100000;
150 break;
151 default:
152 BUG();
153 }
154
155 return err;
156}
157
158/* Set fixed AP address */
159int __orinoco_hw_set_wap(struct orinoco_private *priv)
160{
161 int roaming_flag;
162 int err = 0;
163 hermes_t *hw = &priv->hw;
164
165 switch (priv->firmware_type) {
166 case FIRMWARE_TYPE_AGERE:
167 /* not supported */
168 break;
169 case FIRMWARE_TYPE_INTERSIL:
170 if (priv->bssid_fixed)
171 roaming_flag = 2;
172 else
173 roaming_flag = 1;
174
175 err = hermes_write_wordrec(hw, USER_BAP,
176 HERMES_RID_CNFROAMINGMODE,
177 roaming_flag);
178 break;
179 case FIRMWARE_TYPE_SYMBOL:
180 err = HERMES_WRITE_RECORD(hw, USER_BAP,
181 HERMES_RID_CNFMANDATORYBSSID_SYMBOL,
182 &priv->desired_bssid);
183 break;
184 }
185 return err;
186}
187
188/* Change the WEP keys and/or the current keys. Can be called
189 * either from __orinoco_hw_setup_enc() or directly from
190 * orinoco_ioctl_setiwencode(). In the later case the association
191 * with the AP is not broken (if the firmware can handle it),
192 * which is needed for 802.1x implementations. */
193int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
194{
195 hermes_t *hw = &priv->hw;
196 int err = 0;
197
198 switch (priv->firmware_type) {
199 case FIRMWARE_TYPE_AGERE:
200 err = HERMES_WRITE_RECORD(hw, USER_BAP,
201 HERMES_RID_CNFWEPKEYS_AGERE,
202 &priv->keys);
203 if (err)
204 return err;
205 err = hermes_write_wordrec(hw, USER_BAP,
206 HERMES_RID_CNFTXKEY_AGERE,
207 priv->tx_key);
208 if (err)
209 return err;
210 break;
211 case FIRMWARE_TYPE_INTERSIL:
212 case FIRMWARE_TYPE_SYMBOL:
213 {
214 int keylen;
215 int i;
216
217 /* Force uniform key length to work around
218 * firmware bugs */
219 keylen = le16_to_cpu(priv->keys[priv->tx_key].len);
220
221 if (keylen > LARGE_KEY_SIZE) {
222 printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
223 priv->ndev->name, priv->tx_key, keylen);
224 return -E2BIG;
225 }
226
227 /* Write all 4 keys */
228 for (i = 0; i < ORINOCO_MAX_KEYS; i++) {
229 err = hermes_write_ltv(hw, USER_BAP,
230 HERMES_RID_CNFDEFAULTKEY0 + i,
231 HERMES_BYTES_TO_RECLEN(keylen),
232 priv->keys[i].data);
233 if (err)
234 return err;
235 }
236
237 /* Write the index of the key used in transmission */
238 err = hermes_write_wordrec(hw, USER_BAP,
239 HERMES_RID_CNFWEPDEFAULTKEYID,
240 priv->tx_key);
241 if (err)
242 return err;
243 }
244 break;
245 }
246
247 return 0;
248}
249
250int __orinoco_hw_setup_enc(struct orinoco_private *priv)
251{
252 hermes_t *hw = &priv->hw;
253 int err = 0;
254 int master_wep_flag;
255 int auth_flag;
256 int enc_flag;
257
258 /* Setup WEP keys for WEP and WPA */
259 if (priv->encode_alg)
260 __orinoco_hw_setup_wepkeys(priv);
261
262 if (priv->wep_restrict)
263 auth_flag = HERMES_AUTH_SHARED_KEY;
264 else
265 auth_flag = HERMES_AUTH_OPEN;
266
267 if (priv->wpa_enabled)
268 enc_flag = 2;
269 else if (priv->encode_alg == IW_ENCODE_ALG_WEP)
270 enc_flag = 1;
271 else
272 enc_flag = 0;
273
274 switch (priv->firmware_type) {
275 case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
276 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
277 /* Enable the shared-key authentication. */
278 err = hermes_write_wordrec(hw, USER_BAP,
279 HERMES_RID_CNFAUTHENTICATION_AGERE,
280 auth_flag);
281 }
282 err = hermes_write_wordrec(hw, USER_BAP,
283 HERMES_RID_CNFWEPENABLED_AGERE,
284 enc_flag);
285 if (err)
286 return err;
287
288 if (priv->has_wpa) {
289 /* Set WPA key management */
290 err = hermes_write_wordrec(hw, USER_BAP,
291 HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE,
292 priv->key_mgmt);
293 if (err)
294 return err;
295 }
296
297 break;
298
299 case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
300 case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
301 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
302 if (priv->wep_restrict ||
303 (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
304 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
305 HERMES_WEP_EXCL_UNENCRYPTED;
306 else
307 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED;
308
309 err = hermes_write_wordrec(hw, USER_BAP,
310 HERMES_RID_CNFAUTHENTICATION,
311 auth_flag);
312 if (err)
313 return err;
314 } else
315 master_wep_flag = 0;
316
317 if (priv->iw_mode == IW_MODE_MONITOR)
318 master_wep_flag |= HERMES_WEP_HOST_DECRYPT;
319
320 /* Master WEP setting : on/off */
321 err = hermes_write_wordrec(hw, USER_BAP,
322 HERMES_RID_CNFWEPFLAGS_INTERSIL,
323 master_wep_flag);
324 if (err)
325 return err;
326
327 break;
328 }
329
330 return 0;
331}
332
333/* key must be 32 bytes, including the tx and rx MIC keys.
334 * rsc must be 8 bytes
335 * tsc must be 8 bytes or NULL
336 */
337int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx,
338 u8 *key, u8 *rsc, u8 *tsc)
339{
340 struct {
341 __le16 idx;
342 u8 rsc[IW_ENCODE_SEQ_MAX_SIZE];
343 u8 key[TKIP_KEYLEN];
344 u8 tx_mic[MIC_KEYLEN];
345 u8 rx_mic[MIC_KEYLEN];
346 u8 tsc[IW_ENCODE_SEQ_MAX_SIZE];
347 } __attribute__ ((packed)) buf;
348 int ret;
349 int err;
350 int k;
351 u16 xmitting;
352
353 key_idx &= 0x3;
354
355 if (set_tx)
356 key_idx |= 0x8000;
357
358 buf.idx = cpu_to_le16(key_idx);
359 memcpy(buf.key, key,
360 sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
361
362 if (rsc == NULL)
363 memset(buf.rsc, 0, sizeof(buf.rsc));
364 else
365 memcpy(buf.rsc, rsc, sizeof(buf.rsc));
366
367 if (tsc == NULL) {
368 memset(buf.tsc, 0, sizeof(buf.tsc));
369 buf.tsc[4] = 0x10;
370 } else {
371 memcpy(buf.tsc, tsc, sizeof(buf.tsc));
372 }
373
374	/* Wait up to 100ms for tx queue to empty */
375 k = 100;
376 do {
377 k--;
378 udelay(1000);
379 ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
380 &xmitting);
381 if (ret)
382 break;
383 } while ((k > 0) && xmitting);
384
385 if (k == 0)
386 ret = -ETIMEDOUT;
387
388 err = HERMES_WRITE_RECORD(hw, USER_BAP,
389 HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE,
390 &buf);
391
392 return ret ? ret : err;
393}
394
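The 32-byte buffer handed to __orinoco_hw_set_tkip_key() is copied straight into the key, tx_mic and rx_mic fields of the record above, i.e. temporal key first, then the two MIC keys. A hedged caller sketch (the 16+8+8 split is the conventional TKIP layout and an assumption here, as are the key index and flag values):

/* Illustrative caller; key32 = temporal key (16) + TX MIC (8) + RX MIC (8). */
static int example_install_tkip(hermes_t *hw, u8 *key32, u8 *rsc)
{
	return __orinoco_hw_set_tkip_key(hw, 0 /* key index */,
					 1 /* use for transmit */,
					 key32, rsc, NULL /* default TSC */);
}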
395int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx)
396{
397 hermes_t *hw = &priv->hw;
398 int err;
399
400 memset(&priv->tkip_key[key_idx], 0, sizeof(priv->tkip_key[key_idx]));
401 err = hermes_write_wordrec(hw, USER_BAP,
402 HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE,
403 key_idx);
404 if (err)
405 printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n",
406 priv->ndev->name, err, key_idx);
407 return err;
408}
409
410int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
411 struct dev_addr_list *mc_list,
412 int mc_count, int promisc)
413{
414 hermes_t *hw = &priv->hw;
415 int err = 0;
416
417 if (promisc != priv->promiscuous) {
418 err = hermes_write_wordrec(hw, USER_BAP,
419 HERMES_RID_CNFPROMISCUOUSMODE,
420 promisc);
421 if (err) {
422 printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n",
423 priv->ndev->name, err);
424 } else
425 priv->promiscuous = promisc;
426 }
427
428 /* If we're not in promiscuous mode, then we need to set the
429 * group address if either we want to multicast, or if we were
430 * multicasting and want to stop */
431 if (!promisc && (mc_count || priv->mc_count)) {
432 struct dev_mc_list *p = mc_list;
433 struct hermes_multicast mclist;
434 int i;
435
436 for (i = 0; i < mc_count; i++) {
437 /* paranoia: is list shorter than mc_count? */
438 BUG_ON(!p);
439 /* paranoia: bad address size in list? */
440 BUG_ON(p->dmi_addrlen != ETH_ALEN);
441
442 memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
443 p = p->next;
444 }
445
446 if (p)
447 printk(KERN_WARNING "%s: Multicast list is "
448 "longer than mc_count\n", priv->ndev->name);
449
450 err = hermes_write_ltv(hw, USER_BAP,
451 HERMES_RID_CNFGROUPADDRESSES,
452 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
453 &mclist);
454 if (err)
455 printk(KERN_ERR "%s: Error %d setting multicast list.\n",
456 priv->ndev->name, err);
457 else
458 priv->mc_count = mc_count;
459 }
460 return err;
461}
462
463/* Return : < 0 -> error code ; >= 0 -> length */
464int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
465 char buf[IW_ESSID_MAX_SIZE+1])
466{
467 hermes_t *hw = &priv->hw;
468 int err = 0;
469 struct hermes_idstring essidbuf;
470 char *p = (char *)(&essidbuf.val);
471 int len;
472 unsigned long flags;
473
474 if (orinoco_lock(priv, &flags) != 0)
475 return -EBUSY;
476
477 if (strlen(priv->desired_essid) > 0) {
478 /* We read the desired SSID from the hardware rather
479 than from priv->desired_essid, just in case the
480 firmware is allowed to change it on us. I'm not
481 sure about this */
482 /* My guess is that the OWNSSID should always be whatever
483 * we set to the card, whereas CURRENT_SSID is the one that
484 * may change... - Jean II */
485 u16 rid;
486
487 *active = 1;
488
489 rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
490 HERMES_RID_CNFDESIREDSSID;
491
492 err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
493 NULL, &essidbuf);
494 if (err)
495 goto fail_unlock;
496 } else {
497 *active = 0;
498
499 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
500 sizeof(essidbuf), NULL, &essidbuf);
501 if (err)
502 goto fail_unlock;
503 }
504
505 len = le16_to_cpu(essidbuf.len);
506 BUG_ON(len > IW_ESSID_MAX_SIZE);
507
508 memset(buf, 0, IW_ESSID_MAX_SIZE);
509 memcpy(buf, p, len);
510 err = len;
511
512 fail_unlock:
513 orinoco_unlock(priv, &flags);
514
515 return err;
516}
517
518int orinoco_hw_get_freq(struct orinoco_private *priv)
519{
520 hermes_t *hw = &priv->hw;
521 int err = 0;
522 u16 channel;
523 int freq = 0;
524 unsigned long flags;
525
526 if (orinoco_lock(priv, &flags) != 0)
527 return -EBUSY;
528
529 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL,
530 &channel);
531 if (err)
532 goto out;
533
534 /* Intersil firmware 1.3.5 returns 0 when the interface is down */
535 if (channel == 0) {
536 err = -EBUSY;
537 goto out;
538 }
539
540 if ((channel < 1) || (channel > NUM_CHANNELS)) {
541 printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
542 priv->ndev->name, channel);
543 err = -EBUSY;
544 goto out;
545
546 }
547 freq = ieee80211_dsss_chan_to_freq(channel);
548
549 out:
550 orinoco_unlock(priv, &flags);
551
552 if (err > 0)
553 err = -EBUSY;
554 return err ? err : freq;
555}
556
557int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
558 int *numrates, s32 *rates, int max)
559{
560 hermes_t *hw = &priv->hw;
561 struct hermes_idstring list;
562 unsigned char *p = (unsigned char *)&list.val;
563 int err = 0;
564 int num;
565 int i;
566 unsigned long flags;
567
568 if (orinoco_lock(priv, &flags) != 0)
569 return -EBUSY;
570
571 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
572 sizeof(list), NULL, &list);
573 orinoco_unlock(priv, &flags);
574
575 if (err)
576 return err;
577
578 num = le16_to_cpu(list.len);
579 *numrates = num;
580 num = min(num, max);
581
582 for (i = 0; i < num; i++)
583 rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
584
585 return 0;
586}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
new file mode 100644
index 00000000000..dc3f23a9c1c
--- /dev/null
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -0,0 +1,47 @@
1/* Encapsulate basic setting changes on Hermes hardware
2 *
3 * See copyright notice in main.c
4 */
5#ifndef _ORINOCO_HW_H_
6#define _ORINOCO_HW_H_
7
8#include <linux/types.h>
9#include <linux/wireless.h>
10
11/* Hardware BAPs */
12#define USER_BAP 0
13#define IRQ_BAP 1
14
15/* WEP key sizes */
16#define SMALL_KEY_SIZE 5
17#define LARGE_KEY_SIZE 13
18
19/* Number of supported channels */
20#define NUM_CHANNELS 14
21
22/* Forward declarations */
23struct orinoco_private;
24struct dev_addr_list;
25
26int orinoco_get_bitratemode(int bitrate, int automatic);
27void orinoco_get_ratemode_cfg(int ratemode, int *bitrate, int *automatic);
28
29int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc);
30int __orinoco_hw_set_bitrate(struct orinoco_private *priv);
31int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate);
32int __orinoco_hw_set_wap(struct orinoco_private *priv);
33int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
34int __orinoco_hw_setup_enc(struct orinoco_private *priv);
35int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx,
36 u8 *key, u8 *rsc, u8 *tsc);
37int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
38int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
39 struct dev_addr_list *mc_list,
40 int mc_count, int promisc);
41int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
42 char buf[IW_ESSID_MAX_SIZE+1]);
43int orinoco_hw_get_freq(struct orinoco_private *priv);
44int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
45 int *numrates, s32 *rates, int max);
46
47#endif /* _ORINOCO_HW_H_ */
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
new file mode 100644
index 00000000000..54dfc4540b8
--- /dev/null
+++ b/drivers/net/wireless/orinoco/main.c
@@ -0,0 +1,2654 @@
1/* main.c - (formerly known as dldwd_cs.c, orinoco_cs.c and orinoco.c)
2 *
3 * A driver for Hermes or Prism 2 chipset based PCMCIA wireless
4 * adaptors, with Lucent/Agere, Intersil or Symbol firmware.
5 *
6 * Current maintainers (as of 29 September 2003) are:
7 * Pavel Roskin <proski AT gnu.org>
8 * and David Gibson <hermes AT gibson.dropbear.id.au>
9 *
10 * (C) Copyright David Gibson, IBM Corporation 2001-2003.
11 * Copyright (C) 2000 David Gibson, Linuxcare Australia.
12 * With some help from :
13 * Copyright (C) 2001 Jean Tourrilhes, HP Labs
14 * Copyright (C) 2001 Benjamin Herrenschmidt
15 *
16 * Based on dummy_cs.c 1.27 2000/06/12 21:27:25
17 *
18 * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy
19 * AT fasta.fh-dortmund.de>
20 * http://www.stud.fh-dortmund.de/~andy/wvlan/
21 *
22 * The contents of this file are subject to the Mozilla Public License
23 * Version 1.1 (the "License"); you may not use this file except in
24 * compliance with the License. You may obtain a copy of the License
25 * at http://www.mozilla.org/MPL/
26 *
27 * Software distributed under the License is distributed on an "AS IS"
28 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
29 * the License for the specific language governing rights and
30 * limitations under the License.
31 *
32 * The initial developer of the original code is David A. Hinds
33 * <dahinds AT users.sourceforge.net>. Portions created by David
34 * A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
35 * Reserved.
36 *
37 * Alternatively, the contents of this file may be used under the
38 * terms of the GNU General Public License version 2 (the "GPL"), in
39 * which case the provisions of the GPL are applicable instead of the
40 * above. If you wish to allow the use of your version of this file
41 * only under the terms of the GPL and not to allow others to use your
42 * version of this file under the MPL, indicate your decision by
43 * deleting the provisions above and replace them with the notice and
44 * other provisions required by the GPL. If you do not delete the
45 * provisions above, a recipient may use your version of this file
46 * under either the MPL or the GPL. */
47
48/*
49 * TODO
50 * o Handle de-encapsulation within network layer, provide 802.11
51 * headers (patch from Thomas 'Dent' Mirlacher)
52 * o Fix possible races in SPY handling.
53 * o Disconnect wireless extensions from fundamental configuration.
54 * o (maybe) Software WEP support (patch from Stano Meduna).
55 * o (maybe) Use multiple Tx buffers - driver handling queue
56 * rather than firmware.
57 */
58
59/* Locking and synchronization:
60 *
61 * The basic principle is that everything is serialized through a
62 * single spinlock, priv->lock. The lock is used in user, bh and irq
63 * context, so when taken outside hardirq context it should always be
64 * taken with interrupts disabled. The lock protects both the
65 * hardware and the struct orinoco_private.
66 *
67 * Another flag, priv->hw_unavailable indicates that the hardware is
68 * unavailable for an extended period of time (e.g. suspended, or in
69 * the middle of a hard reset). This flag is protected by the
70 * spinlock. All code which touches the hardware should check the
71 * flag after taking the lock, and if it is set, give up on whatever
72 * they are doing and drop the lock again. The orinoco_lock()
73 * function handles this (it unlocks and returns -EBUSY if
74 * hw_unavailable is non-zero).
75 */
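/* A minimal usage sketch of that convention (illustrative only; the
 * real entry points below, e.g. orinoco_open(), follow the same
 * pattern):
 *
 *	unsigned long flags;
 *
 *	if (orinoco_lock(priv, &flags) != 0)
 *		return -EBUSY;		- hardware is unavailable, give up
 *	... touch the hardware and struct orinoco_private ...
 *	orinoco_unlock(priv, &flags);
 */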
76
77#define DRIVER_NAME "orinoco"
78
79#include <linux/module.h>
80#include <linux/kernel.h>
81#include <linux/init.h>
82#include <linux/delay.h>
83#include <linux/netdevice.h>
84#include <linux/etherdevice.h>
85#include <linux/ethtool.h>
86#include <linux/suspend.h>
87#include <linux/if_arp.h>
88#include <linux/wireless.h>
89#include <linux/ieee80211.h>
90#include <net/iw_handler.h>
91
92#include "hermes_rid.h"
93#include "hermes_dld.h"
94#include "hw.h"
95#include "scan.h"
96#include "mic.h"
97#include "fw.h"
98#include "wext.h"
99#include "main.h"
100
101#include "orinoco.h"
102
103/********************************************************************/
104/* Module information */
105/********************************************************************/
106
107MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & "
108 "David Gibson <hermes@gibson.dropbear.id.au>");
109MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based "
110 "and similar wireless cards");
111MODULE_LICENSE("Dual MPL/GPL");
112
113/* Level of debugging. Used in the macros in orinoco.h */
114#ifdef ORINOCO_DEBUG
115int orinoco_debug = ORINOCO_DEBUG;
116EXPORT_SYMBOL(orinoco_debug);
117module_param(orinoco_debug, int, 0644);
118MODULE_PARM_DESC(orinoco_debug, "Debug level");
119#endif
120
121static int suppress_linkstatus; /* = 0 */
122module_param(suppress_linkstatus, bool, 0644);
123MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
124
125static int ignore_disconnect; /* = 0 */
126module_param(ignore_disconnect, int, 0644);
127MODULE_PARM_DESC(ignore_disconnect,
128 "Don't report lost link to the network layer");
129
130int force_monitor; /* = 0 */
131module_param(force_monitor, int, 0644);
132MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions");
133
134/********************************************************************/
135/* Internal constants */
136/********************************************************************/
137
138/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
139static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
140#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
141
142#define ORINOCO_MIN_MTU 256
143#define ORINOCO_MAX_MTU (IEEE80211_MAX_DATA_LEN - ENCAPS_OVERHEAD)
144
145#define SYMBOL_MAX_VER_LEN (14)
146#define MAX_IRQLOOPS_PER_IRQ 10
147#define MAX_IRQLOOPS_PER_JIFFY (20000/HZ) /* Based on a guestimate of
148 * how many events the
149 * device could
150 * legitimately generate */
151#define TX_NICBUF_SIZE_BUG 1585 /* Bug in Symbol firmware */
152
153#define DUMMY_FID 0xFFFF
154
155/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
156 HERMES_MAX_MULTICAST : 0)*/
157#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
158
159#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
160 | HERMES_EV_TX | HERMES_EV_TXEXC \
161 | HERMES_EV_WTERR | HERMES_EV_INFO \
162 | HERMES_EV_INFDROP)
163
164static const struct ethtool_ops orinoco_ethtool_ops;
165
166/********************************************************************/
167/* Data types */
168/********************************************************************/
169
170/* Beginning of the Tx descriptor, used in TxExc handling */
171struct hermes_txexc_data {
172 struct hermes_tx_descriptor desc;
173 __le16 frame_ctl;
174 __le16 duration_id;
175 u8 addr1[ETH_ALEN];
176} __attribute__ ((packed));
177
178/* Rx frame header except compatibility 802.3 header */
179struct hermes_rx_descriptor {
180 /* Control */
181 __le16 status;
182 __le32 time;
183 u8 silence;
184 u8 signal;
185 u8 rate;
186 u8 rxflow;
187 __le32 reserved;
188
189 /* 802.11 header */
190 __le16 frame_ctl;
191 __le16 duration_id;
192 u8 addr1[ETH_ALEN];
193 u8 addr2[ETH_ALEN];
194 u8 addr3[ETH_ALEN];
195 __le16 seq_ctl;
196 u8 addr4[ETH_ALEN];
197
198 /* Data length */
199 __le16 data_len;
200} __attribute__ ((packed));
201
202struct orinoco_rx_data {
203 struct hermes_rx_descriptor *desc;
204 struct sk_buff *skb;
205 struct list_head list;
206};
207
208/********************************************************************/
209/* Function prototypes */
210/********************************************************************/
211
212static void __orinoco_set_multicast_list(struct net_device *dev);
213
214/********************************************************************/
215/* Internal helper functions */
216/********************************************************************/
217
218void set_port_type(struct orinoco_private *priv)
219{
220 switch (priv->iw_mode) {
221 case IW_MODE_INFRA:
222 priv->port_type = 1;
223 priv->createibss = 0;
224 break;
225 case IW_MODE_ADHOC:
226 if (priv->prefer_port3) {
227 priv->port_type = 3;
228 priv->createibss = 0;
229 } else {
230 priv->port_type = priv->ibss_port;
231 priv->createibss = 1;
232 }
233 break;
234 case IW_MODE_MONITOR:
235 priv->port_type = 3;
236 priv->createibss = 0;
237 break;
238 default:
239 printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
240 priv->ndev->name);
241 }
242}
243
244/********************************************************************/
245/* Device methods */
246/********************************************************************/
247
248static int orinoco_open(struct net_device *dev)
249{
250 struct orinoco_private *priv = netdev_priv(dev);
251 unsigned long flags;
252 int err;
253
254 if (orinoco_lock(priv, &flags) != 0)
255 return -EBUSY;
256
257 err = __orinoco_up(dev);
258
259 if (!err)
260 priv->open = 1;
261
262 orinoco_unlock(priv, &flags);
263
264 return err;
265}
266
267static int orinoco_stop(struct net_device *dev)
268{
269 struct orinoco_private *priv = netdev_priv(dev);
270 int err = 0;
271
272 /* We mustn't use orinoco_lock() here, because we need to be
273 able to close the interface even if hw_unavailable is set
274 (e.g. as we're released after a PC Card removal) */
275 spin_lock_irq(&priv->lock);
276
277 priv->open = 0;
278
279 err = __orinoco_down(dev);
280
281 spin_unlock_irq(&priv->lock);
282
283 return err;
284}
285
286static struct net_device_stats *orinoco_get_stats(struct net_device *dev)
287{
288 struct orinoco_private *priv = netdev_priv(dev);
289
290 return &priv->stats;
291}
292
293static void orinoco_set_multicast_list(struct net_device *dev)
294{
295 struct orinoco_private *priv = netdev_priv(dev);
296 unsigned long flags;
297
298 if (orinoco_lock(priv, &flags) != 0) {
299 printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
300 "called when hw_unavailable\n", dev->name);
301 return;
302 }
303
304 __orinoco_set_multicast_list(dev);
305 orinoco_unlock(priv, &flags);
306}
307
308static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
309{
310 struct orinoco_private *priv = netdev_priv(dev);
311
312 if ((new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU))
313 return -EINVAL;
314
315 /* MTU + encapsulation + header length */
316 if ((new_mtu + ENCAPS_OVERHEAD + sizeof(struct ieee80211_hdr)) >
317 (priv->nicbuf_size - ETH_HLEN))
318 return -EINVAL;
319
320 dev->mtu = new_mtu;
321
322 return 0;
323}
324
325/********************************************************************/
326/* Tx path */
327/********************************************************************/
328
329static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
330{
331 struct orinoco_private *priv = netdev_priv(dev);
332 struct net_device_stats *stats = &priv->stats;
333 hermes_t *hw = &priv->hw;
334 int err = 0;
335 u16 txfid = priv->txfid;
336 struct ethhdr *eh;
337 int tx_control;
338 unsigned long flags;
339
340 if (!netif_running(dev)) {
341 printk(KERN_ERR "%s: Tx on stopped device!\n",
342 dev->name);
343 return NETDEV_TX_BUSY;
344 }
345
346 if (netif_queue_stopped(dev)) {
347 printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
348 dev->name);
349 return NETDEV_TX_BUSY;
350 }
351
352 if (orinoco_lock(priv, &flags) != 0) {
353 printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
354 dev->name);
355 return NETDEV_TX_BUSY;
356 }
357
358 if (!netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) {
359 /* Oops, the firmware hasn't established a connection,
360 silently drop the packet (this seems to be the
361 safest approach). */
362 goto drop;
363 }
364
365 /* Check packet length */
366 if (skb->len < ETH_HLEN)
367 goto drop;
368
369 tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
370
371 if (priv->encode_alg == IW_ENCODE_ALG_TKIP)
372 tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
373 HERMES_TXCTRL_MIC;
374
375 if (priv->has_alt_txcntl) {
376		/* WPA-enabled firmwares have tx_cntl at the end of
377		 * the 802.11 header, so write the zeroed descriptor and
378		 * the 802.11 header at the same time
379 */
380 char desc[HERMES_802_3_OFFSET];
381 __le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET];
382
383 memset(&desc, 0, sizeof(desc));
384
385 *txcntl = cpu_to_le16(tx_control);
386 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
387 txfid, 0);
388 if (err) {
389 if (net_ratelimit())
390 printk(KERN_ERR "%s: Error %d writing Tx "
391 "descriptor to BAP\n", dev->name, err);
392 goto busy;
393 }
394 } else {
395 struct hermes_tx_descriptor desc;
396
397 memset(&desc, 0, sizeof(desc));
398
399 desc.tx_control = cpu_to_le16(tx_control);
400 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
401 txfid, 0);
402 if (err) {
403 if (net_ratelimit())
404 printk(KERN_ERR "%s: Error %d writing Tx "
405 "descriptor to BAP\n", dev->name, err);
406 goto busy;
407 }
408
409 /* Clear the 802.11 header and data length fields - some
410 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
411 * if this isn't done. */
412 hermes_clear_words(hw, HERMES_DATA0,
413 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
414 }
415
416 eh = (struct ethhdr *)skb->data;
417
418 /* Encapsulate Ethernet-II frames */
419 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
420 struct header_struct {
421 struct ethhdr eth; /* 802.3 header */
422 u8 encap[6]; /* 802.2 header */
423 } __attribute__ ((packed)) hdr;
424
425 /* Strip destination and source from the data */
426 skb_pull(skb, 2 * ETH_ALEN);
427
428 /* And move them to a separate header */
429 memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
430 hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
431 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
432
433 /* Insert the SNAP header */
434 if (skb_headroom(skb) < sizeof(hdr)) {
435 printk(KERN_ERR
436 "%s: Not enough headroom for 802.2 headers %d\n",
437 dev->name, skb_headroom(skb));
438 goto drop;
439 }
440 eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
441 memcpy(eh, &hdr, sizeof(hdr));
442 }
443
444 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
445 txfid, HERMES_802_3_OFFSET);
446 if (err) {
447 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
448 dev->name, err);
449 goto busy;
450 }
451
452 /* Calculate Michael MIC */
453 if (priv->encode_alg == IW_ENCODE_ALG_TKIP) {
454 u8 mic_buf[MICHAEL_MIC_LEN + 1];
455 u8 *mic;
456 size_t offset;
457 size_t len;
458
459 if (skb->len % 2) {
460 /* MIC start is on an odd boundary */
461 mic_buf[0] = skb->data[skb->len - 1];
462 mic = &mic_buf[1];
463 offset = skb->len - 1;
464 len = MICHAEL_MIC_LEN + 1;
465 } else {
466 mic = &mic_buf[0];
467 offset = skb->len;
468 len = MICHAEL_MIC_LEN;
469 }
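		/* Editorial note (assumption, not in the original source):
		 * when the payload length is odd the last data byte is
		 * re-sent in mic_buf[0] together with the 8-byte MIC, so
		 * that the BAP write below starts at an even offset - the
		 * Hermes BAP is presumed to transfer data in 16-bit units. */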
470
471 orinoco_mic(priv->tx_tfm_mic,
472 priv->tkip_key[priv->tx_key].tx_mic,
473 eh->h_dest, eh->h_source, 0 /* priority */,
474 skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
475
476 /* Write the MIC */
477 err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
478 txfid, HERMES_802_3_OFFSET + offset);
479 if (err) {
480 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
481 dev->name, err);
482 goto busy;
483 }
484 }
485
486 /* Finally, we actually initiate the send */
487 netif_stop_queue(dev);
488
489 err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
490 txfid, NULL);
491 if (err) {
492 netif_start_queue(dev);
493 if (net_ratelimit())
494 printk(KERN_ERR "%s: Error %d transmitting packet\n",
495 dev->name, err);
496 goto busy;
497 }
498
499 dev->trans_start = jiffies;
500 stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
501 goto ok;
502
503 drop:
504 stats->tx_errors++;
505 stats->tx_dropped++;
506
507 ok:
508 orinoco_unlock(priv, &flags);
509 dev_kfree_skb(skb);
510 return NETDEV_TX_OK;
511
512 busy:
513 if (err == -EIO)
514 schedule_work(&priv->reset_work);
515 orinoco_unlock(priv, &flags);
516 return NETDEV_TX_BUSY;
517}
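/* Editorial sketch of the encapsulation done in orinoco_xmit() above
 * (illustrative only): an outgoing Ethernet-II frame
 *
 *	dst[6] src[6] ethertype[2] payload
 *
 * is handed to the firmware as an 802.3 + 802.2 LLC/SNAP frame
 *
 *	dst[6] src[6] length[2] aa aa 03 00 00 00 ethertype[2] payload
 *
 * where the original ethertype becomes the SNAP protocol ID and the
 * 802.3 length field covers the LLC/SNAP header plus the payload.
 */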
518
519static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
520{
521 struct orinoco_private *priv = netdev_priv(dev);
522 u16 fid = hermes_read_regn(hw, ALLOCFID);
523
524 if (fid != priv->txfid) {
525 if (fid != DUMMY_FID)
526 printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
527 dev->name, fid);
528 return;
529 }
530
531 hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
532}
533
534static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
535{
536 struct orinoco_private *priv = netdev_priv(dev);
537 struct net_device_stats *stats = &priv->stats;
538
539 stats->tx_packets++;
540
541 netif_wake_queue(dev);
542
543 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
544}
545
546static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
547{
548 struct orinoco_private *priv = netdev_priv(dev);
549 struct net_device_stats *stats = &priv->stats;
550 u16 fid = hermes_read_regn(hw, TXCOMPLFID);
551 u16 status;
552 struct hermes_txexc_data hdr;
553 int err = 0;
554
555 if (fid == DUMMY_FID)
556 return; /* Nothing's really happened */
557
558 /* Read part of the frame header - we need status and addr1 */
559 err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
560 sizeof(struct hermes_txexc_data),
561 fid, 0);
562
563 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
564 stats->tx_errors++;
565
566 if (err) {
567 printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
568 "(FID=%04X error %d)\n",
569 dev->name, fid, err);
570 return;
571 }
572
573 DEBUG(1, "%s: Tx error, err %d (FID=%04X)\n", dev->name,
574 err, fid);
575
576 /* We produce a TXDROP event only for retry or lifetime
577	 * exceeded, because that's the only status that really means
578	 * that this particular node went away.
579	 * Other errors mean that *we* screwed up. - Jean II */
580 status = le16_to_cpu(hdr.desc.status);
581 if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
582 union iwreq_data wrqu;
583
584 /* Copy 802.11 dest address.
585 * We use the 802.11 header because the frame may
586 * not be 802.3 or may be mangled...
587 * In Ad-Hoc mode, it will be the node address.
588 * In managed mode, it will be most likely the AP addr
589 * User space will figure out how to convert it to
590 * whatever it needs (IP address or else).
591 * - Jean II */
592 memcpy(wrqu.addr.sa_data, hdr.addr1, ETH_ALEN);
593 wrqu.addr.sa_family = ARPHRD_ETHER;
594
595 /* Send event to user space */
596 wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
597 }
598
599 netif_wake_queue(dev);
600}
601
602static void orinoco_tx_timeout(struct net_device *dev)
603{
604 struct orinoco_private *priv = netdev_priv(dev);
605 struct net_device_stats *stats = &priv->stats;
606 struct hermes *hw = &priv->hw;
607
608 printk(KERN_WARNING "%s: Tx timeout! "
609 "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
610 dev->name, hermes_read_regn(hw, ALLOCFID),
611 hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
612
613 stats->tx_errors++;
614
615 schedule_work(&priv->reset_work);
616}
617
618/********************************************************************/
619/* Rx path (data frames) */
620/********************************************************************/
621
622/* Does the frame have a SNAP header indicating it should be
623 * de-encapsulated to Ethernet-II? */
624static inline int is_ethersnap(void *_hdr)
625{
626 u8 *hdr = _hdr;
627
628	/* We de-encapsulate all packets which (a) have SNAP headers
629	 * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header)
630	 * and (b) where the OUI of the SNAP header is 00:00:00 or
631	 * 00:00:f8 - we need both because different APs appear to use
632	 * different OUIs for some reason */
633 return (memcmp(hdr, &encaps_hdr, 5) == 0)
634 && ((hdr[5] == 0x00) || (hdr[5] == 0xf8));
635}
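/* Editorial note: the bytes tested above correspond to the usual
 * LLC/SNAP layout (an assumption based on RFC 1042 and bridge-tunnel
 * encapsulation, not spelled out in the original source):
 *
 *	DSAP  SSAP  CTRL  OUI
 *	0xaa  0xaa  0x03  00:00:00 (RFC 1042) or 00:00:f8 (bridge-tunnel)
 */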
636
637static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
638 int level, int noise)
639{
640 struct iw_quality wstats;
641 wstats.level = level - 0x95;
642 wstats.noise = noise - 0x95;
643 wstats.qual = (level > noise) ? (level - noise) : 0;
644 wstats.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
645 /* Update spy records */
646 wireless_spy_update(dev, mac, &wstats);
647}
648
649static void orinoco_stat_gather(struct net_device *dev,
650 struct sk_buff *skb,
651 struct hermes_rx_descriptor *desc)
652{
653 struct orinoco_private *priv = netdev_priv(dev);
654
655 /* Using spy support with lots of Rx packets, like in an
656 * infrastructure (AP), will really slow down everything, because
657 * the MAC address must be compared to each entry of the spy list.
658 * If the user really asks for it (set some address in the
659 * spy list), we do it, but he will pay the price.
660 * Note that to get here, you need both WIRELESS_SPY
661 * compiled in AND some addresses in the list !!!
662 */
663 /* Note : gcc will optimise the whole section away if
664 * WIRELESS_SPY is not defined... - Jean II */
665 if (SPY_NUMBER(priv)) {
666 orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN,
667 desc->signal, desc->silence);
668 }
669}
670
671/*
672 * orinoco_rx_monitor - handle received monitor frames.
673 *
674 * Arguments:
675 * dev network device
676 * rxfid received FID
677 * desc rx descriptor of the frame
678 *
679 * Call context: interrupt
680 */
681static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
682 struct hermes_rx_descriptor *desc)
683{
684 u32 hdrlen = 30; /* return full header by default */
685 u32 datalen = 0;
686 u16 fc;
687 int err;
688 int len;
689 struct sk_buff *skb;
690 struct orinoco_private *priv = netdev_priv(dev);
691 struct net_device_stats *stats = &priv->stats;
692 hermes_t *hw = &priv->hw;
693
694 len = le16_to_cpu(desc->data_len);
695
696 /* Determine the size of the header and the data */
697 fc = le16_to_cpu(desc->frame_ctl);
698 switch (fc & IEEE80211_FCTL_FTYPE) {
699 case IEEE80211_FTYPE_DATA:
700 if ((fc & IEEE80211_FCTL_TODS)
701 && (fc & IEEE80211_FCTL_FROMDS))
702 hdrlen = 30;
703 else
704 hdrlen = 24;
705 datalen = len;
706 break;
707 case IEEE80211_FTYPE_MGMT:
708 hdrlen = 24;
709 datalen = len;
710 break;
711 case IEEE80211_FTYPE_CTL:
712 switch (fc & IEEE80211_FCTL_STYPE) {
713 case IEEE80211_STYPE_PSPOLL:
714 case IEEE80211_STYPE_RTS:
715 case IEEE80211_STYPE_CFEND:
716 case IEEE80211_STYPE_CFENDACK:
717 hdrlen = 16;
718 break;
719 case IEEE80211_STYPE_CTS:
720 case IEEE80211_STYPE_ACK:
721 hdrlen = 10;
722 break;
723 }
724 break;
725 default:
726 /* Unknown frame type */
727 break;
728 }
729
730 /* sanity check the length */
731 if (datalen > IEEE80211_MAX_DATA_LEN + 12) {
732 printk(KERN_DEBUG "%s: oversized monitor frame, "
733 "data length = %d\n", dev->name, datalen);
734 stats->rx_length_errors++;
735 goto update_stats;
736 }
737
738 skb = dev_alloc_skb(hdrlen + datalen);
739 if (!skb) {
740 printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n",
741 dev->name);
742 goto update_stats;
743 }
744
745 /* Copy the 802.11 header to the skb */
746 memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen);
747 skb_reset_mac_header(skb);
748
749 /* If any, copy the data from the card to the skb */
750 if (datalen > 0) {
751 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
752 ALIGN(datalen, 2), rxfid,
753 HERMES_802_2_OFFSET);
754 if (err) {
755 printk(KERN_ERR "%s: error %d reading monitor frame\n",
756 dev->name, err);
757 goto drop;
758 }
759 }
760
761 skb->dev = dev;
762 skb->ip_summed = CHECKSUM_NONE;
763 skb->pkt_type = PACKET_OTHERHOST;
764 skb->protocol = cpu_to_be16(ETH_P_802_2);
765
766 stats->rx_packets++;
767 stats->rx_bytes += skb->len;
768
769 netif_rx(skb);
770 return;
771
772 drop:
773 dev_kfree_skb_irq(skb);
774 update_stats:
775 stats->rx_errors++;
776 stats->rx_dropped++;
777}
778
779static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
780{
781 struct orinoco_private *priv = netdev_priv(dev);
782 struct net_device_stats *stats = &priv->stats;
783 struct iw_statistics *wstats = &priv->wstats;
784 struct sk_buff *skb = NULL;
785 u16 rxfid, status;
786 int length;
787 struct hermes_rx_descriptor *desc;
788 struct orinoco_rx_data *rx_data;
789 int err;
790
791 desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
792 if (!desc) {
793 printk(KERN_WARNING
794 "%s: Can't allocate space for RX descriptor\n",
795 dev->name);
796 goto update_stats;
797 }
798
799 rxfid = hermes_read_regn(hw, RXFID);
800
801 err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
802 rxfid, 0);
803 if (err) {
804 printk(KERN_ERR "%s: error %d reading Rx descriptor. "
805 "Frame dropped.\n", dev->name, err);
806 goto update_stats;
807 }
808
809 status = le16_to_cpu(desc->status);
810
811 if (status & HERMES_RXSTAT_BADCRC) {
812 DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n",
813 dev->name);
814 stats->rx_crc_errors++;
815 goto update_stats;
816 }
817
818 /* Handle frames in monitor mode */
819 if (priv->iw_mode == IW_MODE_MONITOR) {
820 orinoco_rx_monitor(dev, rxfid, desc);
821 goto out;
822 }
823
824 if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
825 DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
826 dev->name);
827 wstats->discard.code++;
828 goto update_stats;
829 }
830
831 length = le16_to_cpu(desc->data_len);
832
833 /* Sanity checks */
834	if (length < 3) { /* Not enough even for an 802.2 LLC header */
835 /* At least on Symbol firmware with PCF we get quite a
836 lot of these legitimately - Poll frames with no
837 data. */
838 goto out;
839 }
840 if (length > IEEE80211_MAX_DATA_LEN) {
841 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
842 dev->name, length);
843 stats->rx_length_errors++;
844 goto update_stats;
845 }
846
847 /* Payload size does not include Michael MIC. Increase payload
848 * size to read it together with the data. */
849 if (status & HERMES_RXSTAT_MIC)
850 length += MICHAEL_MIC_LEN;
851
852 /* We need space for the packet data itself, plus an ethernet
853 header, plus 2 bytes so we can align the IP header on a
854 32bit boundary, plus 1 byte so we can read in odd length
855 packets from the card, which has an IO granularity of 16
856 bits */
857 skb = dev_alloc_skb(length+ETH_HLEN+2+1);
858 if (!skb) {
859 printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
860 dev->name);
861 goto update_stats;
862 }
863
864 /* We'll prepend the header, so reserve space for it. The worst
865 case is no decapsulation, when 802.3 header is prepended and
866 nothing is removed. 2 is for aligning the IP header. */
867 skb_reserve(skb, ETH_HLEN + 2);
868
869 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, length),
870 ALIGN(length, 2), rxfid,
871 HERMES_802_2_OFFSET);
872 if (err) {
873 printk(KERN_ERR "%s: error %d reading frame. "
874 "Frame dropped.\n", dev->name, err);
875 goto drop;
876 }
877
878 /* Add desc and skb to rx queue */
879 rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
880 if (!rx_data) {
881 printk(KERN_WARNING "%s: Can't allocate RX packet\n",
882 dev->name);
883 goto drop;
884 }
885 rx_data->desc = desc;
886 rx_data->skb = skb;
887 list_add_tail(&rx_data->list, &priv->rx_list);
888 tasklet_schedule(&priv->rx_tasklet);
889
890 return;
891
892drop:
893 dev_kfree_skb_irq(skb);
894update_stats:
895 stats->rx_errors++;
896 stats->rx_dropped++;
897out:
898 kfree(desc);
899}
900
901static void orinoco_rx(struct net_device *dev,
902 struct hermes_rx_descriptor *desc,
903 struct sk_buff *skb)
904{
905 struct orinoco_private *priv = netdev_priv(dev);
906 struct net_device_stats *stats = &priv->stats;
907 u16 status, fc;
908 int length;
909 struct ethhdr *hdr;
910
911 status = le16_to_cpu(desc->status);
912 length = le16_to_cpu(desc->data_len);
913 fc = le16_to_cpu(desc->frame_ctl);
914
915 /* Calculate and check MIC */
916 if (status & HERMES_RXSTAT_MIC) {
917 int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >>
918 HERMES_MIC_KEY_ID_SHIFT);
919 u8 mic[MICHAEL_MIC_LEN];
920 u8 *rxmic;
921 u8 *src = (fc & IEEE80211_FCTL_FROMDS) ?
922 desc->addr3 : desc->addr2;
923
924 /* Extract Michael MIC from payload */
925 rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
926
927 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
928 length -= MICHAEL_MIC_LEN;
929
930 orinoco_mic(priv->rx_tfm_mic,
931 priv->tkip_key[key_id].rx_mic,
932 desc->addr1,
933 src,
934 0, /* priority or QoS? */
935 skb->data,
936 skb->len,
937 &mic[0]);
938
939 if (memcmp(mic, rxmic,
940 MICHAEL_MIC_LEN)) {
941 union iwreq_data wrqu;
942 struct iw_michaelmicfailure wxmic;
943
944 printk(KERN_WARNING "%s: "
945 "Invalid Michael MIC in data frame from %pM, "
946 "using key %i\n",
947 dev->name, src, key_id);
948
949 /* TODO: update stats */
950
951 /* Notify userspace */
952 memset(&wxmic, 0, sizeof(wxmic));
953 wxmic.flags = key_id & IW_MICFAILURE_KEY_ID;
954 wxmic.flags |= (desc->addr1[0] & 1) ?
955 IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
956 wxmic.src_addr.sa_family = ARPHRD_ETHER;
957 memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN);
958
959 (void) orinoco_hw_get_tkip_iv(priv, key_id,
960 &wxmic.tsc[0]);
961
962 memset(&wrqu, 0, sizeof(wrqu));
963 wrqu.data.length = sizeof(wxmic);
964 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu,
965 (char *) &wxmic);
966
967 goto drop;
968 }
969 }
970
971 /* Handle decapsulation
972	 * In most cases, the firmware tells us about SNAP frames.
973 * For some reason, the SNAP frames sent by LinkSys APs
974 * are not properly recognised by most firmwares.
975 * So, check ourselves */
976 if (length >= ENCAPS_OVERHEAD &&
977 (((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
978 ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
979 is_ethersnap(skb->data))) {
980		/* These indicate a SNAP header within an 802.2 LLC header
981		 within an 802.11 frame, which we'll need to de-encapsulate
982		 to the original Ethernet-II frame. */
983 hdr = (struct ethhdr *)skb_push(skb,
984 ETH_HLEN - ENCAPS_OVERHEAD);
985 } else {
986 /* 802.3 frame - prepend 802.3 header as is */
987 hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
988 hdr->h_proto = htons(length);
989 }
990 memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
991 if (fc & IEEE80211_FCTL_FROMDS)
992 memcpy(hdr->h_source, desc->addr3, ETH_ALEN);
993 else
994 memcpy(hdr->h_source, desc->addr2, ETH_ALEN);
995
996 skb->protocol = eth_type_trans(skb, dev);
997 skb->ip_summed = CHECKSUM_NONE;
998 if (fc & IEEE80211_FCTL_TODS)
999 skb->pkt_type = PACKET_OTHERHOST;
1000
1001 /* Process the wireless stats if needed */
1002 orinoco_stat_gather(dev, skb, desc);
1003
1004 /* Pass the packet to the networking stack */
1005 netif_rx(skb);
1006 stats->rx_packets++;
1007 stats->rx_bytes += length;
1008
1009 return;
1010
1011 drop:
1012 dev_kfree_skb(skb);
1013 stats->rx_errors++;
1014 stats->rx_dropped++;
1015}
1016
1017static void orinoco_rx_isr_tasklet(unsigned long data)
1018{
1019 struct net_device *dev = (struct net_device *) data;
1020 struct orinoco_private *priv = netdev_priv(dev);
1021 struct orinoco_rx_data *rx_data, *temp;
1022 struct hermes_rx_descriptor *desc;
1023 struct sk_buff *skb;
1024 unsigned long flags;
1025
1026 /* orinoco_rx requires the driver lock, and we also need to
1027 * protect priv->rx_list, so just hold the lock over the
1028 * lot.
1029 *
1030 * If orinoco_lock fails, we've unplugged the card. In this
1031 * case just abort. */
1032 if (orinoco_lock(priv, &flags) != 0)
1033 return;
1034
1035 /* extract desc and skb from queue */
1036 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
1037 desc = rx_data->desc;
1038 skb = rx_data->skb;
1039 list_del(&rx_data->list);
1040 kfree(rx_data);
1041
1042 orinoco_rx(dev, desc, skb);
1043
1044 kfree(desc);
1045 }
1046
1047 orinoco_unlock(priv, &flags);
1048}
1049
1050/********************************************************************/
1051/* Rx path (info frames) */
1052/********************************************************************/
1053
1054static void print_linkstatus(struct net_device *dev, u16 status)
1055{
1056 char *s;
1057
1058 if (suppress_linkstatus)
1059 return;
1060
1061 switch (status) {
1062 case HERMES_LINKSTATUS_NOT_CONNECTED:
1063 s = "Not Connected";
1064 break;
1065 case HERMES_LINKSTATUS_CONNECTED:
1066 s = "Connected";
1067 break;
1068 case HERMES_LINKSTATUS_DISCONNECTED:
1069 s = "Disconnected";
1070 break;
1071 case HERMES_LINKSTATUS_AP_CHANGE:
1072 s = "AP Changed";
1073 break;
1074 case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
1075 s = "AP Out of Range";
1076 break;
1077 case HERMES_LINKSTATUS_AP_IN_RANGE:
1078 s = "AP In Range";
1079 break;
1080 case HERMES_LINKSTATUS_ASSOC_FAILED:
1081 s = "Association Failed";
1082 break;
1083 default:
1084 s = "UNKNOWN";
1085 }
1086
1087 printk(KERN_DEBUG "%s: New link status: %s (%04x)\n",
1088 dev->name, s, status);
1089}
1090
1091/* Search scan results for requested BSSID, join it if found */
1092static void orinoco_join_ap(struct work_struct *work)
1093{
1094 struct orinoco_private *priv =
1095 container_of(work, struct orinoco_private, join_work);
1096 struct net_device *dev = priv->ndev;
1097 struct hermes *hw = &priv->hw;
1098 int err;
1099 unsigned long flags;
1100 struct join_req {
1101 u8 bssid[ETH_ALEN];
1102 __le16 channel;
1103 } __attribute__ ((packed)) req;
1104 const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
1105 struct prism2_scan_apinfo *atom = NULL;
1106 int offset = 4;
1107 int found = 0;
1108 u8 *buf;
1109 u16 len;
1110
1111 /* Allocate buffer for scan results */
1112 buf = kmalloc(MAX_SCAN_LEN, GFP_KERNEL);
1113 if (!buf)
1114 return;
1115
1116 if (orinoco_lock(priv, &flags) != 0)
1117 goto fail_lock;
1118
1119 /* Sanity checks in case user changed something in the meantime */
1120 if (!priv->bssid_fixed)
1121 goto out;
1122
1123 if (strlen(priv->desired_essid) == 0)
1124 goto out;
1125
1126 /* Read scan results from the firmware */
1127 err = hermes_read_ltv(hw, USER_BAP,
1128 HERMES_RID_SCANRESULTSTABLE,
1129 MAX_SCAN_LEN, &len, buf);
1130 if (err) {
1131 printk(KERN_ERR "%s: Cannot read scan results\n",
1132 dev->name);
1133 goto out;
1134 }
1135
1136 len = HERMES_RECLEN_TO_BYTES(len);
1137
1138 /* Go through the scan results looking for the channel of the AP
1139 * we were requested to join */
1140 for (; offset + atom_len <= len; offset += atom_len) {
1141 atom = (struct prism2_scan_apinfo *) (buf + offset);
1142 if (memcmp(&atom->bssid, priv->desired_bssid, ETH_ALEN) == 0) {
1143 found = 1;
1144 break;
1145 }
1146 }
1147
1148 if (!found) {
1149 DEBUG(1, "%s: Requested AP not found in scan results\n",
1150 dev->name);
1151 goto out;
1152 }
1153
1154 memcpy(req.bssid, priv->desired_bssid, ETH_ALEN);
1155 req.channel = atom->channel; /* both are little-endian */
1156 err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFJOINREQUEST,
1157 &req);
1158 if (err)
1159 printk(KERN_ERR "%s: Error issuing join request\n", dev->name);
1160
1161 out:
1162 orinoco_unlock(priv, &flags);
1163
1164 fail_lock:
1165 kfree(buf);
1166}
1167
1168/* Send new BSSID to userspace */
1169static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
1170{
1171 struct net_device *dev = priv->ndev;
1172 struct hermes *hw = &priv->hw;
1173 union iwreq_data wrqu;
1174 int err;
1175
1176 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
1177 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1178 if (err != 0)
1179 return;
1180
1181 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1182
1183 /* Send event to user space */
1184 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
1185}
1186
1187static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
1188{
1189 struct net_device *dev = priv->ndev;
1190 struct hermes *hw = &priv->hw;
1191 union iwreq_data wrqu;
1192 int err;
1193 u8 buf[88];
1194 u8 *ie;
1195
1196 if (!priv->has_wpa)
1197 return;
1198
1199 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
1200 sizeof(buf), NULL, &buf);
1201 if (err != 0)
1202 return;
1203
1204 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1205 if (ie) {
1206 int rem = sizeof(buf) - (ie - &buf[0]);
1207 wrqu.data.length = ie[1] + 2;
1208 if (wrqu.data.length > rem)
1209 wrqu.data.length = rem;
1210
1211 if (wrqu.data.length)
1212 /* Send event to user space */
1213 wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie);
1214 }
1215}
1216
1217static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
1218{
1219 struct net_device *dev = priv->ndev;
1220 struct hermes *hw = &priv->hw;
1221 union iwreq_data wrqu;
1222 int err;
1223 u8 buf[88]; /* TODO: verify max size or IW_GENERIC_IE_MAX */
1224 u8 *ie;
1225
1226 if (!priv->has_wpa)
1227 return;
1228
1229 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
1230 sizeof(buf), NULL, &buf);
1231 if (err != 0)
1232 return;
1233
1234 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1235 if (ie) {
1236 int rem = sizeof(buf) - (ie - &buf[0]);
1237 wrqu.data.length = ie[1] + 2;
1238 if (wrqu.data.length > rem)
1239 wrqu.data.length = rem;
1240
1241 if (wrqu.data.length)
1242 /* Send event to user space */
1243 wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie);
1244 }
1245}
1246
1247static void orinoco_send_wevents(struct work_struct *work)
1248{
1249 struct orinoco_private *priv =
1250 container_of(work, struct orinoco_private, wevent_work);
1251 unsigned long flags;
1252
1253 if (orinoco_lock(priv, &flags) != 0)
1254 return;
1255
1256 orinoco_send_assocreqie_wevent(priv);
1257 orinoco_send_assocrespie_wevent(priv);
1258 orinoco_send_bssid_wevent(priv);
1259
1260 orinoco_unlock(priv, &flags);
1261}
1262
1263static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1264{
1265 struct orinoco_private *priv = netdev_priv(dev);
1266 u16 infofid;
1267 struct {
1268 __le16 len;
1269 __le16 type;
1270 } __attribute__ ((packed)) info;
1271 int len, type;
1272 int err;
1273
1274 /* This is an answer to an INQUIRE command that we did earlier,
1275	 * or an information "event" generated by the card.
1276	 * The controller returns to us a pseudo frame containing
1277 * the information in question - Jean II */
1278 infofid = hermes_read_regn(hw, INFOFID);
1279
1280 /* Read the info frame header - don't try too hard */
1281 err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
1282 infofid, 0);
1283 if (err) {
1284 printk(KERN_ERR "%s: error %d reading info frame. "
1285 "Frame dropped.\n", dev->name, err);
1286 return;
1287 }
1288
1289 len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
1290 type = le16_to_cpu(info.type);
1291
1292 switch (type) {
1293 case HERMES_INQ_TALLIES: {
1294 struct hermes_tallies_frame tallies;
1295 struct iw_statistics *wstats = &priv->wstats;
1296
1297 if (len > sizeof(tallies)) {
1298 printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
1299 dev->name, len);
1300 len = sizeof(tallies);
1301 }
1302
1303 err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len,
1304 infofid, sizeof(info));
1305 if (err)
1306 break;
1307
1308 /* Increment our various counters */
1309 /* wstats->discard.nwid - no wrong BSSID stuff */
1310 wstats->discard.code +=
1311 le16_to_cpu(tallies.RxWEPUndecryptable);
1312 if (len == sizeof(tallies))
1313 wstats->discard.code +=
1314 le16_to_cpu(tallies.RxDiscards_WEPICVError) +
1315 le16_to_cpu(tallies.RxDiscards_WEPExcluded);
1316 wstats->discard.misc +=
1317 le16_to_cpu(tallies.TxDiscardsWrongSA);
1318 wstats->discard.fragment +=
1319 le16_to_cpu(tallies.RxMsgInBadMsgFragments);
1320 wstats->discard.retries +=
1321 le16_to_cpu(tallies.TxRetryLimitExceeded);
1322 /* wstats->miss.beacon - no match */
1323 }
1324 break;
1325 case HERMES_INQ_LINKSTATUS: {
1326 struct hermes_linkstatus linkstatus;
1327 u16 newstatus;
1328 int connected;
1329
1330 if (priv->iw_mode == IW_MODE_MONITOR)
1331 break;
1332
1333 if (len != sizeof(linkstatus)) {
1334 printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
1335 dev->name, len);
1336 break;
1337 }
1338
1339 err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len,
1340 infofid, sizeof(info));
1341 if (err)
1342 break;
1343 newstatus = le16_to_cpu(linkstatus.linkstatus);
1344
1345 /* Symbol firmware uses "out of range" to signal that
1346 * the hostscan frame can be requested. */
1347 if (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE &&
1348 priv->firmware_type == FIRMWARE_TYPE_SYMBOL &&
1349 priv->has_hostscan && priv->scan_inprogress) {
1350 hermes_inquire(hw, HERMES_INQ_HOSTSCAN_SYMBOL);
1351 break;
1352 }
1353
1354 connected = (newstatus == HERMES_LINKSTATUS_CONNECTED)
1355 || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
1356 || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE);
1357
1358 if (connected)
1359 netif_carrier_on(dev);
1360 else if (!ignore_disconnect)
1361 netif_carrier_off(dev);
1362
1363 if (newstatus != priv->last_linkstatus) {
1364 priv->last_linkstatus = newstatus;
1365 print_linkstatus(dev, newstatus);
1366 /* The info frame contains only one word which is the
1367 * status (see hermes.h). The status is pretty boring
1368 * in itself, that's why we export the new BSSID...
1369 * Jean II */
1370 schedule_work(&priv->wevent_work);
1371 }
1372 }
1373 break;
1374 case HERMES_INQ_SCAN:
1375 if (!priv->scan_inprogress && priv->bssid_fixed &&
1376 priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
1377 schedule_work(&priv->join_work);
1378 break;
1379 }
1380 /* fall through */
1381 case HERMES_INQ_HOSTSCAN:
1382 case HERMES_INQ_HOSTSCAN_SYMBOL: {
1383		/* Result of a scan. Contains information about
1384 * cells in the vicinity - Jean II */
1385 union iwreq_data wrqu;
1386 unsigned char *buf;
1387
1388 /* Scan is no longer in progress */
1389 priv->scan_inprogress = 0;
1390
1391 /* Sanity check */
1392 if (len > 4096) {
1393 printk(KERN_WARNING "%s: Scan results too large (%d bytes)\n",
1394 dev->name, len);
1395 break;
1396 }
1397
1398 /* Allocate buffer for results */
1399 buf = kmalloc(len, GFP_ATOMIC);
1400 if (buf == NULL)
1401 /* No memory, so can't printk()... */
1402 break;
1403
1404 /* Read scan data */
1405 err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len,
1406 infofid, sizeof(info));
1407 if (err) {
1408 kfree(buf);
1409 break;
1410 }
1411
1412#ifdef ORINOCO_DEBUG
1413 {
1414 int i;
1415 printk(KERN_DEBUG "Scan result [%02X", buf[0]);
1416 for (i = 1; i < (len * 2); i++)
1417 printk(":%02X", buf[i]);
1418 printk("]\n");
1419 }
1420#endif /* ORINOCO_DEBUG */
1421
1422 if (orinoco_process_scan_results(priv, buf, len) == 0) {
1423 /* Send an empty event to user space.
1424 * We don't send the received data on the event because
1425 * it would require us to do complex transcoding, and
1426		 * we want to minimise the work done in the irq handler.
1427 * Use a request to extract the data - Jean II */
1428 wrqu.data.length = 0;
1429 wrqu.data.flags = 0;
1430 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
1431 }
1432 kfree(buf);
1433 }
1434 break;
1435 case HERMES_INQ_CHANNELINFO:
1436 {
1437 struct agere_ext_scan_info *bss;
1438
1439 if (!priv->scan_inprogress) {
1440 printk(KERN_DEBUG "%s: Got chaninfo without scan, "
1441 "len=%d\n", dev->name, len);
1442 break;
1443 }
1444
1445 /* An empty result indicates that the scan is complete */
1446 if (len == 0) {
1447 union iwreq_data wrqu;
1448
1449 /* Scan is no longer in progress */
1450 priv->scan_inprogress = 0;
1451
1452 wrqu.data.length = 0;
1453 wrqu.data.flags = 0;
1454 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
1455 break;
1456 }
1457
1458 /* Sanity check */
1459 else if (len > sizeof(*bss)) {
1460 printk(KERN_WARNING
1461 "%s: Ext scan results too large (%d bytes). "
1462 "Truncating results to %zd bytes.\n",
1463 dev->name, len, sizeof(*bss));
1464 len = sizeof(*bss);
1465 } else if (len < (offsetof(struct agere_ext_scan_info,
1466 data) + 2)) {
1467 /* Drop this result now so we don't have to
1468 * keep checking later */
1469 printk(KERN_WARNING
1470 "%s: Ext scan results too short (%d bytes)\n",
1471 dev->name, len);
1472 break;
1473 }
1474
1475 bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
1476 if (bss == NULL)
1477 break;
1478
1479 /* Read scan data */
1480 err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
1481 infofid, sizeof(info));
1482 if (err) {
1483 kfree(bss);
1484 break;
1485 }
1486
1487 orinoco_add_ext_scan_result(priv, bss);
1488
1489 kfree(bss);
1490 break;
1491 }
1492 case HERMES_INQ_SEC_STAT_AGERE:
1493 /* Security status (Agere specific) */
1494 /* Ignore this frame for now */
1495 if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
1496 break;
1497 /* fall through */
1498 default:
1499 printk(KERN_DEBUG "%s: Unknown information frame received: "
1500 "type 0x%04x, length %d\n", dev->name, type, len);
1501 /* We don't actually do anything about it */
1502 break;
1503 }
1504}
1505
1506static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
1507{
1508 if (net_ratelimit())
1509 printk(KERN_DEBUG "%s: Information frame lost.\n", dev->name);
1510}
1511
1512/********************************************************************/
1513/* Internal hardware control routines */
1514/********************************************************************/
1515
1516int __orinoco_up(struct net_device *dev)
1517{
1518 struct orinoco_private *priv = netdev_priv(dev);
1519 struct hermes *hw = &priv->hw;
1520 int err;
1521
1522 netif_carrier_off(dev); /* just to make sure */
1523
1524 err = __orinoco_program_rids(dev);
1525 if (err) {
1526 printk(KERN_ERR "%s: Error %d configuring card\n",
1527 dev->name, err);
1528 return err;
1529 }
1530
1531 /* Fire things up again */
1532 hermes_set_irqmask(hw, ORINOCO_INTEN);
1533 err = hermes_enable_port(hw, 0);
1534 if (err) {
1535 printk(KERN_ERR "%s: Error %d enabling MAC port\n",
1536 dev->name, err);
1537 return err;
1538 }
1539
1540 netif_start_queue(dev);
1541
1542 return 0;
1543}
1544EXPORT_SYMBOL(__orinoco_up);
1545
1546int __orinoco_down(struct net_device *dev)
1547{
1548 struct orinoco_private *priv = netdev_priv(dev);
1549 struct hermes *hw = &priv->hw;
1550 int err;
1551
1552 netif_stop_queue(dev);
1553
1554 if (!priv->hw_unavailable) {
1555 if (!priv->broken_disableport) {
1556 err = hermes_disable_port(hw, 0);
1557 if (err) {
1558 /* Some firmwares (e.g. Intersil 1.3.x) seem
1559 * to have problems disabling the port, oh
1560 * well, too bad. */
1561 printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
1562 dev->name, err);
1563 priv->broken_disableport = 1;
1564 }
1565 }
1566 hermes_set_irqmask(hw, 0);
1567 hermes_write_regn(hw, EVACK, 0xffff);
1568 }
1569
1570 /* firmware will have to reassociate */
1571 netif_carrier_off(dev);
1572 priv->last_linkstatus = 0xffff;
1573
1574 return 0;
1575}
1576EXPORT_SYMBOL(__orinoco_down);
1577
1578static int orinoco_allocate_fid(struct net_device *dev)
1579{
1580 struct orinoco_private *priv = netdev_priv(dev);
1581 struct hermes *hw = &priv->hw;
1582 int err;
1583
1584 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
1585 if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
1586 /* Try workaround for old Symbol firmware bug */
1587 priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
1588 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
1589
1590 printk(KERN_WARNING "%s: firmware ALLOC bug detected "
1591	       "(old Symbol firmware?). Workaround %s\n",
1592 dev->name, err ? "failed!" : "ok.");
1593 }
1594
1595 return err;
1596}
1597
1598int orinoco_reinit_firmware(struct net_device *dev)
1599{
1600 struct orinoco_private *priv = netdev_priv(dev);
1601 struct hermes *hw = &priv->hw;
1602 int err;
1603
1604 err = hermes_init(hw);
1605 if (priv->do_fw_download && !err) {
1606 err = orinoco_download(priv);
1607 if (err)
1608 priv->do_fw_download = 0;
1609 }
1610 if (!err)
1611 err = orinoco_allocate_fid(dev);
1612
1613 return err;
1614}
1615EXPORT_SYMBOL(orinoco_reinit_firmware);
1616
1617int __orinoco_program_rids(struct net_device *dev)
1618{
1619 struct orinoco_private *priv = netdev_priv(dev);
1620 hermes_t *hw = &priv->hw;
1621 int err;
1622 struct hermes_idstring idbuf;
1623
1624 /* Set the MAC address */
1625 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
1626 HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
1627 if (err) {
1628 printk(KERN_ERR "%s: Error %d setting MAC address\n",
1629 dev->name, err);
1630 return err;
1631 }
1632
1633 /* Set up the link mode */
1634 err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE,
1635 priv->port_type);
1636 if (err) {
1637 printk(KERN_ERR "%s: Error %d setting port type\n",
1638 dev->name, err);
1639 return err;
1640 }
1641 /* Set the channel/frequency */
1642 if (priv->channel != 0 && priv->iw_mode != IW_MODE_INFRA) {
1643 err = hermes_write_wordrec(hw, USER_BAP,
1644 HERMES_RID_CNFOWNCHANNEL,
1645 priv->channel);
1646 if (err) {
1647 printk(KERN_ERR "%s: Error %d setting channel %d\n",
1648 dev->name, err, priv->channel);
1649 return err;
1650 }
1651 }
1652
1653 if (priv->has_ibss) {
1654 u16 createibss;
1655
1656 if ((strlen(priv->desired_essid) == 0) && (priv->createibss)) {
1657 printk(KERN_WARNING "%s: This firmware requires an "
1658 "ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
1659 /* With wvlan_cs, in this case, we would crash.
1660 * hopefully, this driver will behave better...
1661 * Jean II */
1662 createibss = 0;
1663 } else {
1664 createibss = priv->createibss;
1665 }
1666
1667 err = hermes_write_wordrec(hw, USER_BAP,
1668 HERMES_RID_CNFCREATEIBSS,
1669 createibss);
1670 if (err) {
1671 printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n",
1672 dev->name, err);
1673 return err;
1674 }
1675 }
1676
1677 /* Set the desired BSSID */
1678 err = __orinoco_hw_set_wap(priv);
1679 if (err) {
1680 printk(KERN_ERR "%s: Error %d setting AP address\n",
1681 dev->name, err);
1682 return err;
1683 }
1684 /* Set the desired ESSID */
1685 idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
1686 memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
1687 /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
1688 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
1689 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
1690 &idbuf);
1691 if (err) {
1692 printk(KERN_ERR "%s: Error %d setting OWNSSID\n",
1693 dev->name, err);
1694 return err;
1695 }
1696 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
1697 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
1698 &idbuf);
1699 if (err) {
1700 printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n",
1701 dev->name, err);
1702 return err;
1703 }
1704
1705 /* Set the station name */
1706 idbuf.len = cpu_to_le16(strlen(priv->nick));
1707 memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
1708 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
1709 HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
1710 &idbuf);
1711 if (err) {
1712 printk(KERN_ERR "%s: Error %d setting nickname\n",
1713 dev->name, err);
1714 return err;
1715 }
1716
1717 /* Set AP density */
1718 if (priv->has_sensitivity) {
1719 err = hermes_write_wordrec(hw, USER_BAP,
1720 HERMES_RID_CNFSYSTEMSCALE,
1721 priv->ap_density);
1722 if (err) {
1723 printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
1724 "Disabling sensitivity control\n",
1725 dev->name, err);
1726
1727 priv->has_sensitivity = 0;
1728 }
1729 }
1730
1731 /* Set RTS threshold */
1732 err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
1733 priv->rts_thresh);
1734 if (err) {
1735 printk(KERN_ERR "%s: Error %d setting RTS threshold\n",
1736 dev->name, err);
1737 return err;
1738 }
1739
1740 /* Set fragmentation threshold or MWO robustness */
1741 if (priv->has_mwo)
1742 err = hermes_write_wordrec(hw, USER_BAP,
1743 HERMES_RID_CNFMWOROBUST_AGERE,
1744 priv->mwo_robust);
1745 else
1746 err = hermes_write_wordrec(hw, USER_BAP,
1747 HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
1748 priv->frag_thresh);
1749 if (err) {
1750 printk(KERN_ERR "%s: Error %d setting fragmentation\n",
1751 dev->name, err);
1752 return err;
1753 }
1754
1755 /* Set bitrate */
1756 err = __orinoco_hw_set_bitrate(priv);
1757 if (err) {
1758 printk(KERN_ERR "%s: Error %d setting bitrate\n",
1759 dev->name, err);
1760 return err;
1761 }
1762
1763 /* Set power management */
1764 if (priv->has_pm) {
1765 err = hermes_write_wordrec(hw, USER_BAP,
1766 HERMES_RID_CNFPMENABLED,
1767 priv->pm_on);
1768 if (err) {
1769 printk(KERN_ERR "%s: Error %d setting up PM\n",
1770 dev->name, err);
1771 return err;
1772 }
1773
1774 err = hermes_write_wordrec(hw, USER_BAP,
1775 HERMES_RID_CNFMULTICASTRECEIVE,
1776 priv->pm_mcast);
1777 if (err) {
1778 printk(KERN_ERR "%s: Error %d setting up PM\n",
1779 dev->name, err);
1780 return err;
1781 }
1782 err = hermes_write_wordrec(hw, USER_BAP,
1783 HERMES_RID_CNFMAXSLEEPDURATION,
1784 priv->pm_period);
1785 if (err) {
1786 printk(KERN_ERR "%s: Error %d setting up PM\n",
1787 dev->name, err);
1788 return err;
1789 }
1790 err = hermes_write_wordrec(hw, USER_BAP,
1791 HERMES_RID_CNFPMHOLDOVERDURATION,
1792 priv->pm_timeout);
1793 if (err) {
1794 printk(KERN_ERR "%s: Error %d setting up PM\n",
1795 dev->name, err);
1796 return err;
1797 }
1798 }
1799
1800 /* Set preamble - only for Symbol so far... */
1801 if (priv->has_preamble) {
1802 err = hermes_write_wordrec(hw, USER_BAP,
1803 HERMES_RID_CNFPREAMBLE_SYMBOL,
1804 priv->preamble);
1805 if (err) {
1806 printk(KERN_ERR "%s: Error %d setting preamble\n",
1807 dev->name, err);
1808 return err;
1809 }
1810 }
1811
1812 /* Set up encryption */
1813 if (priv->has_wep || priv->has_wpa) {
1814 err = __orinoco_hw_setup_enc(priv);
1815 if (err) {
1816 printk(KERN_ERR "%s: Error %d activating encryption\n",
1817 dev->name, err);
1818 return err;
1819 }
1820 }
1821
1822 if (priv->iw_mode == IW_MODE_MONITOR) {
1823 /* Enable monitor mode */
1824 dev->type = ARPHRD_IEEE80211;
1825 err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
1826 HERMES_TEST_MONITOR, 0, NULL);
1827 } else {
1828 /* Disable monitor mode */
1829 dev->type = ARPHRD_ETHER;
1830 err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
1831 HERMES_TEST_STOP, 0, NULL);
1832 }
1833 if (err)
1834 return err;
1835
1836 /* Set promiscuity / multicast*/
1837 priv->promiscuous = 0;
1838 priv->mc_count = 0;
1839
1840 /* FIXME: what about netif_tx_lock */
1841 __orinoco_set_multicast_list(dev);
1842
1843 return 0;
1844}
1845
1846/* FIXME: return int? */
1847static void
1848__orinoco_set_multicast_list(struct net_device *dev)
1849{
1850 struct orinoco_private *priv = netdev_priv(dev);
1851 int err = 0;
1852 int promisc, mc_count;
1853
1854 /* The Hermes doesn't seem to have an allmulti mode, so we go
1855 * into promiscuous mode and let the upper levels deal. */
1856 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1857 (dev->mc_count > MAX_MULTICAST(priv))) {
1858 promisc = 1;
1859 mc_count = 0;
1860 } else {
1861 promisc = 0;
1862 mc_count = dev->mc_count;
1863 }
1864
1865 err = __orinoco_hw_set_multicast_list(priv, dev->mc_list, mc_count,
1866 promisc);
1867}
1868
1869/* This must be called from user context, without locks held - use
1870 * schedule_work() */
1871void orinoco_reset(struct work_struct *work)
1872{
1873 struct orinoco_private *priv =
1874 container_of(work, struct orinoco_private, reset_work);
1875 struct net_device *dev = priv->ndev;
1876 struct hermes *hw = &priv->hw;
1877 int err;
1878 unsigned long flags;
1879
1880 if (orinoco_lock(priv, &flags) != 0)
1881 /* When the hardware becomes available again, whatever
1882 * detects that is responsible for re-initializing
1883 * it. So no need for anything further */
1884 return;
1885
1886 netif_stop_queue(dev);
1887
1888 /* Shut off interrupts. Depending on what state the hardware
1889 * is in, this might not work, but we'll try anyway */
1890 hermes_set_irqmask(hw, 0);
1891 hermes_write_regn(hw, EVACK, 0xffff);
1892
1893 priv->hw_unavailable++;
1894 priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */
1895 netif_carrier_off(dev);
1896
1897 orinoco_unlock(priv, &flags);
1898
1899 /* Scanning support: Cleanup of driver struct */
1900 orinoco_clear_scan_results(priv, 0);
1901 priv->scan_inprogress = 0;
1902
1903 if (priv->hard_reset) {
1904 err = (*priv->hard_reset)(priv);
1905 if (err) {
1906 printk(KERN_ERR "%s: orinoco_reset: Error %d "
1907 "performing hard reset\n", dev->name, err);
1908 goto disable;
1909 }
1910 }
1911
1912 err = orinoco_reinit_firmware(dev);
1913 if (err) {
1914 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
1915 dev->name, err);
1916 goto disable;
1917 }
1918
1919 /* This has to be called from user context */
1920 spin_lock_irq(&priv->lock);
1921
1922 priv->hw_unavailable--;
1923
1924 /* priv->open or priv->hw_unavailable might have changed while
1925 * we dropped the lock */
1926 if (priv->open && (!priv->hw_unavailable)) {
1927 err = __orinoco_up(dev);
1928 if (err) {
1929 printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
1930 dev->name, err);
1931 } else
1932 dev->trans_start = jiffies;
1933 }
1934
1935 spin_unlock_irq(&priv->lock);
1936
1937 return;
1938 disable:
1939 hermes_set_irqmask(hw, 0);
1940 netif_device_detach(dev);
1941 printk(KERN_ERR "%s: Device has been disabled!\n", dev->name);
1942}
1943
1944/********************************************************************/
1945/* Interrupt handler */
1946/********************************************************************/
1947
1948static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
1949{
1950 printk(KERN_DEBUG "%s: TICK\n", dev->name);
1951}
1952
1953static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
1954{
1955 /* This seems to happen a fair bit under load, but ignoring it
1956 seems to work fine...*/
1957 printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
1958 dev->name);
1959}
1960
1961irqreturn_t orinoco_interrupt(int irq, void *dev_id)
1962{
1963 struct net_device *dev = dev_id;
1964 struct orinoco_private *priv = netdev_priv(dev);
1965 hermes_t *hw = &priv->hw;
1966 int count = MAX_IRQLOOPS_PER_IRQ;
1967 u16 evstat, events;
1968 /* These are used to detect a runaway interrupt situation.
1969 *
1970 * If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
1971 * we panic and shut down the hardware
1972 */
1973 /* jiffies value the last time we were called */
1974 static int last_irq_jiffy; /* = 0 */
1975 static int loops_this_jiffy; /* = 0 */
1976 unsigned long flags;
1977
1978 if (orinoco_lock(priv, &flags) != 0) {
1979 /* If hw is unavailable - we don't know if the irq was
1980 * for us or not */
1981 return IRQ_HANDLED;
1982 }
1983
1984 evstat = hermes_read_regn(hw, EVSTAT);
1985 events = evstat & hw->inten;
1986 if (!events) {
1987 orinoco_unlock(priv, &flags);
1988 return IRQ_NONE;
1989 }
1990
1991 if (jiffies != last_irq_jiffy)
1992 loops_this_jiffy = 0;
1993 last_irq_jiffy = jiffies;
1994
1995 while (events && count--) {
1996 if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) {
1997 printk(KERN_WARNING "%s: IRQ handler is looping too "
1998 "much! Resetting.\n", dev->name);
1999 /* Disable interrupts for now */
2000 hermes_set_irqmask(hw, 0);
2001 schedule_work(&priv->reset_work);
2002 break;
2003 }
2004
2005 /* Check the card hasn't been removed */
2006 if (!hermes_present(hw)) {
2007 DEBUG(0, "orinoco_interrupt(): card removed\n");
2008 break;
2009 }
2010
2011 if (events & HERMES_EV_TICK)
2012 __orinoco_ev_tick(dev, hw);
2013 if (events & HERMES_EV_WTERR)
2014 __orinoco_ev_wterr(dev, hw);
2015 if (events & HERMES_EV_INFDROP)
2016 __orinoco_ev_infdrop(dev, hw);
2017 if (events & HERMES_EV_INFO)
2018 __orinoco_ev_info(dev, hw);
2019 if (events & HERMES_EV_RX)
2020 __orinoco_ev_rx(dev, hw);
2021 if (events & HERMES_EV_TXEXC)
2022 __orinoco_ev_txexc(dev, hw);
2023 if (events & HERMES_EV_TX)
2024 __orinoco_ev_tx(dev, hw);
2025 if (events & HERMES_EV_ALLOC)
2026 __orinoco_ev_alloc(dev, hw);
2027
2028 hermes_write_regn(hw, EVACK, evstat);
2029
2030 evstat = hermes_read_regn(hw, EVSTAT);
2031 events = evstat & hw->inten;
2032 }
2033
2034 orinoco_unlock(priv, &flags);
2035 return IRQ_HANDLED;
2036}
2037EXPORT_SYMBOL(orinoco_interrupt);
2038
2039/********************************************************************/
2040/* Power management */
2041/********************************************************************/
2042#if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_HERMES_CACHE_FW_ON_INIT)
2043static int orinoco_pm_notifier(struct notifier_block *notifier,
2044 unsigned long pm_event,
2045 void *unused)
2046{
2047 struct orinoco_private *priv = container_of(notifier,
2048 struct orinoco_private,
2049 pm_notifier);
2050
2051 /* All we need to do is cache the firmware before suspend, and
2052 * release it when we come out.
2053 *
2054 * Only need to do this if we're downloading firmware. */
2055 if (!priv->do_fw_download)
2056 return NOTIFY_DONE;
2057
2058 switch (pm_event) {
2059 case PM_HIBERNATION_PREPARE:
2060 case PM_SUSPEND_PREPARE:
2061 orinoco_cache_fw(priv, 0);
2062 break;
2063
2064 case PM_POST_RESTORE:
2065 /* Restore from hibernation failed. We need to clean
2066 * up in exactly the same way, so fall through. */
2067 case PM_POST_HIBERNATION:
2068 case PM_POST_SUSPEND:
2069 orinoco_uncache_fw(priv);
2070 break;
2071
2072 case PM_RESTORE_PREPARE:
2073 default:
2074 break;
2075 }
2076
2077 return NOTIFY_DONE;
2078}
2079#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */
2080#define orinoco_pm_notifier NULL
2081#endif
2082
2083/********************************************************************/
2084/* Initialization */
2085/********************************************************************/
2086
2087struct comp_id {
2088 u16 id, variant, major, minor;
2089} __attribute__ ((packed));
2090
2091static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
2092{
2093 if (nic_id->id < 0x8000)
2094 return FIRMWARE_TYPE_AGERE;
2095 else if (nic_id->id == 0x8000 && nic_id->major == 0)
2096 return FIRMWARE_TYPE_SYMBOL;
2097 else
2098 return FIRMWARE_TYPE_INTERSIL;
2099}
2100
2101/* Set priv->firmware type, determine firmware properties */
2102static int determine_firmware(struct net_device *dev)
2103{
2104 struct orinoco_private *priv = netdev_priv(dev);
2105 hermes_t *hw = &priv->hw;
2106 int err;
2107 struct comp_id nic_id, sta_id;
2108 unsigned int firmver;
2109 char tmp[SYMBOL_MAX_VER_LEN+1] __attribute__((aligned(2)));
2110
2111 /* Get the hardware version */
2112 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
2113 if (err) {
2114 printk(KERN_ERR "%s: Cannot read hardware identity: error %d\n",
2115 dev->name, err);
2116 return err;
2117 }
2118
2119 le16_to_cpus(&nic_id.id);
2120 le16_to_cpus(&nic_id.variant);
2121 le16_to_cpus(&nic_id.major);
2122 le16_to_cpus(&nic_id.minor);
2123 printk(KERN_DEBUG "%s: Hardware identity %04x:%04x:%04x:%04x\n",
2124 dev->name, nic_id.id, nic_id.variant,
2125 nic_id.major, nic_id.minor);
2126
2127 priv->firmware_type = determine_firmware_type(&nic_id);
2128
2129 /* Get the firmware version */
2130 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
2131 if (err) {
2132 printk(KERN_ERR "%s: Cannot read station identity: error %d\n",
2133 dev->name, err);
2134 return err;
2135 }
2136
2137 le16_to_cpus(&sta_id.id);
2138 le16_to_cpus(&sta_id.variant);
2139 le16_to_cpus(&sta_id.major);
2140 le16_to_cpus(&sta_id.minor);
2141 printk(KERN_DEBUG "%s: Station identity %04x:%04x:%04x:%04x\n",
2142 dev->name, sta_id.id, sta_id.variant,
2143 sta_id.major, sta_id.minor);
2144
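	/* The driver can only talk to the station (secondary) firmware;
	 * if the primary or tertiary image is reported active we bail
	 * out below rather than guess. */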
2145 switch (sta_id.id) {
2146 case 0x15:
2147 printk(KERN_ERR "%s: Primary firmware is active\n",
2148 dev->name);
2149 return -ENODEV;
2150 case 0x14b:
2151 printk(KERN_ERR "%s: Tertiary firmware is active\n",
2152 dev->name);
2153 return -ENODEV;
2154 case 0x1f: /* Intersil, Agere, Symbol Spectrum24 */
2155 case 0x21: /* Symbol Spectrum24 Trilogy */
2156 break;
2157 default:
2158 printk(KERN_NOTICE "%s: Unknown station ID, please report\n",
2159 dev->name);
2160 break;
2161 }
2162
2163 /* Default capabilities */
2164 priv->has_sensitivity = 1;
2165 priv->has_mwo = 0;
2166 priv->has_preamble = 0;
2167 priv->has_port3 = 1;
2168 priv->has_ibss = 1;
2169 priv->has_wep = 0;
2170 priv->has_big_wep = 0;
2171 priv->has_alt_txcntl = 0;
2172 priv->has_ext_scan = 0;
2173 priv->has_wpa = 0;
2174 priv->do_fw_download = 0;
2175
2176 /* Determine capabilities from the firmware version */
2177 switch (priv->firmware_type) {
2178 case FIRMWARE_TYPE_AGERE:
2179 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
2180 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
2181 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
2182 "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor);
2183
2184 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
2185
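		/* firmver packs the version as (major << 16) | minor, so e.g.
		 * Agere 6.06 compares as 0x60006 and 8.10 as 0x8000a; the hex
		 * thresholds below are therefore plain version checks. */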
2186 priv->has_ibss = (firmver >= 0x60006);
2187 priv->has_wep = (firmver >= 0x40020);
2188 priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell
2189 Gold cards from the others? */
2190 priv->has_mwo = (firmver >= 0x60000);
2191 priv->has_pm = (firmver >= 0x40020); /* Doesn't work in 7.52? */
2192 priv->ibss_port = 1;
2193 priv->has_hostscan = (firmver >= 0x8000a);
2194 priv->do_fw_download = 1;
2195 priv->broken_monitor = (firmver >= 0x80000);
2196 priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */
2197 priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */
2198 priv->has_wpa = (firmver >= 0x9002a);
2199 /* Tested with Agere firmware :
2200 * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
2201 * Tested CableTron firmware : 4.32 => Anton */
2202 break;
2203 case FIRMWARE_TYPE_SYMBOL:
2204 /* Symbol, 3Com AirConnect, Intel, Ericsson WLAN */
2205 /* Intel MAC : 00:02:B3:* */
2206 /* 3Com MAC : 00:50:DA:* */
2207 memset(tmp, 0, sizeof(tmp));
2208 /* Get the Symbol firmware version */
2209 err = hermes_read_ltv(hw, USER_BAP,
2210 HERMES_RID_SECONDARYVERSION_SYMBOL,
2211 SYMBOL_MAX_VER_LEN, NULL, &tmp);
2212 if (err) {
2213 printk(KERN_WARNING
2214 "%s: Error %d reading Symbol firmware info. "
2215 "Wildly guessing capabilities...\n",
2216 dev->name, err);
2217 firmver = 0;
2218 tmp[0] = '\0';
2219 } else {
2220 /* The firmware revision is a string, the format is
2221 * something like : "V2.20-01".
2222 * Quick and dirty parsing... - Jean II
2223 */
2224 firmver = ((tmp[1] - '0') << 16)
2225 | ((tmp[3] - '0') << 12)
2226 | ((tmp[4] - '0') << 8)
2227 | ((tmp[6] - '0') << 4)
2228 | (tmp[7] - '0');
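			/* e.g. "V2.20-01" parses to firmver 0x22001, the
			 * packed form the thresholds below expect. */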
2229
2230 tmp[SYMBOL_MAX_VER_LEN] = '\0';
2231 }
2232
2233 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
2234 "Symbol %s", tmp);
2235
2236 priv->has_ibss = (firmver >= 0x20000);
2237 priv->has_wep = (firmver >= 0x15012);
2238 priv->has_big_wep = (firmver >= 0x20000);
2239 priv->has_pm = (firmver >= 0x20000 && firmver < 0x22000) ||
2240 (firmver >= 0x29000 && firmver < 0x30000) ||
2241 firmver >= 0x31000;
2242 priv->has_preamble = (firmver >= 0x20000);
2243 priv->ibss_port = 4;
2244
2245 /* Symbol firmware is found on various cards, but
2246 * there has been no attempt to check firmware
2247 * download on non-spectrum_cs based cards.
2248 *
2249 * Given that the Agere firmware download works
2250 * differently, we should avoid doing a firmware
2251 * download with the Symbol algorithm on non-spectrum
2252 * cards.
2253 *
2254 * For now we can identify a spectrum_cs based card
2255 * because it has a firmware reset function.
2256 */
2257 priv->do_fw_download = (priv->stop_fw != NULL);
2258
2259 priv->broken_disableport = (firmver == 0x25013) ||
2260 (firmver >= 0x30000 && firmver <= 0x31000);
2261 priv->has_hostscan = (firmver >= 0x31001) ||
2262 (firmver >= 0x29057 && firmver < 0x30000);
2263 /* Tested with Intel firmware : 0x20015 => Jean II */
2264 /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
2265 break;
2266 case FIRMWARE_TYPE_INTERSIL:
2267 /* D-Link, Linksys, Addtron, ZoomAir, and many others...
2268 * Samsung, Compaq 100/200 and Proxim are slightly
2269 * different and less well tested */
2270 /* D-Link MAC : 00:40:05:* */
2271 /* Addtron MAC : 00:90:D1:* */
2272 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
2273 "Intersil %d.%d.%d", sta_id.major, sta_id.minor,
2274 sta_id.variant);
2275
2276 firmver = ((unsigned long)sta_id.major << 16) |
2277 ((unsigned long)sta_id.minor << 8) | sta_id.variant;
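		/* e.g. Intersil 0.8.0 packs to 0x000800, matching the
		 * "earlier than v0.8.x" warning below. */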
2278
2279 priv->has_ibss = (firmver >= 0x000700); /* FIXME */
2280 priv->has_big_wep = priv->has_wep = (firmver >= 0x000800);
2281 priv->has_pm = (firmver >= 0x000700);
2282 priv->has_hostscan = (firmver >= 0x010301);
2283
2284 if (firmver >= 0x000800)
2285 priv->ibss_port = 0;
2286 else {
2287 printk(KERN_NOTICE "%s: Intersil firmware earlier "
2288 "than v0.8.x - several features not supported\n",
2289 dev->name);
2290 priv->ibss_port = 1;
2291 }
2292 break;
2293 }
2294 printk(KERN_DEBUG "%s: Firmware determined as %s\n", dev->name,
2295 priv->fw_name);
2296
2297 return 0;
2298}
2299
2300static int orinoco_init(struct net_device *dev)
2301{
2302 struct orinoco_private *priv = netdev_priv(dev);
2303 hermes_t *hw = &priv->hw;
2304 int err = 0;
2305 struct hermes_idstring nickbuf;
2306 u16 reclen;
2307 int len;
2308
2309 /* No need to lock, the hw_unavailable flag is already set in
2310 * alloc_orinocodev() */
2311 priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
2312
2313 /* Initialize the firmware */
2314 err = hermes_init(hw);
2315 if (err != 0) {
2316 printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
2317 dev->name, err);
2318 goto out;
2319 }
2320
2321 err = determine_firmware(dev);
2322 if (err != 0) {
2323 printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
2324 dev->name);
2325 goto out;
2326 }
2327
2328 if (priv->do_fw_download) {
2329#ifdef CONFIG_HERMES_CACHE_FW_ON_INIT
2330 orinoco_cache_fw(priv, 0);
2331#endif
2332
2333 err = orinoco_download(priv);
2334 if (err)
2335 priv->do_fw_download = 0;
2336
2337 /* Check firmware version again */
2338 err = determine_firmware(dev);
2339 if (err != 0) {
2340 printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
2341 dev->name);
2342 goto out;
2343 }
2344 }
2345
2346 if (priv->has_port3)
2347 printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n",
2348 dev->name);
2349 if (priv->has_ibss)
2350 printk(KERN_DEBUG "%s: IEEE standard IBSS ad-hoc mode supported\n",
2351 dev->name);
2352 if (priv->has_wep) {
2353 printk(KERN_DEBUG "%s: WEP supported, %s-bit key\n", dev->name,
2354 priv->has_big_wep ? "104" : "40");
2355 }
2356 if (priv->has_wpa) {
2357 printk(KERN_DEBUG "%s: WPA-PSK supported\n", dev->name);
2358 if (orinoco_mic_init(priv)) {
2359 printk(KERN_ERR "%s: Failed to setup MIC crypto "
2360 "algorithm. Disabling WPA support\n", dev->name);
2361 priv->has_wpa = 0;
2362 }
2363 }
2364
2365 /* Now we have the firmware capabilities, allocate appropriate
2366 * sized scan buffers */
2367 if (orinoco_bss_data_allocate(priv))
2368 goto out;
2369 orinoco_bss_data_init(priv);
2370
2371 /* Get the MAC address */
2372 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
2373 ETH_ALEN, NULL, dev->dev_addr);
2374 if (err) {
2375 printk(KERN_WARNING "%s: failed to read MAC address!\n",
2376 dev->name);
2377 goto out;
2378 }
2379
2380 printk(KERN_DEBUG "%s: MAC address %pM\n",
2381 dev->name, dev->dev_addr);
2382
2383 /* Get the station name */
2384 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
2385 sizeof(nickbuf), &reclen, &nickbuf);
2386 if (err) {
2387 printk(KERN_ERR "%s: failed to read station name\n",
2388 dev->name);
2389 goto out;
2390 }
2391 if (nickbuf.len)
2392 len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len));
2393 else
2394 len = min(IW_ESSID_MAX_SIZE, 2 * reclen);
2395 memcpy(priv->nick, &nickbuf.val, len);
2396 priv->nick[len] = '\0';
2397
2398 printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
2399
2400 err = orinoco_allocate_fid(dev);
2401 if (err) {
2402 printk(KERN_ERR "%s: failed to allocate NIC buffer!\n",
2403 dev->name);
2404 goto out;
2405 }
2406
2407 /* Get allowed channels */
2408 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
2409 &priv->channel_mask);
2410 if (err) {
2411 printk(KERN_ERR "%s: failed to read channel list!\n",
2412 dev->name);
2413 goto out;
2414 }
2415
2416 /* Get initial AP density */
2417 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
2418 &priv->ap_density);
2419 if (err || priv->ap_density < 1 || priv->ap_density > 3)
2420 priv->has_sensitivity = 0;
2421
2422 /* Get initial RTS threshold */
2423 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
2424 &priv->rts_thresh);
2425 if (err) {
2426 printk(KERN_ERR "%s: failed to read RTS threshold!\n",
2427 dev->name);
2428 goto out;
2429 }
2430
2431 /* Get initial fragmentation settings */
2432 if (priv->has_mwo)
2433 err = hermes_read_wordrec(hw, USER_BAP,
2434 HERMES_RID_CNFMWOROBUST_AGERE,
2435 &priv->mwo_robust);
2436 else
2437 err = hermes_read_wordrec(hw, USER_BAP,
2438 HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
2439 &priv->frag_thresh);
2440 if (err) {
2441 printk(KERN_ERR "%s: failed to read fragmentation settings!\n",
2442 dev->name);
2443 goto out;
2444 }
2445
2446 /* Power management setup */
2447 if (priv->has_pm) {
2448 priv->pm_on = 0;
2449 priv->pm_mcast = 1;
2450 err = hermes_read_wordrec(hw, USER_BAP,
2451 HERMES_RID_CNFMAXSLEEPDURATION,
2452 &priv->pm_period);
2453 if (err) {
2454 printk(KERN_ERR "%s: failed to read power management period!\n",
2455 dev->name);
2456 goto out;
2457 }
2458 err = hermes_read_wordrec(hw, USER_BAP,
2459 HERMES_RID_CNFPMHOLDOVERDURATION,
2460 &priv->pm_timeout);
2461 if (err) {
2462 printk(KERN_ERR "%s: failed to read power management timeout!\n",
2463 dev->name);
2464 goto out;
2465 }
2466 }
2467
2468 /* Preamble setup */
2469 if (priv->has_preamble) {
2470 err = hermes_read_wordrec(hw, USER_BAP,
2471 HERMES_RID_CNFPREAMBLE_SYMBOL,
2472 &priv->preamble);
2473 if (err)
2474 goto out;
2475 }
2476
2477 /* Set up the default configuration */
2478 priv->iw_mode = IW_MODE_INFRA;
2479 /* By default use IEEE/IBSS ad-hoc mode if we have it */
2480 priv->prefer_port3 = priv->has_port3 && (!priv->has_ibss);
2481 set_port_type(priv);
2482 priv->channel = 0; /* use firmware default */
2483
2484 priv->promiscuous = 0;
2485 priv->encode_alg = IW_ENCODE_ALG_NONE;
2486 priv->tx_key = 0;
2487 priv->wpa_enabled = 0;
2488 priv->tkip_cm_active = 0;
2489 priv->key_mgmt = 0;
2490 priv->wpa_ie_len = 0;
2491 priv->wpa_ie = NULL;
2492
2493 /* Make the hardware available, as long as it hasn't been
2494 * removed elsewhere (e.g. by PCMCIA hot unplug) */
2495 spin_lock_irq(&priv->lock);
2496 priv->hw_unavailable--;
2497 spin_unlock_irq(&priv->lock);
2498
2499 printk(KERN_DEBUG "%s: ready\n", dev->name);
2500
2501 out:
2502 return err;
2503}
2504
2505static const struct net_device_ops orinoco_netdev_ops = {
2506 .ndo_init = orinoco_init,
2507 .ndo_open = orinoco_open,
2508 .ndo_stop = orinoco_stop,
2509 .ndo_start_xmit = orinoco_xmit,
2510 .ndo_set_multicast_list = orinoco_set_multicast_list,
2511 .ndo_change_mtu = orinoco_change_mtu,
2512 .ndo_tx_timeout = orinoco_tx_timeout,
2513 .ndo_get_stats = orinoco_get_stats,
2514};
2515
2516struct net_device
2517*alloc_orinocodev(int sizeof_card,
2518 struct device *device,
2519 int (*hard_reset)(struct orinoco_private *),
2520 int (*stop_fw)(struct orinoco_private *, int))
2521{
2522 struct net_device *dev;
2523 struct orinoco_private *priv;
2524
2525 dev = alloc_etherdev(sizeof(struct orinoco_private) + sizeof_card);
2526 if (!dev)
2527 return NULL;
2528 priv = netdev_priv(dev);
2529 priv->ndev = dev;
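	/* Any card-specific private data lives immediately after
	 * struct orinoco_private inside the netdev private area. */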
2530 if (sizeof_card)
2531 priv->card = (void *)((unsigned long)priv
2532 + sizeof(struct orinoco_private));
2533 else
2534 priv->card = NULL;
2535 priv->dev = device;
2536
2537 /* Setup / override net_device fields */
2538 dev->netdev_ops = &orinoco_netdev_ops;
2539 dev->watchdog_timeo = HZ; /* 1 second timeout */
2540 dev->ethtool_ops = &orinoco_ethtool_ops;
2541 dev->wireless_handlers = &orinoco_handler_def;
2542#ifdef WIRELESS_SPY
2543 priv->wireless_data.spy_data = &priv->spy_data;
2544 dev->wireless_data = &priv->wireless_data;
2545#endif
2546 /* we use the default eth_mac_addr for setting the MAC addr */
2547
2548 /* Reserve space in skb for the SNAP header */
2549 dev->hard_header_len += ENCAPS_OVERHEAD;
2550
2551 /* Set up default callbacks */
2552 priv->hard_reset = hard_reset;
2553 priv->stop_fw = stop_fw;
2554
2555 spin_lock_init(&priv->lock);
2556 priv->open = 0;
2557 priv->hw_unavailable = 1; /* orinoco_init() must clear this
2558 * before anything else touches the
2559 * hardware */
2560 INIT_WORK(&priv->reset_work, orinoco_reset);
2561 INIT_WORK(&priv->join_work, orinoco_join_ap);
2562 INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
2563
2564 INIT_LIST_HEAD(&priv->rx_list);
2565 tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
2566 (unsigned long) dev);
2567
2568 netif_carrier_off(dev);
2569 priv->last_linkstatus = 0xffff;
2570
2571 priv->cached_pri_fw = NULL;
2572 priv->cached_fw = NULL;
2573
2574 /* Register PM notifiers */
2575 priv->pm_notifier.notifier_call = orinoco_pm_notifier;
2576 register_pm_notifier(&priv->pm_notifier);
2577
2578 return dev;
2579}
2580EXPORT_SYMBOL(alloc_orinocodev);
2581
2582void free_orinocodev(struct net_device *dev)
2583{
2584 struct orinoco_private *priv = netdev_priv(dev);
2585 struct orinoco_rx_data *rx_data, *temp;
2586
2587 /* If the tasklet is scheduled when we call tasklet_kill it
2588 * will run one final time. However the tasklet will only
2589 * drain priv->rx_list if the hw is still available. */
2590 tasklet_kill(&priv->rx_tasklet);
2591
2592 /* Explicitly drain priv->rx_list */
2593 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
2594 list_del(&rx_data->list);
2595
2596 dev_kfree_skb(rx_data->skb);
2597 kfree(rx_data->desc);
2598 kfree(rx_data);
2599 }
2600
2601 unregister_pm_notifier(&priv->pm_notifier);
2602 orinoco_uncache_fw(priv);
2603
2604 priv->wpa_ie_len = 0;
2605 kfree(priv->wpa_ie);
2606 orinoco_mic_free(priv);
2607 orinoco_bss_data_free(priv);
2608 free_netdev(dev);
2609}
2610EXPORT_SYMBOL(free_orinocodev);
2611
2612static void orinoco_get_drvinfo(struct net_device *dev,
2613 struct ethtool_drvinfo *info)
2614{
2615 struct orinoco_private *priv = netdev_priv(dev);
2616
2617 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
2618 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
2619 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
2620 if (dev->dev.parent)
2621 strncpy(info->bus_info, dev_name(dev->dev.parent),
2622 sizeof(info->bus_info) - 1);
2623 else
2624 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
2625 "PCMCIA %p", priv->hw.iobase);
2626}
2627
2628static const struct ethtool_ops orinoco_ethtool_ops = {
2629 .get_drvinfo = orinoco_get_drvinfo,
2630 .get_link = ethtool_op_get_link,
2631};
2632
2633/********************************************************************/
2634/* Module initialization */
2635/********************************************************************/
2636
2637/* Can't be declared "const" or the whole __initdata section will
2638 * become const */
2639static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
2640 " (David Gibson <hermes@gibson.dropbear.id.au>, "
2641 "Pavel Roskin <proski@gnu.org>, et al)";
2642
2643static int __init init_orinoco(void)
2644{
2645 printk(KERN_DEBUG "%s\n", version);
2646 return 0;
2647}
2648
2649static void __exit exit_orinoco(void)
2650{
2651}
2652
2653module_init(init_orinoco);
2654module_exit(exit_orinoco);
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
new file mode 100644
index 00000000000..af2bae4fe39
--- /dev/null
+++ b/drivers/net/wireless/orinoco/main.h
@@ -0,0 +1,63 @@
1/* Exports from main to helper modules
2 *
3 * See copyright notice in main.c
4 */
5#ifndef _ORINOCO_MAIN_H_
6#define _ORINOCO_MAIN_H_
7
8#include <linux/ieee80211.h>
9#include "orinoco.h"
10
11/********************************************************************/
12/* Compile time configuration and compatibility stuff */
13/********************************************************************/
14
15/* We do it this way to avoid ifdefs in the actual code */
16#ifdef WIRELESS_SPY
17#define SPY_NUMBER(priv) (priv->spy_data.spy_number)
18#else
19#define SPY_NUMBER(priv) 0
20#endif /* WIRELESS_SPY */
21
22/********************************************************************/
23
24/* Export module parameter */
25extern int force_monitor;
26
27/* Forward declarations */
28struct net_device;
29struct work_struct;
30
31void set_port_type(struct orinoco_private *priv);
32int __orinoco_program_rids(struct net_device *dev);
33void orinoco_reset(struct work_struct *work);
34
35
36/* Information element helpers - find a home for these... */
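/* 802.11 information elements are TLVs: p[0] is the element ID, p[1] the
 * payload length, and p[1] bytes of data follow. */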
37static inline u8 *orinoco_get_ie(u8 *data, size_t len,
38 enum ieee80211_eid eid)
39{
40 u8 *p = data;
41 while ((p + 2) < (data + len)) {
42 if (p[0] == eid)
43 return p;
44 p += p[1] + 2;
45 }
46 return NULL;
47}
48
49#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
50#define WPA_SELECTOR_LEN 4
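/* A WPA IE is carried in a vendor-specific (generic) element holding the
 * Microsoft OUI 00:50:F2 with OUI type 1, hence the 4-byte selector above. */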
51static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
52{
53 u8 *p = data;
54 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
55 if ((p[0] == WLAN_EID_GENERIC) &&
56 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
57 return p;
58 p += p[1] + 2;
59 }
60 return NULL;
61}
62
63#endif /* _ORINOCO_MAIN_H_ */
diff --git a/drivers/net/wireless/orinoco/mic.c b/drivers/net/wireless/orinoco/mic.c
new file mode 100644
index 00000000000..c03e7f54d1b
--- /dev/null
+++ b/drivers/net/wireless/orinoco/mic.c
@@ -0,0 +1,79 @@
1/* Orinoco MIC helpers
2 *
3 * See copyright notice in main.c
4 */
5#include <linux/kernel.h>
6#include <linux/string.h>
7#include <linux/if_ether.h>
8#include <linux/scatterlist.h>
9#include <linux/crypto.h>
10
11#include "orinoco.h"
12#include "mic.h"
13
14/********************************************************************/
15/* Michael MIC crypto setup */
16/********************************************************************/
17int orinoco_mic_init(struct orinoco_private *priv)
18{
19 priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
20 if (IS_ERR(priv->tx_tfm_mic)) {
21 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
22 "crypto API michael_mic\n");
23 priv->tx_tfm_mic = NULL;
24 return -ENOMEM;
25 }
26
27 priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
28 if (IS_ERR(priv->rx_tfm_mic)) {
29 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
30 "crypto API michael_mic\n");
31 priv->rx_tfm_mic = NULL;
32 return -ENOMEM;
33 }
34
35 return 0;
36}
37
38void orinoco_mic_free(struct orinoco_private *priv)
39{
40 if (priv->tx_tfm_mic)
41 crypto_free_hash(priv->tx_tfm_mic);
42 if (priv->rx_tfm_mic)
43 crypto_free_hash(priv->rx_tfm_mic);
44}
45
46int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
47 u8 *da, u8 *sa, u8 priority,
48 u8 *data, size_t data_len, u8 *mic)
49{
50 struct hash_desc desc;
51 struct scatterlist sg[2];
52 u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
53
54 if (tfm_michael == NULL) {
55 printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
56 return -1;
57 }
58
59 /* Copy header into buffer. We need the padding on the end zeroed */
60 memcpy(&hdr[0], da, ETH_ALEN);
61 memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
62 hdr[ETH_ALEN*2] = priority;
63 hdr[ETH_ALEN*2+1] = 0;
64 hdr[ETH_ALEN*2+2] = 0;
65 hdr[ETH_ALEN*2+3] = 0;
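	/* hdr now holds DA | SA | priority | 3 zero bytes - the pseudo-header
	 * that the Michael MIC is computed over for TKIP (802.11i). */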
66
67 /* Use scatter gather to MIC header and data in one go */
68 sg_init_table(sg, 2);
69 sg_set_buf(&sg[0], hdr, sizeof(hdr));
70 sg_set_buf(&sg[1], data, data_len);
71
72 if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
73 return -1;
74
75 desc.tfm = tfm_michael;
76 desc.flags = 0;
77 return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
78 mic);
79}
diff --git a/drivers/net/wireless/orinoco/mic.h b/drivers/net/wireless/orinoco/mic.h
new file mode 100644
index 00000000000..04d05bc566d
--- /dev/null
+++ b/drivers/net/wireless/orinoco/mic.h
@@ -0,0 +1,22 @@
1/* Orinoco MIC helpers
2 *
3 * See copyright notice in main.c
4 */
5#ifndef _ORINOCO_MIC_H_
6#define _ORINOCO_MIC_H_
7
8#include <linux/types.h>
9
10#define MICHAEL_MIC_LEN 8
11
12/* Forward declarations */
13struct orinoco_private;
14struct crypto_hash;
15
16int orinoco_mic_init(struct orinoco_private *priv);
17void orinoco_mic_free(struct orinoco_private *priv);
18int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
19 u8 *da, u8 *sa, u8 priority,
20 u8 *data, size_t data_len, u8 *mic);
21
22#endif /* _ORINOCO_MIC_H_ */
diff --git a/drivers/net/wireless/orinoco/orinoco.c b/drivers/net/wireless/orinoco/orinoco.c
deleted file mode 100644
index 45a04faa781..00000000000
--- a/drivers/net/wireless/orinoco/orinoco.c
+++ /dev/null
@@ -1,6148 +0,0 @@
1/* orinoco.c - (formerly known as dldwd_cs.c and orinoco_cs.c)
2 *
3 * A driver for Hermes or Prism 2 chipset based PCMCIA wireless
4 * adaptors, with Lucent/Agere, Intersil or Symbol firmware.
5 *
6 * Current maintainers (as of 29 September 2003) are:
7 * Pavel Roskin <proski AT gnu.org>
8 * and David Gibson <hermes AT gibson.dropbear.id.au>
9 *
10 * (C) Copyright David Gibson, IBM Corporation 2001-2003.
11 * Copyright (C) 2000 David Gibson, Linuxcare Australia.
12 * With some help from :
13 * Copyright (C) 2001 Jean Tourrilhes, HP Labs
14 * Copyright (C) 2001 Benjamin Herrenschmidt
15 *
16 * Based on dummy_cs.c 1.27 2000/06/12 21:27:25
17 *
18 * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy
19 * AT fasta.fh-dortmund.de>
20 * http://www.stud.fh-dortmund.de/~andy/wvlan/
21 *
22 * The contents of this file are subject to the Mozilla Public License
23 * Version 1.1 (the "License"); you may not use this file except in
24 * compliance with the License. You may obtain a copy of the License
25 * at http://www.mozilla.org/MPL/
26 *
27 * Software distributed under the License is distributed on an "AS IS"
28 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
29 * the License for the specific language governing rights and
30 * limitations under the License.
31 *
32 * The initial developer of the original code is David A. Hinds
33 * <dahinds AT users.sourceforge.net>. Portions created by David
34 * A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
35 * Reserved.
36 *
37 * Alternatively, the contents of this file may be used under the
38 * terms of the GNU General Public License version 2 (the "GPL"), in
39 * which case the provisions of the GPL are applicable instead of the
40 * above. If you wish to allow the use of your version of this file
41 * only under the terms of the GPL and not to allow others to use your
42 * version of this file under the MPL, indicate your decision by
43 * deleting the provisions above and replace them with the notice and
44 * other provisions required by the GPL. If you do not delete the
45 * provisions above, a recipient may use your version of this file
46 * under either the MPL or the GPL. */
47
48/*
49 * TODO
50 * o Handle de-encapsulation within network layer, provide 802.11
51 * headers (patch from Thomas 'Dent' Mirlacher)
52 * o Fix possible races in SPY handling.
53 * o Disconnect wireless extensions from fundamental configuration.
54 * o (maybe) Software WEP support (patch from Stano Meduna).
55 * o (maybe) Use multiple Tx buffers - driver handling queue
56 * rather than firmware.
57 */
58
59/* Locking and synchronization:
60 *
61 * The basic principle is that everything is serialized through a
62 * single spinlock, priv->lock. The lock is used in user, bh and irq
63 * context, so when taken outside hardirq context it should always be
64 * taken with interrupts disabled. The lock protects both the
65 * hardware and the struct orinoco_private.
66 *
67 * Another flag, priv->hw_unavailable indicates that the hardware is
68 * unavailable for an extended period of time (e.g. suspended, or in
69 * the middle of a hard reset). This flag is protected by the
70 * spinlock. All code which touches the hardware should check the
71 * flag after taking the lock, and if it is set, give up on whatever
72 * they are doing and drop the lock again. The orinoco_lock()
73 * function handles this (it unlocks and returns -EBUSY if
74 * hw_unavailable is non-zero).
75 */
76
77#define DRIVER_NAME "orinoco"
78
79#include <linux/module.h>
80#include <linux/kernel.h>
81#include <linux/init.h>
82#include <linux/delay.h>
83#include <linux/netdevice.h>
84#include <linux/etherdevice.h>
85#include <linux/ethtool.h>
86#include <linux/firmware.h>
87#include <linux/suspend.h>
88#include <linux/if_arp.h>
89#include <linux/wireless.h>
90#include <linux/ieee80211.h>
91#include <net/iw_handler.h>
92
93#include <linux/scatterlist.h>
94#include <linux/crypto.h>
95
96#include "hermes_rid.h"
97#include "hermes_dld.h"
98#include "orinoco.h"
99
100/********************************************************************/
101/* Module information */
102/********************************************************************/
103
104MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & David Gibson <hermes@gibson.dropbear.id.au>");
105MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based and similar wireless cards");
106MODULE_LICENSE("Dual MPL/GPL");
107
108/* Level of debugging. Used in the macros in orinoco.h */
109#ifdef ORINOCO_DEBUG
110int orinoco_debug = ORINOCO_DEBUG;
111module_param(orinoco_debug, int, 0644);
112MODULE_PARM_DESC(orinoco_debug, "Debug level");
113EXPORT_SYMBOL(orinoco_debug);
114#endif
115
116static int suppress_linkstatus; /* = 0 */
117module_param(suppress_linkstatus, bool, 0644);
118MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
119static int ignore_disconnect; /* = 0 */
120module_param(ignore_disconnect, int, 0644);
121MODULE_PARM_DESC(ignore_disconnect, "Don't report lost link to the network layer");
122
123static int force_monitor; /* = 0 */
124module_param(force_monitor, int, 0644);
125MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions");
126
127/********************************************************************/
128/* Compile time configuration and compatibility stuff */
129/********************************************************************/
130
131/* We do it this way to avoid ifdefs in the actual code */
132#ifdef WIRELESS_SPY
133#define SPY_NUMBER(priv) (priv->spy_data.spy_number)
134#else
135#define SPY_NUMBER(priv) 0
136#endif /* WIRELESS_SPY */
137
138/********************************************************************/
139/* Internal constants */
140/********************************************************************/
141
142/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
143static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
144#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
145
146#define ORINOCO_MIN_MTU 256
147#define ORINOCO_MAX_MTU (IEEE80211_MAX_DATA_LEN - ENCAPS_OVERHEAD)
148
149#define SYMBOL_MAX_VER_LEN (14)
150#define USER_BAP 0
151#define IRQ_BAP 1
152#define MAX_IRQLOOPS_PER_IRQ 10
153#define MAX_IRQLOOPS_PER_JIFFY (20000/HZ) /* Based on a guestimate of
154 * how many events the
155 * device could
156 * legitimately generate */
157#define SMALL_KEY_SIZE 5
158#define LARGE_KEY_SIZE 13
159#define TX_NICBUF_SIZE_BUG 1585 /* Bug in Symbol firmware */
160
161#define DUMMY_FID 0xFFFF
162
163/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
164 HERMES_MAX_MULTICAST : 0)*/
165#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
166
167#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
168 | HERMES_EV_TX | HERMES_EV_TXEXC \
169 | HERMES_EV_WTERR | HERMES_EV_INFO \
170 | HERMES_EV_INFDROP )
171
172#define MAX_RID_LEN 1024
173
174static const struct iw_handler_def orinoco_handler_def;
175static const struct ethtool_ops orinoco_ethtool_ops;
176
177/********************************************************************/
178/* Data tables */
179/********************************************************************/
180
181/* The frequency of each channel in MHz */
182static const long channel_frequency[] = {
183 2412, 2417, 2422, 2427, 2432, 2437, 2442,
184 2447, 2452, 2457, 2462, 2467, 2472, 2484
185};
186#define NUM_CHANNELS ARRAY_SIZE(channel_frequency)
187
188/* This table gives the actual meanings of the bitrate IDs returned
189 * by the firmware. */
190static struct {
191 int bitrate; /* in 100s of kilobits */
192 int automatic;
193 u16 agere_txratectrl;
194 u16 intersil_txratectrl;
195} bitrate_table[] = {
196 {110, 1, 3, 15}, /* Entry 0 is the default */
197 {10, 0, 1, 1},
198 {10, 1, 1, 1},
199 {20, 0, 2, 2},
200 {20, 1, 6, 3},
201 {55, 0, 4, 4},
202 {55, 1, 7, 7},
203 {110, 0, 5, 8},
204};
205#define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table)
206
207/********************************************************************/
208/* Data types */
209/********************************************************************/
210
211/* Beginning of the Tx descriptor, used in TxExc handling */
212struct hermes_txexc_data {
213 struct hermes_tx_descriptor desc;
214 __le16 frame_ctl;
215 __le16 duration_id;
216 u8 addr1[ETH_ALEN];
217} __attribute__ ((packed));
218
219/* Rx frame header except compatibility 802.3 header */
220struct hermes_rx_descriptor {
221 /* Control */
222 __le16 status;
223 __le32 time;
224 u8 silence;
225 u8 signal;
226 u8 rate;
227 u8 rxflow;
228 __le32 reserved;
229
230 /* 802.11 header */
231 __le16 frame_ctl;
232 __le16 duration_id;
233 u8 addr1[ETH_ALEN];
234 u8 addr2[ETH_ALEN];
235 u8 addr3[ETH_ALEN];
236 __le16 seq_ctl;
237 u8 addr4[ETH_ALEN];
238
239 /* Data length */
240 __le16 data_len;
241} __attribute__ ((packed));
242
243/********************************************************************/
244/* Function prototypes */
245/********************************************************************/
246
247static int __orinoco_program_rids(struct net_device *dev);
248static void __orinoco_set_multicast_list(struct net_device *dev);
249
250/********************************************************************/
251/* Michael MIC crypto setup */
252/********************************************************************/
253#define MICHAEL_MIC_LEN 8
254static int orinoco_mic_init(struct orinoco_private *priv)
255{
256 priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
257 if (IS_ERR(priv->tx_tfm_mic)) {
258 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
259 "crypto API michael_mic\n");
260 priv->tx_tfm_mic = NULL;
261 return -ENOMEM;
262 }
263
264 priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
265 if (IS_ERR(priv->rx_tfm_mic)) {
266 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
267 "crypto API michael_mic\n");
268 priv->rx_tfm_mic = NULL;
269 return -ENOMEM;
270 }
271
272 return 0;
273}
274
275static void orinoco_mic_free(struct orinoco_private *priv)
276{
277 if (priv->tx_tfm_mic)
278 crypto_free_hash(priv->tx_tfm_mic);
279 if (priv->rx_tfm_mic)
280 crypto_free_hash(priv->rx_tfm_mic);
281}
282
283static int michael_mic(struct crypto_hash *tfm_michael, u8 *key,
284 u8 *da, u8 *sa, u8 priority,
285 u8 *data, size_t data_len, u8 *mic)
286{
287 struct hash_desc desc;
288 struct scatterlist sg[2];
289 u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
290
291 if (tfm_michael == NULL) {
292 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
293 return -1;
294 }
295
296 /* Copy header into buffer. We need the padding on the end zeroed */
297 memcpy(&hdr[0], da, ETH_ALEN);
298 memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
299 hdr[ETH_ALEN*2] = priority;
300 hdr[ETH_ALEN*2+1] = 0;
301 hdr[ETH_ALEN*2+2] = 0;
302 hdr[ETH_ALEN*2+3] = 0;
303
304 /* Use scatter gather to MIC header and data in one go */
305 sg_init_table(sg, 2);
306 sg_set_buf(&sg[0], hdr, sizeof(hdr));
307 sg_set_buf(&sg[1], data, data_len);
308
309 if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
310 return -1;
311
312 desc.tfm = tfm_michael;
313 desc.flags = 0;
314 return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
315 mic);
316}
317
318/********************************************************************/
319/* Internal helper functions */
320/********************************************************************/
321
322static inline void set_port_type(struct orinoco_private *priv)
323{
324 switch (priv->iw_mode) {
325 case IW_MODE_INFRA:
326 priv->port_type = 1;
327 priv->createibss = 0;
328 break;
329 case IW_MODE_ADHOC:
330 if (priv->prefer_port3) {
331 priv->port_type = 3;
332 priv->createibss = 0;
333 } else {
334 priv->port_type = priv->ibss_port;
335 priv->createibss = 1;
336 }
337 break;
338 case IW_MODE_MONITOR:
339 priv->port_type = 3;
340 priv->createibss = 0;
341 break;
342 default:
343 printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
344 priv->ndev->name);
345 }
346}
347
348#define ORINOCO_MAX_BSS_COUNT 64
349static int orinoco_bss_data_allocate(struct orinoco_private *priv)
350{
351 if (priv->bss_xbss_data)
352 return 0;
353
354 if (priv->has_ext_scan)
355 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
356 sizeof(struct xbss_element),
357 GFP_KERNEL);
358 else
359 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
360 sizeof(struct bss_element),
361 GFP_KERNEL);
362
363 if (!priv->bss_xbss_data) {
364 printk(KERN_WARNING "Out of memory allocating beacons");
365 return -ENOMEM;
366 }
367 return 0;
368}
369
370static void orinoco_bss_data_free(struct orinoco_private *priv)
371{
372 kfree(priv->bss_xbss_data);
373 priv->bss_xbss_data = NULL;
374}
375
376#define PRIV_BSS ((struct bss_element *)priv->bss_xbss_data)
377#define PRIV_XBSS ((struct xbss_element *)priv->bss_xbss_data)
378static void orinoco_bss_data_init(struct orinoco_private *priv)
379{
380 int i;
381
382 INIT_LIST_HEAD(&priv->bss_free_list);
383 INIT_LIST_HEAD(&priv->bss_list);
384 if (priv->has_ext_scan)
385 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
386 list_add_tail(&(PRIV_XBSS[i].list),
387 &priv->bss_free_list);
388 else
389 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
390 list_add_tail(&(PRIV_BSS[i].list),
391 &priv->bss_free_list);
392
393}
394
395static inline u8 *orinoco_get_ie(u8 *data, size_t len,
396 enum ieee80211_eid eid)
397{
398 u8 *p = data;
399 while ((p + 2) < (data + len)) {
400 if (p[0] == eid)
401 return p;
402 p += p[1] + 2;
403 }
404 return NULL;
405}
406
407#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
408#define WPA_SELECTOR_LEN 4
409static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
410{
411 u8 *p = data;
412 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
413 if ((p[0] == WLAN_EID_GENERIC) &&
414 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
415 return p;
416 p += p[1] + 2;
417 }
418 return NULL;
419}
420
421
422/********************************************************************/
423/* Download functionality */
424/********************************************************************/
425
426struct fw_info {
427 char *pri_fw;
428 char *sta_fw;
429 char *ap_fw;
430 u32 pda_addr;
431 u16 pda_size;
432};
433
434static const struct fw_info orinoco_fw[] = {
435 { NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
436 { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
437 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
438};
439
440/* Structure used to access fields in FW
441 * Make sure LE decoding macros are used
442 */
443struct orinoco_fw_header {
444 char hdr_vers[6]; /* ASCII string for header version */
445 __le16 headersize; /* Total length of header */
446 __le32 entry_point; /* NIC entry point */
447 __le32 blocks; /* Number of blocks to program */
448 __le32 block_offset; /* Offset of block data from eof header */
449 __le32 pdr_offset; /* Offset to PDR data from eof header */
450 __le32 pri_offset; /* Offset to primary plug data */
451 __le32 compat_offset; /* Offset to compatibility data*/
452 char signature[0]; /* FW signature length headersize-20 */
453} __attribute__ ((packed));
454
455/* Download either STA or AP firmware into the card. */
456static int
457orinoco_dl_firmware(struct orinoco_private *priv,
458 const struct fw_info *fw,
459 int ap)
460{
461 /* Plug Data Area (PDA) */
462 __le16 *pda;
463
464 hermes_t *hw = &priv->hw;
465 const struct firmware *fw_entry;
466 const struct orinoco_fw_header *hdr;
467 const unsigned char *first_block;
468 const unsigned char *end;
469 const char *firmware;
470 struct net_device *dev = priv->ndev;
471 int err = 0;
472
473 pda = kzalloc(fw->pda_size, GFP_KERNEL);
474 if (!pda)
475 return -ENOMEM;
476
477 if (ap)
478 firmware = fw->ap_fw;
479 else
480 firmware = fw->sta_fw;
481
482 printk(KERN_DEBUG "%s: Attempting to download firmware %s\n",
483 dev->name, firmware);
484
485 /* Read current plug data */
486 err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
487 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
488 if (err)
489 goto free;
490
491 if (!priv->cached_fw) {
492 err = request_firmware(&fw_entry, firmware, priv->dev);
493
494 if (err) {
495 printk(KERN_ERR "%s: Cannot find firmware %s\n",
496 dev->name, firmware);
497 err = -ENOENT;
498 goto free;
499 }
500 } else
501 fw_entry = priv->cached_fw;
502
503 hdr = (const struct orinoco_fw_header *) fw_entry->data;
504
505 /* Enable aux port to allow programming */
506 err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
507 printk(KERN_DEBUG "%s: Program init returned %d\n", dev->name, err);
508 if (err != 0)
509 goto abort;
510
511 /* Program data */
512 first_block = (fw_entry->data +
513 le16_to_cpu(hdr->headersize) +
514 le32_to_cpu(hdr->block_offset));
515 end = fw_entry->data + fw_entry->size;
516
517 err = hermes_program(hw, first_block, end);
518 printk(KERN_DEBUG "%s: Program returned %d\n", dev->name, err);
519 if (err != 0)
520 goto abort;
521
522 /* Update production data */
523 first_block = (fw_entry->data +
524 le16_to_cpu(hdr->headersize) +
525 le32_to_cpu(hdr->pdr_offset));
526
527 err = hermes_apply_pda_with_defaults(hw, first_block, pda);
528 printk(KERN_DEBUG "%s: Apply PDA returned %d\n", dev->name, err);
529 if (err)
530 goto abort;
531
532 /* Tell card we've finished */
533 err = hermesi_program_end(hw);
534 printk(KERN_DEBUG "%s: Program end returned %d\n", dev->name, err);
535 if (err != 0)
536 goto abort;
537
538 /* Check if we're running */
539 printk(KERN_DEBUG "%s: hermes_present returned %d\n",
540 dev->name, hermes_present(hw));
541
542abort:
543 /* If we requested the firmware, release it. */
544 if (!priv->cached_fw)
545 release_firmware(fw_entry);
546
547free:
548 kfree(pda);
549 return err;
550}
551
552/* End markers */
553#define TEXT_END 0x1A /* End of text header */
554
555/*
556 * Process a firmware image - stop the card, load the firmware, reset
557 * the card and make sure it responds. For the secondary firmware take
558 * care of the PDA - read it and then write it on top of the firmware.
559 */
560static int
561symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
562 const unsigned char *image, const unsigned char *end,
563 int secondary)
564{
565 hermes_t *hw = &priv->hw;
566 int ret = 0;
567 const unsigned char *ptr;
568 const unsigned char *first_block;
569
570 /* Plug Data Area (PDA) */
571 __le16 *pda = NULL;
572
573 /* Binary block begins after the 0x1A marker */
574 ptr = image;
575 while (*ptr++ != TEXT_END);
576 first_block = ptr;
577
578 /* Read the PDA from EEPROM */
579 if (secondary) {
580 pda = kzalloc(fw->pda_size, GFP_KERNEL);
581 if (!pda)
582 return -ENOMEM;
583
584 ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
585 if (ret)
586 goto free;
587 }
588
589 /* Stop the firmware, so that it can be safely rewritten */
590 if (priv->stop_fw) {
591 ret = priv->stop_fw(priv, 1);
592 if (ret)
593 goto free;
594 }
595
596 /* Program the adapter with new firmware */
597 ret = hermes_program(hw, first_block, end);
598 if (ret)
599 goto free;
600
601 /* Write the PDA to the adapter */
602 if (secondary) {
603 size_t len = hermes_blocks_length(first_block);
604 ptr = first_block + len;
605 ret = hermes_apply_pda(hw, ptr, pda);
606 kfree(pda);
607 if (ret)
608 return ret;
609 }
610
611 /* Run the firmware */
612 if (priv->stop_fw) {
613 ret = priv->stop_fw(priv, 0);
614 if (ret)
615 return ret;
616 }
617
618 /* Reset hermes chip and make sure it responds */
619 ret = hermes_init(hw);
620
621 /* hermes_reset() should return 0 with the secondary firmware */
622 if (secondary && ret != 0)
623 return -ENODEV;
624
625 /* And this should work with any firmware */
626 if (!hermes_present(hw))
627 return -ENODEV;
628
629 return 0;
630
631free:
632 kfree(pda);
633 return ret;
634}
635
636
637/*
638 * Download the firmware into the card, this also does a PCMCIA soft
639 * reset on the card, to make sure it's in a sane state.
640 */
641static int
642symbol_dl_firmware(struct orinoco_private *priv,
643 const struct fw_info *fw)
644{
645 struct net_device *dev = priv->ndev;
646 int ret;
647 const struct firmware *fw_entry;
648
649 if (!priv->cached_pri_fw) {
650 if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) {
651 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
652 dev->name, fw->pri_fw);
653 return -ENOENT;
654 }
655 } else
656 fw_entry = priv->cached_pri_fw;
657
658 /* Load primary firmware */
659 ret = symbol_dl_image(priv, fw, fw_entry->data,
660 fw_entry->data + fw_entry->size, 0);
661
662 if (!priv->cached_pri_fw)
663 release_firmware(fw_entry);
664 if (ret) {
665 printk(KERN_ERR "%s: Primary firmware download failed\n",
666 dev->name);
667 return ret;
668 }
669
670 if (!priv->cached_fw) {
671 if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) {
672 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
673 dev->name, fw->sta_fw);
674 return -ENOENT;
675 }
676 } else
677 fw_entry = priv->cached_fw;
678
679 /* Load secondary firmware */
680 ret = symbol_dl_image(priv, fw, fw_entry->data,
681 fw_entry->data + fw_entry->size, 1);
682 if (!priv->cached_fw)
683 release_firmware(fw_entry);
684 if (ret) {
685 printk(KERN_ERR "%s: Secondary firmware download failed\n",
686 dev->name);
687 }
688
689 return ret;
690}
691
692static int orinoco_download(struct orinoco_private *priv)
693{
694 int err = 0;
695 /* Reload firmware */
696 switch (priv->firmware_type) {
697 case FIRMWARE_TYPE_AGERE:
698 /* case FIRMWARE_TYPE_INTERSIL: */
699 err = orinoco_dl_firmware(priv,
700 &orinoco_fw[priv->firmware_type], 0);
701 break;
702
703 case FIRMWARE_TYPE_SYMBOL:
704 err = symbol_dl_firmware(priv,
705 &orinoco_fw[priv->firmware_type]);
706 break;
707 case FIRMWARE_TYPE_INTERSIL:
708 break;
709 }
710 /* TODO: if we fail we probably need to reinitialise
711 * the driver */
712
713 return err;
714}
715
716#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
717static void orinoco_cache_fw(struct orinoco_private *priv, int ap)
718{
719 const struct firmware *fw_entry = NULL;
720 const char *pri_fw;
721 const char *fw;
722
723 pri_fw = orinoco_fw[priv->firmware_type].pri_fw;
724 if (ap)
725 fw = orinoco_fw[priv->firmware_type].ap_fw;
726 else
727 fw = orinoco_fw[priv->firmware_type].sta_fw;
728
729 if (pri_fw) {
730 if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0)
731 priv->cached_pri_fw = fw_entry;
732 }
733
734 if (fw) {
735 if (request_firmware(&fw_entry, fw, priv->dev) == 0)
736 priv->cached_fw = fw_entry;
737 }
738}
739
740static void orinoco_uncache_fw(struct orinoco_private *priv)
741{
742 if (priv->cached_pri_fw)
743 release_firmware(priv->cached_pri_fw);
744 if (priv->cached_fw)
745 release_firmware(priv->cached_fw);
746
747 priv->cached_pri_fw = NULL;
748 priv->cached_fw = NULL;
749}
750#else
751#define orinoco_cache_fw(priv, ap)
752#define orinoco_uncache_fw(priv)
753#endif
754
755/********************************************************************/
756/* Device methods */
757/********************************************************************/
758
759static int orinoco_open(struct net_device *dev)
760{
761 struct orinoco_private *priv = netdev_priv(dev);
762 unsigned long flags;
763 int err;
764
765 if (orinoco_lock(priv, &flags) != 0)
766 return -EBUSY;
767
768 err = __orinoco_up(dev);
769
770 if (! err)
771 priv->open = 1;
772
773 orinoco_unlock(priv, &flags);
774
775 return err;
776}
777
778static int orinoco_stop(struct net_device *dev)
779{
780 struct orinoco_private *priv = netdev_priv(dev);
781 int err = 0;
782
783 /* We mustn't use orinoco_lock() here, because we need to be
784 able to close the interface even if hw_unavailable is set
785 (e.g. as we're released after a PC Card removal) */
786 spin_lock_irq(&priv->lock);
787
788 priv->open = 0;
789
790 err = __orinoco_down(dev);
791
792 spin_unlock_irq(&priv->lock);
793
794 return err;
795}
796
797static struct net_device_stats *orinoco_get_stats(struct net_device *dev)
798{
799 struct orinoco_private *priv = netdev_priv(dev);
800
801 return &priv->stats;
802}
803
804static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
805{
806 struct orinoco_private *priv = netdev_priv(dev);
807 hermes_t *hw = &priv->hw;
808 struct iw_statistics *wstats = &priv->wstats;
809 int err;
810 unsigned long flags;
811
812 if (! netif_device_present(dev)) {
813 printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
814 dev->name);
815 return NULL; /* FIXME: Can we do better than this? */
816 }
817
818 /* If busy, return the old stats. Returning NULL may cause
819 * the interface to disappear from /proc/net/wireless */
820 if (orinoco_lock(priv, &flags) != 0)
821 return wstats;
822
823 /* We can't really wait for the tallies inquiry command to
824 * complete, so we just use the previous results and trigger
825 * a new tallies inquiry command for next time - Jean II */
826 /* FIXME: Really we should wait for the inquiry to come back -
827 * as it is the stats we give don't make a whole lot of sense.
828 * Unfortunately, it's not clear how to do that within the
829 * wireless extensions framework: I think we're in user
830 * context, but a lock seems to be held by the time we get in
831 * here so we're not safe to sleep here. */
832 hermes_inquire(hw, HERMES_INQ_TALLIES);
833
834 if (priv->iw_mode == IW_MODE_ADHOC) {
835 memset(&wstats->qual, 0, sizeof(wstats->qual));
836 /* If a spy address is defined, we report stats of the
837 * first spy address - Jean II */
838 if (SPY_NUMBER(priv)) {
839 wstats->qual.qual = priv->spy_data.spy_stat[0].qual;
840 wstats->qual.level = priv->spy_data.spy_stat[0].level;
841 wstats->qual.noise = priv->spy_data.spy_stat[0].noise;
842 wstats->qual.updated = priv->spy_data.spy_stat[0].updated;
843 }
844 } else {
845 struct {
846 __le16 qual, signal, noise, unused;
847 } __attribute__ ((packed)) cq;
848
849 err = HERMES_READ_RECORD(hw, USER_BAP,
850 HERMES_RID_COMMSQUALITY, &cq);
851
852 if (!err) {
853 wstats->qual.qual = (int)le16_to_cpu(cq.qual);
854 wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
855 wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
856 wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
857 }
858 }
859
860 orinoco_unlock(priv, &flags);
861 return wstats;
862}
863
864static void orinoco_set_multicast_list(struct net_device *dev)
865{
866 struct orinoco_private *priv = netdev_priv(dev);
867 unsigned long flags;
868
869 if (orinoco_lock(priv, &flags) != 0) {
870 printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
871 "called when hw_unavailable\n", dev->name);
872 return;
873 }
874
875 __orinoco_set_multicast_list(dev);
876 orinoco_unlock(priv, &flags);
877}
878
879static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
880{
881 struct orinoco_private *priv = netdev_priv(dev);
882
883 if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
884 return -EINVAL;
885
886 /* MTU + encapsulation + header length */
887 if ( (new_mtu + ENCAPS_OVERHEAD + sizeof(struct ieee80211_hdr)) >
888 (priv->nicbuf_size - ETH_HLEN) )
889 return -EINVAL;
890
891 dev->mtu = new_mtu;
892
893 return 0;
894}
895
896/********************************************************************/
897/* Tx path */
898/********************************************************************/
899
900static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
901{
902 struct orinoco_private *priv = netdev_priv(dev);
903 struct net_device_stats *stats = &priv->stats;
904 hermes_t *hw = &priv->hw;
905 int err = 0;
906 u16 txfid = priv->txfid;
907 struct ethhdr *eh;
908 int tx_control;
909 unsigned long flags;
910
911 if (! netif_running(dev)) {
912 printk(KERN_ERR "%s: Tx on stopped device!\n",
913 dev->name);
914 return NETDEV_TX_BUSY;
915 }
916
917 if (netif_queue_stopped(dev)) {
918 printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
919 dev->name);
920 return NETDEV_TX_BUSY;
921 }
922
923 if (orinoco_lock(priv, &flags) != 0) {
924 printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
925 dev->name);
926 return NETDEV_TX_BUSY;
927 }
928
929 if (! netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) {
930 /* Oops, the firmware hasn't established a connection,
931 silently drop the packet (this seems to be the
932 safest approach). */
933 goto drop;
934 }
935
936 /* Check packet length */
937 if (skb->len < ETH_HLEN)
938 goto drop;
939
940 tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
941
942 if (priv->encode_alg == IW_ENCODE_ALG_TKIP)
943 tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
944 HERMES_TXCTRL_MIC;
945
946 if (priv->has_alt_txcntl) {
947 /* WPA-enabled firmwares have tx_cntl at the end of
948 * the 802.11 header, so write a zeroed descriptor and
949 * the 802.11 header at the same time
950 */
951 char desc[HERMES_802_3_OFFSET];
952 __le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET];
953
954 memset(&desc, 0, sizeof(desc));
955
956 *txcntl = cpu_to_le16(tx_control);
957 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
958 txfid, 0);
959 if (err) {
960 if (net_ratelimit())
961 printk(KERN_ERR "%s: Error %d writing Tx "
962 "descriptor to BAP\n", dev->name, err);
963 goto busy;
964 }
965 } else {
966 struct hermes_tx_descriptor desc;
967
968 memset(&desc, 0, sizeof(desc));
969
970 desc.tx_control = cpu_to_le16(tx_control);
971 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
972 txfid, 0);
973 if (err) {
974 if (net_ratelimit())
975 printk(KERN_ERR "%s: Error %d writing Tx "
976 "descriptor to BAP\n", dev->name, err);
977 goto busy;
978 }
979
980 /* Clear the 802.11 header and data length fields - some
981 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
982 * if this isn't done. */
983 hermes_clear_words(hw, HERMES_DATA0,
984 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
985 }
986
987 eh = (struct ethhdr *)skb->data;
988
989 /* Encapsulate Ethernet-II frames */
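	/* h_proto values greater than ETH_DATA_LEN (1500) are EtherTypes
	 * (Ethernet-II); anything else is taken to be an 802.3 length
	 * field and the frame is left as-is. */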
990 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
991 struct header_struct {
992 struct ethhdr eth; /* 802.3 header */
993 u8 encap[6]; /* 802.2 header */
994 } __attribute__ ((packed)) hdr;
995
996 /* Strip destination and source from the data */
997 skb_pull(skb, 2 * ETH_ALEN);
998
999 /* And move them to a separate header */
1000 memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
1001 hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
1002 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
1003
1004 /* Insert the SNAP header */
1005 if (skb_headroom(skb) < sizeof(hdr)) {
1006 printk(KERN_ERR
1007 "%s: Not enough headroom for 802.2 headers %d\n",
1008 dev->name, skb_headroom(skb));
1009 goto drop;
1010 }
1011 eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
1012 memcpy(eh, &hdr, sizeof(hdr));
1013 }
1014
1015 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
1016 txfid, HERMES_802_3_OFFSET);
1017 if (err) {
1018 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
1019 dev->name, err);
1020 goto busy;
1021 }
1022
1023 /* Calculate Michael MIC */
1024 if (priv->encode_alg == IW_ENCODE_ALG_TKIP) {
1025 u8 mic_buf[MICHAEL_MIC_LEN + 1];
1026 u8 *mic;
1027 size_t offset;
1028 size_t len;
1029
1030 if (skb->len % 2) {
1031 /* MIC start is on an odd boundary */
1032 mic_buf[0] = skb->data[skb->len - 1];
1033 mic = &mic_buf[1];
1034 offset = skb->len - 1;
1035 len = MICHAEL_MIC_LEN + 1;
1036 } else {
1037 mic = &mic_buf[0];
1038 offset = skb->len;
1039 len = MICHAEL_MIC_LEN;
1040 }
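		/* If skb->len is odd, the last payload byte rides along in
		 * mic_buf[0] so that the MIC write below starts on an even
		 * offset - the card's BAP I/O granularity is 16 bits. */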
1041
1042 michael_mic(priv->tx_tfm_mic,
1043 priv->tkip_key[priv->tx_key].tx_mic,
1044 eh->h_dest, eh->h_source, 0 /* priority */,
1045 skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
1046
1047 /* Write the MIC */
1048 err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
1049 txfid, HERMES_802_3_OFFSET + offset);
1050 if (err) {
1051 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
1052 dev->name, err);
1053 goto busy;
1054 }
1055 }
1056
1057 /* Finally, we actually initiate the send */
1058 netif_stop_queue(dev);
1059
1060 err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
1061 txfid, NULL);
1062 if (err) {
1063 netif_start_queue(dev);
1064 if (net_ratelimit())
1065 printk(KERN_ERR "%s: Error %d transmitting packet\n",
1066 dev->name, err);
1067 goto busy;
1068 }
1069
1070 dev->trans_start = jiffies;
1071 stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
1072 goto ok;
1073
1074 drop:
1075 stats->tx_errors++;
1076 stats->tx_dropped++;
1077
1078 ok:
1079 orinoco_unlock(priv, &flags);
1080 dev_kfree_skb(skb);
1081 return NETDEV_TX_OK;
1082
1083 busy:
1084 if (err == -EIO)
1085 schedule_work(&priv->reset_work);
1086 orinoco_unlock(priv, &flags);
1087 return NETDEV_TX_BUSY;
1088}
1089
1090static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
1091{
1092 struct orinoco_private *priv = netdev_priv(dev);
1093 u16 fid = hermes_read_regn(hw, ALLOCFID);
1094
1095 if (fid != priv->txfid) {
1096 if (fid != DUMMY_FID)
1097 printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
1098 dev->name, fid);
1099 return;
1100 }
1101
1102 hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
1103}
1104
1105static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
1106{
1107 struct orinoco_private *priv = netdev_priv(dev);
1108 struct net_device_stats *stats = &priv->stats;
1109
1110 stats->tx_packets++;
1111
1112 netif_wake_queue(dev);
1113
1114 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
1115}
1116
1117static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
1118{
1119 struct orinoco_private *priv = netdev_priv(dev);
1120 struct net_device_stats *stats = &priv->stats;
1121 u16 fid = hermes_read_regn(hw, TXCOMPLFID);
1122 u16 status;
1123 struct hermes_txexc_data hdr;
1124 int err = 0;
1125
1126 if (fid == DUMMY_FID)
1127 return; /* Nothing's really happened */
1128
1129 /* Read part of the frame header - we need status and addr1 */
1130 err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
1131 sizeof(struct hermes_txexc_data),
1132 fid, 0);
1133
1134 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
1135 stats->tx_errors++;
1136
1137 if (err) {
1138 printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
1139 "(FID=%04X error %d)\n",
1140 dev->name, fid, err);
1141 return;
1142 }
1143
1144 DEBUG(1, "%s: Tx error, err %d (FID=%04X)\n", dev->name,
1145 err, fid);
1146
1147 /* We produce a TXDROP event only for retry or lifetime
1148 * exceeded, because those are the only statuses that really
1149 * mean that this particular node went away.
1150 * Other errors mean that *we* screwed up. - Jean II */
1151 status = le16_to_cpu(hdr.desc.status);
1152 if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
1153 union iwreq_data wrqu;
1154
1155 /* Copy 802.11 dest address.
1156 * We use the 802.11 header because the frame may
1157 * not be 802.3 or may be mangled...
1158 * In Ad-Hoc mode, it will be the node address.
1159 * In managed mode, it will most likely be the AP address.
1160 * User space will figure out how to convert it to
1161 * whatever it needs (IP address or otherwise).
1162 * - Jean II */
1163 memcpy(wrqu.addr.sa_data, hdr.addr1, ETH_ALEN);
1164 wrqu.addr.sa_family = ARPHRD_ETHER;
1165
1166 /* Send event to user space */
1167 wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
1168 }
1169
1170 netif_wake_queue(dev);
1171}
1172
1173static void orinoco_tx_timeout(struct net_device *dev)
1174{
1175 struct orinoco_private *priv = netdev_priv(dev);
1176 struct net_device_stats *stats = &priv->stats;
1177 struct hermes *hw = &priv->hw;
1178
1179 printk(KERN_WARNING "%s: Tx timeout! "
1180 "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
1181 dev->name, hermes_read_regn(hw, ALLOCFID),
1182 hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
1183
1184 stats->tx_errors++;
1185
1186 schedule_work(&priv->reset_work);
1187}
1188
1189/********************************************************************/
1190/* Rx path (data frames) */
1191/********************************************************************/
1192
1193/* Does the frame have a SNAP header indicating it should be
1194 * de-encapsulated to Ethernet-II? */
1195static inline int is_ethersnap(void *_hdr)
1196{
1197 u8 *hdr = _hdr;
1198
1199 /* We de-encapsulate all packets which a) have SNAP headers
1200 * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header)
1201 * and b) have a SNAP OUI of 00:00:00 or
1202 * 00:00:f8 - we need both because different APs appear to use
1203 * different OUIs for some reason */
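	/* Note: the memcmp below covers the first five SNAP bytes via
	 * encaps_hdr (presumably AA AA 03 00 00), so only the last OUI
	 * byte is checked explicitly. */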
1204 return (memcmp(hdr, &encaps_hdr, 5) == 0)
1205 && ( (hdr[5] == 0x00) || (hdr[5] == 0xf8) );
1206}
1207
1208static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
1209 int level, int noise)
1210{
1211 struct iw_quality wstats;
1212 wstats.level = level - 0x95;
1213 wstats.noise = noise - 0x95;
1214 wstats.qual = (level > noise) ? (level - noise) : 0;
1215 wstats.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
1216 /* Update spy records */
1217 wireless_spy_update(dev, mac, &wstats);
1218}
1219
1220static void orinoco_stat_gather(struct net_device *dev,
1221 struct sk_buff *skb,
1222 struct hermes_rx_descriptor *desc)
1223{
1224 struct orinoco_private *priv = netdev_priv(dev);
1225
1226 /* Using spy support with lots of Rx packets, like in an
1227 * infrastructure (AP), will really slow down everything, because
1228 * the MAC address must be compared to each entry of the spy list.
1229 * If the user really asks for it (set some address in the
1230 * spy list), we do it, but he will pay the price.
1231 * Note that to get here, you need both WIRELESS_SPY
1232 * compiled in AND some addresses in the list !!!
1233 */
1234 /* Note : gcc will optimise the whole section away if
1235 * WIRELESS_SPY is not defined... - Jean II */
1236 if (SPY_NUMBER(priv)) {
1237 orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN,
1238 desc->signal, desc->silence);
1239 }
1240}
1241
1242/*
1243 * orinoco_rx_monitor - handle received monitor frames.
1244 *
1245 * Arguments:
1246 * dev network device
1247 * rxfid received FID
1248 * desc rx descriptor of the frame
1249 *
1250 * Call context: interrupt
1251 */
1252static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
1253 struct hermes_rx_descriptor *desc)
1254{
1255 u32 hdrlen = 30; /* return full header by default */
1256 u32 datalen = 0;
1257 u16 fc;
1258 int err;
1259 int len;
1260 struct sk_buff *skb;
1261 struct orinoco_private *priv = netdev_priv(dev);
1262 struct net_device_stats *stats = &priv->stats;
1263 hermes_t *hw = &priv->hw;
1264
1265 len = le16_to_cpu(desc->data_len);
1266
1267 /* Determine the size of the header and the data */
1268 fc = le16_to_cpu(desc->frame_ctl);
1269 switch (fc & IEEE80211_FCTL_FTYPE) {
1270 case IEEE80211_FTYPE_DATA:
1271 if ((fc & IEEE80211_FCTL_TODS)
1272 && (fc & IEEE80211_FCTL_FROMDS))
1273 hdrlen = 30;
1274 else
1275 hdrlen = 24;
1276 datalen = len;
1277 break;
1278 case IEEE80211_FTYPE_MGMT:
1279 hdrlen = 24;
1280 datalen = len;
1281 break;
1282 case IEEE80211_FTYPE_CTL:
1283 switch (fc & IEEE80211_FCTL_STYPE) {
1284 case IEEE80211_STYPE_PSPOLL:
1285 case IEEE80211_STYPE_RTS:
1286 case IEEE80211_STYPE_CFEND:
1287 case IEEE80211_STYPE_CFENDACK:
1288 hdrlen = 16;
1289 break;
1290 case IEEE80211_STYPE_CTS:
1291 case IEEE80211_STYPE_ACK:
1292 hdrlen = 10;
1293 break;
1294 }
1295 break;
1296 default:
1297 /* Unknown frame type */
1298 break;
1299 }
1300
1301 /* sanity check the length */
1302 if (datalen > IEEE80211_MAX_DATA_LEN + 12) {
1303 printk(KERN_DEBUG "%s: oversized monitor frame, "
1304 "data length = %d\n", dev->name, datalen);
1305 stats->rx_length_errors++;
1306 goto update_stats;
1307 }
1308
1309 skb = dev_alloc_skb(hdrlen + datalen);
1310 if (!skb) {
1311 printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n",
1312 dev->name);
1313 goto update_stats;
1314 }
1315
1316 /* Copy the 802.11 header to the skb */
1317 memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen);
1318 skb_reset_mac_header(skb);
1319
1320 /* If any, copy the data from the card to the skb */
1321 if (datalen > 0) {
1322 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
1323 ALIGN(datalen, 2), rxfid,
1324 HERMES_802_2_OFFSET);
1325 if (err) {
1326 printk(KERN_ERR "%s: error %d reading monitor frame\n",
1327 dev->name, err);
1328 goto drop;
1329 }
1330 }
1331
1332 skb->dev = dev;
1333 skb->ip_summed = CHECKSUM_NONE;
1334 skb->pkt_type = PACKET_OTHERHOST;
1335 skb->protocol = __constant_htons(ETH_P_802_2);
1336
1337 stats->rx_packets++;
1338 stats->rx_bytes += skb->len;
1339
1340 netif_rx(skb);
1341 return;
1342
1343 drop:
1344 dev_kfree_skb_irq(skb);
1345 update_stats:
1346 stats->rx_errors++;
1347 stats->rx_dropped++;
1348}
1349
1350/* Get tsc from the firmware */
1351static int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key,
1352 u8 *tsc)
1353{
1354 hermes_t *hw = &priv->hw;
1355 int err = 0;
1356 u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
1357
1358 if ((key < 0) || (key >= 4)) /* tsc_arr has only 4 entries */
1359 return -EINVAL;
1360
1361 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
1362 sizeof(tsc_arr), NULL, &tsc_arr);
1363 if (!err)
1364 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
1365
1366 return err;
1367}
1368
1369static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
1370{
1371 struct orinoco_private *priv = netdev_priv(dev);
1372 struct net_device_stats *stats = &priv->stats;
1373 struct iw_statistics *wstats = &priv->wstats;
1374 struct sk_buff *skb = NULL;
1375 u16 rxfid, status;
1376 int length;
1377 struct hermes_rx_descriptor *desc;
1378 struct orinoco_rx_data *rx_data;
1379 int err;
1380
1381 desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
1382 if (!desc) {
1383 printk(KERN_WARNING
1384 "%s: Can't allocate space for RX descriptor\n",
1385 dev->name);
1386 goto update_stats;
1387 }
1388
1389 rxfid = hermes_read_regn(hw, RXFID);
1390
1391 err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
1392 rxfid, 0);
1393 if (err) {
1394 printk(KERN_ERR "%s: error %d reading Rx descriptor. "
1395 "Frame dropped.\n", dev->name, err);
1396 goto update_stats;
1397 }
1398
1399 status = le16_to_cpu(desc->status);
1400
1401 if (status & HERMES_RXSTAT_BADCRC) {
1402 DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n",
1403 dev->name);
1404 stats->rx_crc_errors++;
1405 goto update_stats;
1406 }
1407
1408 /* Handle frames in monitor mode */
1409 if (priv->iw_mode == IW_MODE_MONITOR) {
1410 orinoco_rx_monitor(dev, rxfid, desc);
1411 goto out;
1412 }
1413
1414 if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
1415 DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
1416 dev->name);
1417 wstats->discard.code++;
1418 goto update_stats;
1419 }
1420
1421 length = le16_to_cpu(desc->data_len);
1422
1423 /* Sanity checks */
1424 if (length < 3) { /* Not enough for even an 802.2 LLC header */
1425 /* At least on Symbol firmware with PCF we get quite a
1426 lot of these legitimately - Poll frames with no
1427 data. */
1428 goto out;
1429 }
1430 if (length > IEEE80211_MAX_DATA_LEN) {
1431 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
1432 dev->name, length);
1433 stats->rx_length_errors++;
1434 goto update_stats;
1435 }
1436
1437 /* Payload size does not include Michael MIC. Increase payload
1438 * size to read it together with the data. */
1439 if (status & HERMES_RXSTAT_MIC)
1440 length += MICHAEL_MIC_LEN;
1441
1442 /* We need space for the packet data itself, plus an ethernet
1443 header, plus 2 bytes so we can align the IP header on a
1444 32bit boundary, plus 1 byte so we can read in odd length
1445 packets from the card, which has an IO granularity of 16
1446 bits */
1447 skb = dev_alloc_skb(length+ETH_HLEN+2+1);
1448 if (!skb) {
1449 printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
1450 dev->name);
1451 goto update_stats;
1452 }
1453
1454 /* We'll prepend the header, so reserve space for it. The worst
1455 case is no decapsulation, when the 802.3 header is prepended and
1456 nothing is removed. 2 is for aligning the IP header. */
1457 skb_reserve(skb, ETH_HLEN + 2);
1458
1459 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, length),
1460 ALIGN(length, 2), rxfid,
1461 HERMES_802_2_OFFSET);
1462 if (err) {
1463 printk(KERN_ERR "%s: error %d reading frame. "
1464 "Frame dropped.\n", dev->name, err);
1465 goto drop;
1466 }
1467
1468 /* Add desc and skb to rx queue */
1469 rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
1470 if (!rx_data) {
1471 printk(KERN_WARNING "%s: Can't allocate RX packet\n",
1472 dev->name);
1473 goto drop;
1474 }
1475 rx_data->desc = desc;
1476 rx_data->skb = skb;
1477 list_add_tail(&rx_data->list, &priv->rx_list);
1478 tasklet_schedule(&priv->rx_tasklet);
1479
1480 return;
1481
1482drop:
1483 dev_kfree_skb_irq(skb);
1484update_stats:
1485 stats->rx_errors++;
1486 stats->rx_dropped++;
1487out:
1488 kfree(desc);
1489}
1490
1491static void orinoco_rx(struct net_device *dev,
1492 struct hermes_rx_descriptor *desc,
1493 struct sk_buff *skb)
1494{
1495 struct orinoco_private *priv = netdev_priv(dev);
1496 struct net_device_stats *stats = &priv->stats;
1497 u16 status, fc;
1498 int length;
1499 struct ethhdr *hdr;
1500
1501 status = le16_to_cpu(desc->status);
1502 length = le16_to_cpu(desc->data_len);
1503 fc = le16_to_cpu(desc->frame_ctl);
1504
1505 /* Calculate and check MIC */
1506 if (status & HERMES_RXSTAT_MIC) {
1507 int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >>
1508 HERMES_MIC_KEY_ID_SHIFT);
1509 u8 mic[MICHAEL_MIC_LEN];
1510 u8 *rxmic;
1511 u8 *src = (fc & IEEE80211_FCTL_FROMDS) ?
1512 desc->addr3 : desc->addr2;
1513
1514 /* Extract Michael MIC from payload */
1515 rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
1516
1517 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
1518 length -= MICHAEL_MIC_LEN;
1519
1520 michael_mic(priv->rx_tfm_mic,
1521 priv->tkip_key[key_id].rx_mic,
1522 desc->addr1,
1523 src,
1524 0, /* priority or QoS? */
1525 skb->data,
1526 skb->len,
1527 &mic[0]);
1528
1529 if (memcmp(mic, rxmic,
1530 MICHAEL_MIC_LEN)) {
1531 union iwreq_data wrqu;
1532 struct iw_michaelmicfailure wxmic;
1533
1534 printk(KERN_WARNING "%s: "
1535 "Invalid Michael MIC in data frame from %pM, "
1536 "using key %i\n",
1537 dev->name, src, key_id);
1538
1539 /* TODO: update stats */
1540
1541 /* Notify userspace */
1542 memset(&wxmic, 0, sizeof(wxmic));
1543 wxmic.flags = key_id & IW_MICFAILURE_KEY_ID;
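			/* The low bit of the first octet of addr1 is the
			 * group/multicast bit, so it tells us whether a
			 * group or pairwise key failed. */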
1544 wxmic.flags |= (desc->addr1[0] & 1) ?
1545 IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
1546 wxmic.src_addr.sa_family = ARPHRD_ETHER;
1547 memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN);
1548
1549 (void) orinoco_hw_get_tkip_iv(priv, key_id,
1550 &wxmic.tsc[0]);
1551
1552 memset(&wrqu, 0, sizeof(wrqu));
1553 wrqu.data.length = sizeof(wxmic);
1554 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu,
1555 (char *) &wxmic);
1556
1557 goto drop;
1558 }
1559 }
1560
1561 /* Handle decapsulation
1562 * In most cases, the firmware tells us about SNAP frames.
1563 * For some reason, the SNAP frames sent by LinkSys APs
1564 * are not properly recognised by most firmwares.
1565 * So, check ourselves */
1566 if (length >= ENCAPS_OVERHEAD &&
1567 (((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
1568 ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
1569 is_ethersnap(skb->data))) {
1570 /* These indicate a SNAP within 802.2 LLC within an
1571 802.11 frame, which we'll need to de-encapsulate to
1572 the original Ethernet-II frame. */
1573 hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN - ENCAPS_OVERHEAD);
1574 } else {
1575 /* 802.3 frame - prepend 802.3 header as is */
1576 hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
1577 hdr->h_proto = htons(length);
1578 }
1579 memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
1580 if (fc & IEEE80211_FCTL_FROMDS)
1581 memcpy(hdr->h_source, desc->addr3, ETH_ALEN);
1582 else
1583 memcpy(hdr->h_source, desc->addr2, ETH_ALEN);
1584
1585 skb->protocol = eth_type_trans(skb, dev);
1586 skb->ip_summed = CHECKSUM_NONE;
1587 if (fc & IEEE80211_FCTL_TODS)
1588 skb->pkt_type = PACKET_OTHERHOST;
1589
1590 /* Process the wireless stats if needed */
1591 orinoco_stat_gather(dev, skb, desc);
1592
1593 /* Pass the packet to the networking stack */
1594 netif_rx(skb);
1595 stats->rx_packets++;
1596 stats->rx_bytes += length;
1597
1598 return;
1599
1600 drop:
1601 dev_kfree_skb(skb);
1602 stats->rx_errors++;
1603 stats->rx_dropped++;
1604}
1605
1606static void orinoco_rx_isr_tasklet(unsigned long data)
1607{
1608 struct net_device *dev = (struct net_device *) data;
1609 struct orinoco_private *priv = netdev_priv(dev);
1610 struct orinoco_rx_data *rx_data, *temp;
1611 struct hermes_rx_descriptor *desc;
1612 struct sk_buff *skb;
1613 unsigned long flags;
1614
1615 /* orinoco_rx requires the driver lock, and we also need to
1616 * protect priv->rx_list, so just hold the lock over the
1617 * lot.
1618 *
1619 * If orinoco_lock fails, we've unplugged the card. In this
1620 * case just abort. */
1621 if (orinoco_lock(priv, &flags) != 0)
1622 return;
1623
1624 /* extract desc and skb from queue */
1625 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
1626 desc = rx_data->desc;
1627 skb = rx_data->skb;
1628 list_del(&rx_data->list);
1629 kfree(rx_data);
1630
1631 orinoco_rx(dev, desc, skb);
1632
1633 kfree(desc);
1634 }
1635
1636 orinoco_unlock(priv, &flags);
1637}
1638
1639/********************************************************************/
1640/* Rx path (info frames) */
1641/********************************************************************/
1642
1643static void print_linkstatus(struct net_device *dev, u16 status)
1644{
1645 char * s;
1646
1647 if (suppress_linkstatus)
1648 return;
1649
1650 switch (status) {
1651 case HERMES_LINKSTATUS_NOT_CONNECTED:
1652 s = "Not Connected";
1653 break;
1654 case HERMES_LINKSTATUS_CONNECTED:
1655 s = "Connected";
1656 break;
1657 case HERMES_LINKSTATUS_DISCONNECTED:
1658 s = "Disconnected";
1659 break;
1660 case HERMES_LINKSTATUS_AP_CHANGE:
1661 s = "AP Changed";
1662 break;
1663 case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
1664 s = "AP Out of Range";
1665 break;
1666 case HERMES_LINKSTATUS_AP_IN_RANGE:
1667 s = "AP In Range";
1668 break;
1669 case HERMES_LINKSTATUS_ASSOC_FAILED:
1670 s = "Association Failed";
1671 break;
1672 default:
1673 s = "UNKNOWN";
1674 }
1675
1676 printk(KERN_DEBUG "%s: New link status: %s (%04x)\n",
1677 dev->name, s, status);
1678}
1679
1680/* Search scan results for requested BSSID, join it if found */
1681static void orinoco_join_ap(struct work_struct *work)
1682{
1683 struct orinoco_private *priv =
1684 container_of(work, struct orinoco_private, join_work);
1685 struct net_device *dev = priv->ndev;
1686 struct hermes *hw = &priv->hw;
1687 int err;
1688 unsigned long flags;
1689 struct join_req {
1690 u8 bssid[ETH_ALEN];
1691 __le16 channel;
1692 } __attribute__ ((packed)) req;
1693 const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
1694 struct prism2_scan_apinfo *atom = NULL;
1695 int offset = 4;
1696 int found = 0;
1697 u8 *buf;
1698 u16 len;
1699
1700 /* Allocate buffer for scan results */
1701 buf = kmalloc(MAX_SCAN_LEN, GFP_KERNEL);
1702 if (! buf)
1703 return;
1704
1705 if (orinoco_lock(priv, &flags) != 0)
1706 goto fail_lock;
1707
1708 /* Sanity checks in case user changed something in the meantime */
1709 if (! priv->bssid_fixed)
1710 goto out;
1711
1712 if (strlen(priv->desired_essid) == 0)
1713 goto out;
1714
1715 /* Read scan results from the firmware */
1716 err = hermes_read_ltv(hw, USER_BAP,
1717 HERMES_RID_SCANRESULTSTABLE,
1718 MAX_SCAN_LEN, &len, buf);
1719 if (err) {
1720 printk(KERN_ERR "%s: Cannot read scan results\n",
1721 dev->name);
1722 goto out;
1723 }
1724
1725 len = HERMES_RECLEN_TO_BYTES(len);
1726
1727 /* Go through the scan results looking for the channel of the AP
1728 * we were requested to join */
1729 for (; offset + atom_len <= len; offset += atom_len) {
1730 atom = (struct prism2_scan_apinfo *) (buf + offset);
1731 if (memcmp(&atom->bssid, priv->desired_bssid, ETH_ALEN) == 0) {
1732 found = 1;
1733 break;
1734 }
1735 }
1736
1737 if (! found) {
1738 DEBUG(1, "%s: Requested AP not found in scan results\n",
1739 dev->name);
1740 goto out;
1741 }
1742
1743 memcpy(req.bssid, priv->desired_bssid, ETH_ALEN);
1744 req.channel = atom->channel; /* both are little-endian */
1745 err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFJOINREQUEST,
1746 &req);
1747 if (err)
1748 printk(KERN_ERR "%s: Error issuing join request\n", dev->name);
1749
1750 out:
1751 orinoco_unlock(priv, &flags);
1752
1753 fail_lock:
1754 kfree(buf);
1755}
1756
1757/* Send new BSSID to userspace */
1758static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
1759{
1760 struct net_device *dev = priv->ndev;
1761 struct hermes *hw = &priv->hw;
1762 union iwreq_data wrqu;
1763 int err;
1764
1765 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
1766 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1767 if (err != 0)
1768 return;
1769
1770 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1771
1772 /* Send event to user space */
1773 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
1774}
1775
1776static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
1777{
1778 struct net_device *dev = priv->ndev;
1779 struct hermes *hw = &priv->hw;
1780 union iwreq_data wrqu;
1781 int err;
1782 u8 buf[88];
1783 u8 *ie;
1784
1785 if (!priv->has_wpa)
1786 return;
1787
1788 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
1789 sizeof(buf), NULL, &buf);
1790 if (err != 0)
1791 return;
1792
1793 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1794 if (ie) {
1795 int rem = sizeof(buf) - (ie - &buf[0]);
1796 wrqu.data.length = ie[1] + 2;
1797 if (wrqu.data.length > rem)
1798 wrqu.data.length = rem;
1799
1800 if (wrqu.data.length)
1801 /* Send event to user space */
1802 wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie);
1803 }
1804}
1805
1806static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
1807{
1808 struct net_device *dev = priv->ndev;
1809 struct hermes *hw = &priv->hw;
1810 union iwreq_data wrqu;
1811 int err;
1812 u8 buf[88]; /* TODO: verify max size or IW_GENERIC_IE_MAX */
1813 u8 *ie;
1814
1815 if (!priv->has_wpa)
1816 return;
1817
1818 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
1819 sizeof(buf), NULL, &buf);
1820 if (err != 0)
1821 return;
1822
1823 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1824 if (ie) {
1825 int rem = sizeof(buf) - (ie - &buf[0]);
1826 wrqu.data.length = ie[1] + 2;
1827 if (wrqu.data.length > rem)
1828 wrqu.data.length = rem;
1829
1830 if (wrqu.data.length)
1831 /* Send event to user space */
1832 wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie);
1833 }
1834}
1835
1836static void orinoco_send_wevents(struct work_struct *work)
1837{
1838 struct orinoco_private *priv =
1839 container_of(work, struct orinoco_private, wevent_work);
1840 unsigned long flags;
1841
1842 if (orinoco_lock(priv, &flags) != 0)
1843 return;
1844
1845 orinoco_send_assocreqie_wevent(priv);
1846 orinoco_send_assocrespie_wevent(priv);
1847 orinoco_send_bssid_wevent(priv);
1848
1849 orinoco_unlock(priv, &flags);
1850}
1851
1852static inline void orinoco_clear_scan_results(struct orinoco_private *priv,
1853 unsigned long scan_age)
1854{
1855 if (priv->has_ext_scan) {
1856 struct xbss_element *bss;
1857 struct xbss_element *tmp_bss;
1858
1859 /* Blow away current list of scan results */
1860 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
1861 if (!scan_age ||
1862 time_after(jiffies, bss->last_scanned + scan_age)) {
1863 list_move_tail(&bss->list,
1864 &priv->bss_free_list);
1865 /* Don't blow away ->list, just BSS data */
1866 memset(&bss->bss, 0, sizeof(bss->bss));
1867 bss->last_scanned = 0;
1868 }
1869 }
1870 } else {
1871 struct bss_element *bss;
1872 struct bss_element *tmp_bss;
1873
1874 /* Blow away current list of scan results */
1875 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
1876 if (!scan_age ||
1877 time_after(jiffies, bss->last_scanned + scan_age)) {
1878 list_move_tail(&bss->list,
1879 &priv->bss_free_list);
1880 /* Don't blow away ->list, just BSS data */
1881 memset(&bss->bss, 0, sizeof(bss->bss));
1882 bss->last_scanned = 0;
1883 }
1884 }
1885 }
1886}
1887
1888static void orinoco_add_ext_scan_result(struct orinoco_private *priv,
1889 struct agere_ext_scan_info *atom)
1890{
1891 struct xbss_element *bss = NULL;
1892 int found = 0;
1893
1894 /* Try to update an existing bss first */
1895 list_for_each_entry(bss, &priv->bss_list, list) {
1896 if (compare_ether_addr(bss->bss.bssid, atom->bssid))
1897 continue;
1898 /* ESSID lengths */
1899 if (bss->bss.data[1] != atom->data[1])
1900 continue;
1901 if (memcmp(&bss->bss.data[2], &atom->data[2],
1902 atom->data[1]))
1903 continue;
1904 found = 1;
1905 break;
1906 }
1907
1908 /* Grab a bss off the free list */
1909 if (!found && !list_empty(&priv->bss_free_list)) {
1910 bss = list_entry(priv->bss_free_list.next,
1911 struct xbss_element, list);
1912 list_del(priv->bss_free_list.next);
1913
1914 list_add_tail(&bss->list, &priv->bss_list);
1915 }
1916
1917 if (bss) {
1918 /* Always update the BSS to get latest beacon info */
1919 memcpy(&bss->bss, atom, sizeof(bss->bss));
1920 bss->last_scanned = jiffies;
1921 }
1922}
1923
1924static int orinoco_process_scan_results(struct net_device *dev,
1925 unsigned char *buf,
1926 int len)
1927{
1928 struct orinoco_private *priv = netdev_priv(dev);
1929 int offset; /* In the scan data */
1930 union hermes_scan_info *atom;
1931 int atom_len;
1932
1933 switch (priv->firmware_type) {
1934 case FIRMWARE_TYPE_AGERE:
1935 atom_len = sizeof(struct agere_scan_apinfo);
1936 offset = 0;
1937 break;
1938 case FIRMWARE_TYPE_SYMBOL:
1939 /* Lack of documentation necessitates this hack.
1940 * Different firmwares have 68 or 76 byte long atoms.
1941 * We try modulo first. If the length divides by both,
1942 * we check what would be the channel field of the second
1943 * atom if atoms were 68 bytes long. 76-byte atoms have 0 there.
1944 * Valid channel cannot be 0. */
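		/* 1292 is the smallest non-zero length divisible by both
		 * 68 and 76 (lcm(68, 76) = 1292), hence the buf[68] probe
		 * below only matters from that size up. */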
1945 if (len % 76)
1946 atom_len = 68;
1947 else if (len % 68)
1948 atom_len = 76;
1949 else if (len >= 1292 && buf[68] == 0)
1950 atom_len = 76;
1951 else
1952 atom_len = 68;
1953 offset = 0;
1954 break;
1955 case FIRMWARE_TYPE_INTERSIL:
1956 offset = 4;
1957 if (priv->has_hostscan) {
1958 atom_len = le16_to_cpup((__le16 *)buf);
1959 /* Sanity check for atom_len */
1960 if (atom_len < sizeof(struct prism2_scan_apinfo)) {
1961 printk(KERN_ERR "%s: Invalid atom_len in scan "
1962 "data: %d\n", dev->name, atom_len);
1963 return -EIO;
1964 }
1965 } else
1966 atom_len = offsetof(struct prism2_scan_apinfo, atim);
1967 break;
1968 default:
1969 return -EOPNOTSUPP;
1970 }
1971
1972 /* Check that we got a whole number of atoms */
1973 if ((len - offset) % atom_len) {
1974 printk(KERN_ERR "%s: Unexpected scan data length %d, "
1975 "atom_len %d, offset %d\n", dev->name, len,
1976 atom_len, offset);
1977 return -EIO;
1978 }
1979
1980 orinoco_clear_scan_results(priv, msecs_to_jiffies(15000));
1981
1982 /* Read the entries one by one */
1983 for (; offset + atom_len <= len; offset += atom_len) {
1984 int found = 0;
1985 struct bss_element *bss = NULL;
1986
1987 /* Get next atom */
1988 atom = (union hermes_scan_info *) (buf + offset);
1989
1990 /* Try to update an existing bss first */
1991 list_for_each_entry(bss, &priv->bss_list, list) {
1992 if (compare_ether_addr(bss->bss.a.bssid, atom->a.bssid))
1993 continue;
1994 if (le16_to_cpu(bss->bss.a.essid_len) !=
1995 le16_to_cpu(atom->a.essid_len))
1996 continue;
1997 if (memcmp(bss->bss.a.essid, atom->a.essid,
1998 le16_to_cpu(atom->a.essid_len)))
1999 continue;
2000 found = 1;
2001 break;
2002 }
2003
2004 /* Grab a bss off the free list */
2005 if (!found && !list_empty(&priv->bss_free_list)) {
2006 bss = list_entry(priv->bss_free_list.next,
2007 struct bss_element, list);
2008 list_del(priv->bss_free_list.next);
2009
2010 list_add_tail(&bss->list, &priv->bss_list);
2011 }
2012
2013 if (bss) {
2014 /* Always update the BSS to get latest beacon info */
2015 memcpy(&bss->bss, atom, sizeof(bss->bss));
2016 bss->last_scanned = jiffies;
2017 }
2018 }
2019
2020 return 0;
2021}
2022
2023static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
2024{
2025 struct orinoco_private *priv = netdev_priv(dev);
2026 u16 infofid;
2027 struct {
2028 __le16 len;
2029 __le16 type;
2030 } __attribute__ ((packed)) info;
2031 int len, type;
2032 int err;
2033
2034 /* This is an answer to an INQUIRE command that we did earlier,
2035 * or an information "event" generated by the card.
2036 * The controller returns to us a pseudo frame containing
2037 * the information in question - Jean II */
2038 infofid = hermes_read_regn(hw, INFOFID);
2039
2040 /* Read the info frame header - don't try too hard */
2041 err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
2042 infofid, 0);
2043 if (err) {
2044 printk(KERN_ERR "%s: error %d reading info frame. "
2045 "Frame dropped.\n", dev->name, err);
2046 return;
2047 }
2048
2049 len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
2050 type = le16_to_cpu(info.type);
2051
2052 switch (type) {
2053 case HERMES_INQ_TALLIES: {
2054 struct hermes_tallies_frame tallies;
2055 struct iw_statistics *wstats = &priv->wstats;
2056
2057 if (len > sizeof(tallies)) {
2058 printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
2059 dev->name, len);
2060 len = sizeof(tallies);
2061 }
2062
2063 err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len,
2064 infofid, sizeof(info));
2065 if (err)
2066 break;
2067
2068 /* Increment our various counters */
2069 /* wstats->discard.nwid - no wrong BSSID stuff */
2070 wstats->discard.code +=
2071 le16_to_cpu(tallies.RxWEPUndecryptable);
2072 if (len == sizeof(tallies))
2073 wstats->discard.code +=
2074 le16_to_cpu(tallies.RxDiscards_WEPICVError) +
2075 le16_to_cpu(tallies.RxDiscards_WEPExcluded);
2076 wstats->discard.misc +=
2077 le16_to_cpu(tallies.TxDiscardsWrongSA);
2078 wstats->discard.fragment +=
2079 le16_to_cpu(tallies.RxMsgInBadMsgFragments);
2080 wstats->discard.retries +=
2081 le16_to_cpu(tallies.TxRetryLimitExceeded);
2082 /* wstats->miss.beacon - no match */
2083 }
2084 break;
2085 case HERMES_INQ_LINKSTATUS: {
2086 struct hermes_linkstatus linkstatus;
2087 u16 newstatus;
2088 int connected;
2089
2090 if (priv->iw_mode == IW_MODE_MONITOR)
2091 break;
2092
2093 if (len != sizeof(linkstatus)) {
2094 printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
2095 dev->name, len);
2096 break;
2097 }
2098
2099 err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len,
2100 infofid, sizeof(info));
2101 if (err)
2102 break;
2103 newstatus = le16_to_cpu(linkstatus.linkstatus);
2104
2105 /* Symbol firmware uses "out of range" to signal that
2106 * the hostscan frame can be requested. */
2107 if (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE &&
2108 priv->firmware_type == FIRMWARE_TYPE_SYMBOL &&
2109 priv->has_hostscan && priv->scan_inprogress) {
2110 hermes_inquire(hw, HERMES_INQ_HOSTSCAN_SYMBOL);
2111 break;
2112 }
2113
2114 connected = (newstatus == HERMES_LINKSTATUS_CONNECTED)
2115 || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
2116 || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE);
2117
2118 if (connected)
2119 netif_carrier_on(dev);
2120 else if (!ignore_disconnect)
2121 netif_carrier_off(dev);
2122
2123 if (newstatus != priv->last_linkstatus) {
2124 priv->last_linkstatus = newstatus;
2125 print_linkstatus(dev, newstatus);
2126 /* The info frame contains only one word which is the
2127 * status (see hermes.h). The status is pretty boring
2128 * in itself; that's why we export the new BSSID...
2129 * Jean II */
2130 schedule_work(&priv->wevent_work);
2131 }
2132 }
2133 break;
2134 case HERMES_INQ_SCAN:
2135 if (!priv->scan_inprogress && priv->bssid_fixed &&
2136 priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
2137 schedule_work(&priv->join_work);
2138 break;
2139 }
2140 /* fall through */
2141 case HERMES_INQ_HOSTSCAN:
2142 case HERMES_INQ_HOSTSCAN_SYMBOL: {
2143 /* Result of a scan. Contains information about
2144 * cells in the vicinity - Jean II */
2145 union iwreq_data wrqu;
2146 unsigned char *buf;
2147
2148 /* Scan is no longer in progress */
2149 priv->scan_inprogress = 0;
2150
2151 /* Sanity check */
2152 if (len > 4096) {
2153 printk(KERN_WARNING "%s: Scan results too large (%d bytes)\n",
2154 dev->name, len);
2155 break;
2156 }
2157
2158 /* Allocate buffer for results */
2159 buf = kmalloc(len, GFP_ATOMIC);
2160 if (buf == NULL)
2161 /* No memory, so can't printk()... */
2162 break;
2163
2164 /* Read scan data */
2165 err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len,
2166 infofid, sizeof(info));
2167 if (err) {
2168 kfree(buf);
2169 break;
2170 }
2171
2172#ifdef ORINOCO_DEBUG
2173 {
2174 int i;
2175 printk(KERN_DEBUG "Scan result [%02X", buf[0]);
2176 for(i = 1; i < (len * 2); i++)
2177 printk(":%02X", buf[i]);
2178 printk("]\n");
2179 }
2180#endif /* ORINOCO_DEBUG */
2181
2182 if (orinoco_process_scan_results(dev, buf, len) == 0) {
2183 /* Send an empty event to user space.
2184 * We don't send the received data on the event because
2185 * it would require us to do complex transcoding, and
2186 * we want to minimise the work done in the irq handler.
2187 * Use a request to extract the data - Jean II */
2188 wrqu.data.length = 0;
2189 wrqu.data.flags = 0;
2190 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
2191 }
2192 kfree(buf);
2193 }
2194 break;
2195 case HERMES_INQ_CHANNELINFO:
2196 {
2197 struct agere_ext_scan_info *bss;
2198
2199 if (!priv->scan_inprogress) {
2200 printk(KERN_DEBUG "%s: Got chaninfo without scan, "
2201 "len=%d\n", dev->name, len);
2202 break;
2203 }
2204
2205 /* An empty result indicates that the scan is complete */
2206 if (len == 0) {
2207 union iwreq_data wrqu;
2208
2209 /* Scan is no longer in progress */
2210 priv->scan_inprogress = 0;
2211
2212 wrqu.data.length = 0;
2213 wrqu.data.flags = 0;
2214 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
2215 break;
2216 }
2217
2218 /* Sanity check */
2219 else if (len > sizeof(*bss)) {
2220 printk(KERN_WARNING
2221 "%s: Ext scan results too large (%d bytes). "
2222 "Truncating results to %zd bytes.\n",
2223 dev->name, len, sizeof(*bss));
2224 len = sizeof(*bss);
2225 } else if (len < (offsetof(struct agere_ext_scan_info,
2226 data) + 2)) {
2227 /* Drop this result now so we don't have to
2228 * keep checking later */
2229 printk(KERN_WARNING
2230 "%s: Ext scan results too short (%d bytes)\n",
2231 dev->name, len);
2232 break;
2233 }
2234
2235 bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
2236 if (bss == NULL)
2237 break;
2238
2239 /* Read scan data */
2240 err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
2241 infofid, sizeof(info));
2242 if (err) {
2243 kfree(bss);
2244 break;
2245 }
2246
2247 orinoco_add_ext_scan_result(priv, bss);
2248
2249 kfree(bss);
2250 break;
2251 }
2252 case HERMES_INQ_SEC_STAT_AGERE:
2253 /* Security status (Agere specific) */
2254 /* Ignore this frame for now */
2255 if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
2256 break;
2257 /* fall through */
2258 default:
2259 printk(KERN_DEBUG "%s: Unknown information frame received: "
2260 "type 0x%04x, length %d\n", dev->name, type, len);
2261 /* We don't actually do anything about it */
2262 break;
2263 }
2264}
2265
2266static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
2267{
2268 if (net_ratelimit())
2269 printk(KERN_DEBUG "%s: Information frame lost.\n", dev->name);
2270}
2271
2272/********************************************************************/
2273/* Internal hardware control routines */
2274/********************************************************************/
2275
2276int __orinoco_up(struct net_device *dev)
2277{
2278 struct orinoco_private *priv = netdev_priv(dev);
2279 struct hermes *hw = &priv->hw;
2280 int err;
2281
2282 netif_carrier_off(dev); /* just to make sure */
2283
2284 err = __orinoco_program_rids(dev);
2285 if (err) {
2286 printk(KERN_ERR "%s: Error %d configuring card\n",
2287 dev->name, err);
2288 return err;
2289 }
2290
2291 /* Fire things up again */
2292 hermes_set_irqmask(hw, ORINOCO_INTEN);
2293 err = hermes_enable_port(hw, 0);
2294 if (err) {
2295 printk(KERN_ERR "%s: Error %d enabling MAC port\n",
2296 dev->name, err);
2297 return err;
2298 }
2299
2300 netif_start_queue(dev);
2301
2302 return 0;
2303}
2304
2305int __orinoco_down(struct net_device *dev)
2306{
2307 struct orinoco_private *priv = netdev_priv(dev);
2308 struct hermes *hw = &priv->hw;
2309 int err;
2310
2311 netif_stop_queue(dev);
2312
2313 if (! priv->hw_unavailable) {
2314 if (! priv->broken_disableport) {
2315 err = hermes_disable_port(hw, 0);
2316 if (err) {
2317 /* Some firmwares (e.g. Intersil 1.3.x) seem
2318 * to have problems disabling the port, oh
2319 * well, too bad. */
2320 printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
2321 dev->name, err);
2322 priv->broken_disableport = 1;
2323 }
2324 }
2325 hermes_set_irqmask(hw, 0);
2326 hermes_write_regn(hw, EVACK, 0xffff);
2327 }
2328
2329 /* firmware will have to reassociate */
2330 netif_carrier_off(dev);
2331 priv->last_linkstatus = 0xffff;
2332
2333 return 0;
2334}
2335
2336static int orinoco_allocate_fid(struct net_device *dev)
2337{
2338 struct orinoco_private *priv = netdev_priv(dev);
2339 struct hermes *hw = &priv->hw;
2340 int err;
2341
2342 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
2343 if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
2344 /* Try workaround for old Symbol firmware bug */
2345 printk(KERN_WARNING "%s: firmware ALLOC bug detected "
2346 "(old Symbol firmware?). Trying to work around... ",
2347 dev->name);
2348
2349 priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
2350 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
2351 if (err)
2352 printk("failed!\n");
2353 else
2354 printk("ok.\n");
2355 }
2356
2357 return err;
2358}
2359
2360int orinoco_reinit_firmware(struct net_device *dev)
2361{
2362 struct orinoco_private *priv = netdev_priv(dev);
2363 struct hermes *hw = &priv->hw;
2364 int err;
2365
2366 err = hermes_init(hw);
2367 if (priv->do_fw_download && !err) {
2368 err = orinoco_download(priv);
2369 if (err)
2370 priv->do_fw_download = 0;
2371 }
2372 if (!err)
2373 err = orinoco_allocate_fid(dev);
2374
2375 return err;
2376}
2377
2378static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
2379{
2380 hermes_t *hw = &priv->hw;
2381 int err = 0;
2382
2383 if (priv->bitratemode >= BITRATE_TABLE_SIZE) {
2384 printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
2385 priv->ndev->name, priv->bitratemode);
2386 return -EINVAL;
2387 }
2388
2389 switch (priv->firmware_type) {
2390 case FIRMWARE_TYPE_AGERE:
2391 err = hermes_write_wordrec(hw, USER_BAP,
2392 HERMES_RID_CNFTXRATECONTROL,
2393 bitrate_table[priv->bitratemode].agere_txratectrl);
2394 break;
2395 case FIRMWARE_TYPE_INTERSIL:
2396 case FIRMWARE_TYPE_SYMBOL:
2397 err = hermes_write_wordrec(hw, USER_BAP,
2398 HERMES_RID_CNFTXRATECONTROL,
2399 bitrate_table[priv->bitratemode].intersil_txratectrl);
2400 break;
2401 default:
2402 BUG();
2403 }
2404
2405 return err;
2406}
2407
2408/* Set fixed AP address */
2409static int __orinoco_hw_set_wap(struct orinoco_private *priv)
2410{
2411 int roaming_flag;
2412 int err = 0;
2413 hermes_t *hw = &priv->hw;
2414
2415 switch (priv->firmware_type) {
2416 case FIRMWARE_TYPE_AGERE:
2417 /* not supported */
2418 break;
2419 case FIRMWARE_TYPE_INTERSIL:
2420 if (priv->bssid_fixed)
2421 roaming_flag = 2;
2422 else
2423 roaming_flag = 1;
2424
2425 err = hermes_write_wordrec(hw, USER_BAP,
2426 HERMES_RID_CNFROAMINGMODE,
2427 roaming_flag);
2428 break;
2429 case FIRMWARE_TYPE_SYMBOL:
2430 err = HERMES_WRITE_RECORD(hw, USER_BAP,
2431 HERMES_RID_CNFMANDATORYBSSID_SYMBOL,
2432 &priv->desired_bssid);
2433 break;
2434 }
2435 return err;
2436}
2437
2438/* Change the WEP keys and/or the current keys. Can be called
2439 * either from __orinoco_hw_setup_enc() or directly from
2440 * orinoco_ioctl_setiwencode(). In the latter case the association
2441 * with the AP is not broken (if the firmware can handle it),
2442 * which is needed for 802.1x implementations. */
2443static int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
2444{
2445 hermes_t *hw = &priv->hw;
2446 int err = 0;
2447
2448 switch (priv->firmware_type) {
2449 case FIRMWARE_TYPE_AGERE:
2450 err = HERMES_WRITE_RECORD(hw, USER_BAP,
2451 HERMES_RID_CNFWEPKEYS_AGERE,
2452 &priv->keys);
2453 if (err)
2454 return err;
2455 err = hermes_write_wordrec(hw, USER_BAP,
2456 HERMES_RID_CNFTXKEY_AGERE,
2457 priv->tx_key);
2458 if (err)
2459 return err;
2460 break;
2461 case FIRMWARE_TYPE_INTERSIL:
2462 case FIRMWARE_TYPE_SYMBOL:
2463 {
2464 int keylen;
2465 int i;
2466
2467 /* Force uniform key length to work around firmware bugs */
2468 keylen = le16_to_cpu(priv->keys[priv->tx_key].len);
2469
2470 if (keylen > LARGE_KEY_SIZE) {
2471 printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
2472 priv->ndev->name, priv->tx_key, keylen);
2473 return -E2BIG;
2474 }
2475
2476 /* Write all 4 keys */
2477 for(i = 0; i < ORINOCO_MAX_KEYS; i++) {
2478 err = hermes_write_ltv(hw, USER_BAP,
2479 HERMES_RID_CNFDEFAULTKEY0 + i,
2480 HERMES_BYTES_TO_RECLEN(keylen),
2481 priv->keys[i].data);
2482 if (err)
2483 return err;
2484 }
2485
2486 /* Write the index of the key used in transmission */
2487 err = hermes_write_wordrec(hw, USER_BAP,
2488 HERMES_RID_CNFWEPDEFAULTKEYID,
2489 priv->tx_key);
2490 if (err)
2491 return err;
2492 }
2493 break;
2494 }
2495
2496 return 0;
2497}
2498
2499static int __orinoco_hw_setup_enc(struct orinoco_private *priv)
2500{
2501 hermes_t *hw = &priv->hw;
2502 int err = 0;
2503 int master_wep_flag;
2504 int auth_flag;
2505 int enc_flag;
2506
2507 /* Setup WEP keys for WEP and WPA */
2508 if (priv->encode_alg)
2509 __orinoco_hw_setup_wepkeys(priv);
2510
2511 if (priv->wep_restrict)
2512 auth_flag = HERMES_AUTH_SHARED_KEY;
2513 else
2514 auth_flag = HERMES_AUTH_OPEN;
2515
2516 if (priv->wpa_enabled)
2517 enc_flag = 2;
2518 else if (priv->encode_alg == IW_ENCODE_ALG_WEP)
2519 enc_flag = 1;
2520 else
2521 enc_flag = 0;
2522
2523 switch (priv->firmware_type) {
2524 case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
2525 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
2526 /* Enable the shared-key authentication. */
2527 err = hermes_write_wordrec(hw, USER_BAP,
2528 HERMES_RID_CNFAUTHENTICATION_AGERE,
2529 auth_flag);
2530 }
2531 err = hermes_write_wordrec(hw, USER_BAP,
2532 HERMES_RID_CNFWEPENABLED_AGERE,
2533 enc_flag);
2534 if (err)
2535 return err;
2536
2537 if (priv->has_wpa) {
2538 /* Set WPA key management */
2539 err = hermes_write_wordrec(hw, USER_BAP,
2540 HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE,
2541 priv->key_mgmt);
2542 if (err)
2543 return err;
2544 }
2545
2546 break;
2547
2548 case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
2549 case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
2550 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
2551 if (priv->wep_restrict ||
2552 (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
2553 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
2554 HERMES_WEP_EXCL_UNENCRYPTED;
2555 else
2556 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED;
2557
2558 err = hermes_write_wordrec(hw, USER_BAP,
2559 HERMES_RID_CNFAUTHENTICATION,
2560 auth_flag);
2561 if (err)
2562 return err;
2563 } else
2564 master_wep_flag = 0;
2565
2566 if (priv->iw_mode == IW_MODE_MONITOR)
2567 master_wep_flag |= HERMES_WEP_HOST_DECRYPT;
2568
2569 /* Master WEP setting : on/off */
2570 err = hermes_write_wordrec(hw, USER_BAP,
2571 HERMES_RID_CNFWEPFLAGS_INTERSIL,
2572 master_wep_flag);
2573 if (err)
2574 return err;
2575
2576 break;
2577 }
2578
2579 return 0;
2580}
2581
2582/* key must be 32 bytes, including the tx and rx MIC keys.
2583 * rsc must be 8 bytes
2584 * tsc must be 8 bytes or NULL
2585 */
2586static int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx,
2587 u8 *key, u8 *rsc, u8 *tsc)
2588{
2589 struct {
2590 __le16 idx;
2591 u8 rsc[IW_ENCODE_SEQ_MAX_SIZE];
2592 u8 key[TKIP_KEYLEN];
2593 u8 tx_mic[MIC_KEYLEN];
2594 u8 rx_mic[MIC_KEYLEN];
2595 u8 tsc[IW_ENCODE_SEQ_MAX_SIZE];
2596 } __attribute__ ((packed)) buf;
2597 int ret;
2598 int err;
2599 int k;
2600 u16 xmitting;
2601
2602 key_idx &= 0x3;
2603
2604 if (set_tx)
2605 key_idx |= 0x8000;
2606
2607 buf.idx = cpu_to_le16(key_idx);
2608 memcpy(buf.key, key,
2609 sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
2610
2611 if (rsc == NULL)
2612 memset(buf.rsc, 0, sizeof(buf.rsc));
2613 else
2614 memcpy(buf.rsc, rsc, sizeof(buf.rsc));
2615
2616 if (tsc == NULL) {
2617 memset(buf.tsc, 0, sizeof(buf.tsc));
2618 buf.tsc[4] = 0x10;
2619 } else {
2620 memcpy(buf.tsc, tsc, sizeof(buf.tsc));
2621 }
2622
2623 /* Wait up to 100ms for the tx queue to empty */
2624 k = 100;
2625 do {
2626 k--;
2627 udelay(1000);
2628 ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
2629 &xmitting);
2630 if (ret)
2631 break;
2632 } while ((k > 0) && xmitting);
2633
2634 if (k == 0)
2635 ret = -ETIMEDOUT;
2636
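	/* Program the key even if the queue never drained; a timeout (ret)
	 * takes precedence over any write error in the value returned
	 * below. */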
2637 err = HERMES_WRITE_RECORD(hw, USER_BAP,
2638 HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE,
2639 &buf);
2640
2641 return ret ? ret : err;
2642}
2643
2644static int orinoco_clear_tkip_key(struct orinoco_private *priv,
2645 int key_idx)
2646{
2647 hermes_t *hw = &priv->hw;
2648 int err;
2649
2650 memset(&priv->tkip_key[key_idx], 0, sizeof(priv->tkip_key[key_idx]));
2651 err = hermes_write_wordrec(hw, USER_BAP,
2652 HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE,
2653 key_idx);
2654 if (err)
2655 printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n",
2656 priv->ndev->name, err, key_idx);
2657 return err;
2658}
2659
2660static int __orinoco_program_rids(struct net_device *dev)
2661{
2662 struct orinoco_private *priv = netdev_priv(dev);
2663 hermes_t *hw = &priv->hw;
2664 int err;
2665 struct hermes_idstring idbuf;
2666
2667 /* Set the MAC address */
2668 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
2669 HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
2670 if (err) {
2671 printk(KERN_ERR "%s: Error %d setting MAC address\n",
2672 dev->name, err);
2673 return err;
2674 }
2675
2676 /* Set up the link mode */
2677 err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE,
2678 priv->port_type);
2679 if (err) {
2680 printk(KERN_ERR "%s: Error %d setting port type\n",
2681 dev->name, err);
2682 return err;
2683 }
2684 /* Set the channel/frequency */
2685 if (priv->channel != 0 && priv->iw_mode != IW_MODE_INFRA) {
2686 err = hermes_write_wordrec(hw, USER_BAP,
2687 HERMES_RID_CNFOWNCHANNEL,
2688 priv->channel);
2689 if (err) {
2690 printk(KERN_ERR "%s: Error %d setting channel %d\n",
2691 dev->name, err, priv->channel);
2692 return err;
2693 }
2694 }
2695
2696 if (priv->has_ibss) {
2697 u16 createibss;
2698
2699 if ((strlen(priv->desired_essid) == 0) && (priv->createibss)) {
2700 printk(KERN_WARNING "%s: This firmware requires an "
2701 "ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
2702 /* With wvlan_cs, in this case, we would crash.
2703 * Hopefully, this driver will behave better...
2704 * Jean II */
2705 createibss = 0;
2706 } else {
2707 createibss = priv->createibss;
2708 }
2709
2710 err = hermes_write_wordrec(hw, USER_BAP,
2711 HERMES_RID_CNFCREATEIBSS,
2712 createibss);
2713 if (err) {
2714 printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n",
2715 dev->name, err);
2716 return err;
2717 }
2718 }
2719
2720 /* Set the desired BSSID */
2721 err = __orinoco_hw_set_wap(priv);
2722 if (err) {
2723 printk(KERN_ERR "%s: Error %d setting AP address\n",
2724 dev->name, err);
2725 return err;
2726 }
2727 /* Set the desired ESSID */
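	/* The record lengths below include the 2-byte length word at the
	 * start of struct hermes_idstring, hence strlen(...) + 2. */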
2728 idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
2729 memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
2730 /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
2731 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
2732 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
2733 &idbuf);
2734 if (err) {
2735 printk(KERN_ERR "%s: Error %d setting OWNSSID\n",
2736 dev->name, err);
2737 return err;
2738 }
2739 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
2740 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
2741 &idbuf);
2742 if (err) {
2743 printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n",
2744 dev->name, err);
2745 return err;
2746 }
2747
2748 /* Set the station name */
2749 idbuf.len = cpu_to_le16(strlen(priv->nick));
2750 memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
2751 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
2752 HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
2753 &idbuf);
2754 if (err) {
2755 printk(KERN_ERR "%s: Error %d setting nickname\n",
2756 dev->name, err);
2757 return err;
2758 }
2759
2760 /* Set AP density */
2761 if (priv->has_sensitivity) {
2762 err = hermes_write_wordrec(hw, USER_BAP,
2763 HERMES_RID_CNFSYSTEMSCALE,
2764 priv->ap_density);
2765 if (err) {
2766 printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
2767 "Disabling sensitivity control\n",
2768 dev->name, err);
2769
2770 priv->has_sensitivity = 0;
2771 }
2772 }
2773
2774 /* Set RTS threshold */
2775 err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
2776 priv->rts_thresh);
2777 if (err) {
2778 printk(KERN_ERR "%s: Error %d setting RTS threshold\n",
2779 dev->name, err);
2780 return err;
2781 }
2782
2783 /* Set fragmentation threshold or MWO robustness */
2784 if (priv->has_mwo)
2785 err = hermes_write_wordrec(hw, USER_BAP,
2786 HERMES_RID_CNFMWOROBUST_AGERE,
2787 priv->mwo_robust);
2788 else
2789 err = hermes_write_wordrec(hw, USER_BAP,
2790 HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
2791 priv->frag_thresh);
2792 if (err) {
2793 printk(KERN_ERR "%s: Error %d setting fragmentation\n",
2794 dev->name, err);
2795 return err;
2796 }
2797
2798 /* Set bitrate */
2799 err = __orinoco_hw_set_bitrate(priv);
2800 if (err) {
2801 printk(KERN_ERR "%s: Error %d setting bitrate\n",
2802 dev->name, err);
2803 return err;
2804 }
2805
2806 /* Set power management */
2807 if (priv->has_pm) {
2808 err = hermes_write_wordrec(hw, USER_BAP,
2809 HERMES_RID_CNFPMENABLED,
2810 priv->pm_on);
2811 if (err) {
2812 printk(KERN_ERR "%s: Error %d setting up PM\n",
2813 dev->name, err);
2814 return err;
2815 }
2816
2817 err = hermes_write_wordrec(hw, USER_BAP,
2818 HERMES_RID_CNFMULTICASTRECEIVE,
2819 priv->pm_mcast);
2820 if (err) {
2821 printk(KERN_ERR "%s: Error %d setting up PM\n",
2822 dev->name, err);
2823 return err;
2824 }
2825 err = hermes_write_wordrec(hw, USER_BAP,
2826 HERMES_RID_CNFMAXSLEEPDURATION,
2827 priv->pm_period);
2828 if (err) {
2829 printk(KERN_ERR "%s: Error %d setting up PM\n",
2830 dev->name, err);
2831 return err;
2832 }
2833 err = hermes_write_wordrec(hw, USER_BAP,
2834 HERMES_RID_CNFPMHOLDOVERDURATION,
2835 priv->pm_timeout);
2836 if (err) {
2837 printk(KERN_ERR "%s: Error %d setting up PM\n",
2838 dev->name, err);
2839 return err;
2840 }
2841 }
2842
2843 /* Set preamble - only for Symbol so far... */
2844 if (priv->has_preamble) {
2845 err = hermes_write_wordrec(hw, USER_BAP,
2846 HERMES_RID_CNFPREAMBLE_SYMBOL,
2847 priv->preamble);
2848 if (err) {
2849 printk(KERN_ERR "%s: Error %d setting preamble\n",
2850 dev->name, err);
2851 return err;
2852 }
2853 }
2854
2855 /* Set up encryption */
2856 if (priv->has_wep || priv->has_wpa) {
2857 err = __orinoco_hw_setup_enc(priv);
2858 if (err) {
2859 printk(KERN_ERR "%s: Error %d activating encryption\n",
2860 dev->name, err);
2861 return err;
2862 }
2863 }
2864
2865 if (priv->iw_mode == IW_MODE_MONITOR) {
2866 /* Enable monitor mode */
2867 dev->type = ARPHRD_IEEE80211;
2868 err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
2869 HERMES_TEST_MONITOR, 0, NULL);
2870 } else {
2871 /* Disable monitor mode */
2872 dev->type = ARPHRD_ETHER;
2873 err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
2874 HERMES_TEST_STOP, 0, NULL);
2875 }
2876 if (err)
2877 return err;
2878
2879 /* Set promiscuity / multicast */
2880 priv->promiscuous = 0;
2881 priv->mc_count = 0;
2882
2883 /* FIXME: what about netif_tx_lock */
2884 __orinoco_set_multicast_list(dev);
2885
2886 return 0;
2887}
2888
2889/* FIXME: return int? */
2890static void
2891__orinoco_set_multicast_list(struct net_device *dev)
2892{
2893 struct orinoco_private *priv = netdev_priv(dev);
2894 hermes_t *hw = &priv->hw;
2895 int err = 0;
2896 int promisc, mc_count;
2897
2898 /* The Hermes doesn't seem to have an allmulti mode, so we go
2899 * into promiscuous mode and let the upper levels deal. */
2900 if ( (dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
2901 (dev->mc_count > MAX_MULTICAST(priv)) ) {
2902 promisc = 1;
2903 mc_count = 0;
2904 } else {
2905 promisc = 0;
2906 mc_count = dev->mc_count;
2907 }
2908
2909 if (promisc != priv->promiscuous) {
2910 err = hermes_write_wordrec(hw, USER_BAP,
2911 HERMES_RID_CNFPROMISCUOUSMODE,
2912 promisc);
2913 if (err) {
2914 printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE.\n",
2915 dev->name, err);
2916 } else
2917 priv->promiscuous = promisc;
2918 }
2919
2920 /* If we're not in promiscuous mode, then we need to set the
2921 * group address if either we want to multicast, or if we were
2922 * multicasting and want to stop */
2923 if (! promisc && (mc_count || priv->mc_count) ) {
2924 struct dev_mc_list *p = dev->mc_list;
2925 struct hermes_multicast mclist;
2926 int i;
2927
2928 for (i = 0; i < mc_count; i++) {
2929 /* paranoia: is list shorter than mc_count? */
2930 BUG_ON(! p);
2931 /* paranoia: bad address size in list? */
2932 BUG_ON(p->dmi_addrlen != ETH_ALEN);
2933
2934 memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
2935 p = p->next;
2936 }
2937
2938 if (p)
2939 printk(KERN_WARNING "%s: Multicast list is "
2940 "longer than mc_count\n", dev->name);
2941
2942 err = hermes_write_ltv(hw, USER_BAP,
2943 HERMES_RID_CNFGROUPADDRESSES,
2944 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
2945 &mclist);
2946 if (err)
2947 printk(KERN_ERR "%s: Error %d setting multicast list.\n",
2948 dev->name, err);
2949 else
2950 priv->mc_count = mc_count;
2951 }
2952}
2953
2954/* This must be called from user context, without locks held - use
2955 * schedule_work() */
2956static void orinoco_reset(struct work_struct *work)
2957{
2958 struct orinoco_private *priv =
2959 container_of(work, struct orinoco_private, reset_work);
2960 struct net_device *dev = priv->ndev;
2961 struct hermes *hw = &priv->hw;
2962 int err;
2963 unsigned long flags;
2964
2965 if (orinoco_lock(priv, &flags) != 0)
2966 /* When the hardware becomes available again, whatever
2967 * detects that is responsible for re-initializing
2968 * it. So no need for anything further */
2969 return;
2970
2971 netif_stop_queue(dev);
2972
2973 /* Shut off interrupts. Depending on what state the hardware
2974 * is in, this might not work, but we'll try anyway */
2975 hermes_set_irqmask(hw, 0);
2976 hermes_write_regn(hw, EVACK, 0xffff);
2977
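	/* hw_unavailable is a counter: while it is non-zero, orinoco_lock()
	 * refuses access, keeping everything else off the hardware until we
	 * drop it again once the firmware has been re-initialized below. */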
2978 priv->hw_unavailable++;
2979 priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */
2980 netif_carrier_off(dev);
2981
2982 orinoco_unlock(priv, &flags);
2983
2984 /* Scanning support: Cleanup of driver struct */
2985 orinoco_clear_scan_results(priv, 0);
2986 priv->scan_inprogress = 0;
2987
2988 if (priv->hard_reset) {
2989 err = (*priv->hard_reset)(priv);
2990 if (err) {
2991 printk(KERN_ERR "%s: orinoco_reset: Error %d "
2992 "performing hard reset\n", dev->name, err);
2993 goto disable;
2994 }
2995 }
2996
2997 err = orinoco_reinit_firmware(dev);
2998 if (err) {
2999 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
3000 dev->name, err);
3001 goto disable;
3002 }
3003
3004 spin_lock_irq(&priv->lock); /* This has to be called from user context */
3005
3006 priv->hw_unavailable--;
3007
3008 /* priv->open or priv->hw_unavailable might have changed while
3009 * we dropped the lock */
3010 if (priv->open && (! priv->hw_unavailable)) {
3011 err = __orinoco_up(dev);
3012 if (err) {
3013 printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
3014 dev->name, err);
3015 } else
3016 dev->trans_start = jiffies;
3017 }
3018
3019 spin_unlock_irq(&priv->lock);
3020
3021 return;
3022 disable:
3023 hermes_set_irqmask(hw, 0);
3024 netif_device_detach(dev);
3025 printk(KERN_ERR "%s: Device has been disabled!\n", dev->name);
3026}
3027
3028/********************************************************************/
3029/* Interrupt handler */
3030/********************************************************************/
3031
3032static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
3033{
3034 printk(KERN_DEBUG "%s: TICK\n", dev->name);
3035}
3036
3037static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
3038{
3039 /* This seems to happen a fair bit under load, but ignoring it
3040 seems to work fine... */
3041 printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
3042 dev->name);
3043}
3044
3045irqreturn_t orinoco_interrupt(int irq, void *dev_id)
3046{
3047 struct net_device *dev = dev_id;
3048 struct orinoco_private *priv = netdev_priv(dev);
3049 hermes_t *hw = &priv->hw;
3050 int count = MAX_IRQLOOPS_PER_IRQ;
3051 u16 evstat, events;
3052 /* These are used to detect a runaway interrupt situation */
3053 /* If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
3054 * we assume the IRQ is stuck, mask the card's interrupts and schedule a reset */
3055 static int last_irq_jiffy = 0; /* jiffies value the last time
3056 * we were called */
3057 static int loops_this_jiffy = 0;
3058 unsigned long flags;
3059
3060 if (orinoco_lock(priv, &flags) != 0) {
3061 /* If hw is unavailable - we don't know if the irq was
3062 * for us or not */
3063 return IRQ_HANDLED;
3064 }
3065
3066 evstat = hermes_read_regn(hw, EVSTAT);
3067 events = evstat & hw->inten;
3068 if (! events) {
3069 orinoco_unlock(priv, &flags);
3070 return IRQ_NONE;
3071 }
3072
3073 if (jiffies != last_irq_jiffy)
3074 loops_this_jiffy = 0;
3075 last_irq_jiffy = jiffies;
3076
3077 while (events && count--) {
3078 if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) {
3079 printk(KERN_WARNING "%s: IRQ handler is looping too "
3080 "much! Resetting.\n", dev->name);
3081 /* Disable interrupts for now */
3082 hermes_set_irqmask(hw, 0);
3083 schedule_work(&priv->reset_work);
3084 break;
3085 }
3086
3087 /* Check the card hasn't been removed */
3088 if (! hermes_present(hw)) {
3089 DEBUG(0, "orinoco_interrupt(): card removed\n");
3090 break;
3091 }
3092
3093 if (events & HERMES_EV_TICK)
3094 __orinoco_ev_tick(dev, hw);
3095 if (events & HERMES_EV_WTERR)
3096 __orinoco_ev_wterr(dev, hw);
3097 if (events & HERMES_EV_INFDROP)
3098 __orinoco_ev_infdrop(dev, hw);
3099 if (events & HERMES_EV_INFO)
3100 __orinoco_ev_info(dev, hw);
3101 if (events & HERMES_EV_RX)
3102 __orinoco_ev_rx(dev, hw);
3103 if (events & HERMES_EV_TXEXC)
3104 __orinoco_ev_txexc(dev, hw);
3105 if (events & HERMES_EV_TX)
3106 __orinoco_ev_tx(dev, hw);
3107 if (events & HERMES_EV_ALLOC)
3108 __orinoco_ev_alloc(dev, hw);
3109
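		/* Acknowledge every event bit we read back, handled or not,
		 * so the card can raise fresh events. */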
3110 hermes_write_regn(hw, EVACK, evstat);
3111
3112 evstat = hermes_read_regn(hw, EVSTAT);
3113 events = evstat & hw->inten;
3114 }
3115
3116 orinoco_unlock(priv, &flags);
3117 return IRQ_HANDLED;
3118}
3119
3120/********************************************************************/
3121/* Power management */
3122/********************************************************************/
3123#if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_HERMES_CACHE_FW_ON_INIT)
3124static int orinoco_pm_notifier(struct notifier_block *notifier,
3125 unsigned long pm_event,
3126 void *unused)
3127{
3128 struct orinoco_private *priv = container_of(notifier,
3129 struct orinoco_private,
3130 pm_notifier);
3131
3132 /* All we need to do is cache the firmware before suspend, and
3133 * release it when we come out.
3134 *
3135 * Only need to do this if we're downloading firmware. */
3136 if (!priv->do_fw_download)
3137 return NOTIFY_DONE;
3138
3139 switch (pm_event) {
3140 case PM_HIBERNATION_PREPARE:
3141 case PM_SUSPEND_PREPARE:
3142 orinoco_cache_fw(priv, 0);
3143 break;
3144
3145 case PM_POST_RESTORE:
3146 /* Restore from hibernation failed. We need to clean
3147 * up in exactly the same way, so fall through. */
3148 case PM_POST_HIBERNATION:
3149 case PM_POST_SUSPEND:
3150 orinoco_uncache_fw(priv);
3151 break;
3152
3153 case PM_RESTORE_PREPARE:
3154 default:
3155 break;
3156 }
3157
3158 return NOTIFY_DONE;
3159}
3160#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */
3161#define orinoco_pm_notifier NULL
3162#endif
3163
3164/********************************************************************/
3165/* Initialization */
3166/********************************************************************/
3167
3168struct comp_id {
3169 u16 id, variant, major, minor;
3170} __attribute__ ((packed));
3171
3172static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
3173{
3174 if (nic_id->id < 0x8000)
3175 return FIRMWARE_TYPE_AGERE;
3176 else if (nic_id->id == 0x8000 && nic_id->major == 0)
3177 return FIRMWARE_TYPE_SYMBOL;
3178 else
3179 return FIRMWARE_TYPE_INTERSIL;
3180}
3181
3182/* Set priv->firmware type, determine firmware properties */
3183static int determine_firmware(struct net_device *dev)
3184{
3185 struct orinoco_private *priv = netdev_priv(dev);
3186 hermes_t *hw = &priv->hw;
3187 int err;
3188 struct comp_id nic_id, sta_id;
3189 unsigned int firmver;
3190 char tmp[SYMBOL_MAX_VER_LEN+1] __attribute__((aligned(2)));
3191
3192 /* Get the hardware version */
3193 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
3194 if (err) {
3195 printk(KERN_ERR "%s: Cannot read hardware identity: error %d\n",
3196 dev->name, err);
3197 return err;
3198 }
3199
3200 le16_to_cpus(&nic_id.id);
3201 le16_to_cpus(&nic_id.variant);
3202 le16_to_cpus(&nic_id.major);
3203 le16_to_cpus(&nic_id.minor);
3204 printk(KERN_DEBUG "%s: Hardware identity %04x:%04x:%04x:%04x\n",
3205 dev->name, nic_id.id, nic_id.variant,
3206 nic_id.major, nic_id.minor);
3207
3208 priv->firmware_type = determine_firmware_type(&nic_id);
3209
3210 /* Get the firmware version */
3211 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
3212 if (err) {
3213 printk(KERN_ERR "%s: Cannot read station identity: error %d\n",
3214 dev->name, err);
3215 return err;
3216 }
3217
3218 le16_to_cpus(&sta_id.id);
3219 le16_to_cpus(&sta_id.variant);
3220 le16_to_cpus(&sta_id.major);
3221 le16_to_cpus(&sta_id.minor);
3222 printk(KERN_DEBUG "%s: Station identity %04x:%04x:%04x:%04x\n",
3223 dev->name, sta_id.id, sta_id.variant,
3224 sta_id.major, sta_id.minor);
3225
3226 switch (sta_id.id) {
3227 case 0x15:
3228 printk(KERN_ERR "%s: Primary firmware is active\n",
3229 dev->name);
3230 return -ENODEV;
3231 case 0x14b:
3232 printk(KERN_ERR "%s: Tertiary firmware is active\n",
3233 dev->name);
3234 return -ENODEV;
3235 case 0x1f: /* Intersil, Agere, Symbol Spectrum24 */
3236 case 0x21: /* Symbol Spectrum24 Trilogy */
3237 break;
3238 default:
3239 printk(KERN_NOTICE "%s: Unknown station ID, please report\n",
3240 dev->name);
3241 break;
3242 }
3243
3244 /* Default capabilities */
3245 priv->has_sensitivity = 1;
3246 priv->has_mwo = 0;
3247 priv->has_preamble = 0;
3248 priv->has_port3 = 1;
3249 priv->has_ibss = 1;
3250 priv->has_wep = 0;
3251 priv->has_big_wep = 0;
3252 priv->has_alt_txcntl = 0;
3253 priv->has_ext_scan = 0;
3254 priv->has_wpa = 0;
3255 priv->do_fw_download = 0;
3256
3257 /* Determine capabilities from the firmware version */
3258 switch (priv->firmware_type) {
3259 case FIRMWARE_TYPE_AGERE:
3260 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
3261 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
3262 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
3263 "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor);
3264
3265 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
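		/* e.g. Agere firmware 9.42 encodes as (9 << 16) | 42 = 0x9002a,
		 * which is the WPA cutoff tested below. */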
3266
3267 priv->has_ibss = (firmver >= 0x60006);
3268 priv->has_wep = (firmver >= 0x40020);
3269 priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell
3270 Gold cards from the others? */
3271 priv->has_mwo = (firmver >= 0x60000);
3272 priv->has_pm = (firmver >= 0x40020); /* Doesn't work in 7.52? */
3273 priv->ibss_port = 1;
3274 priv->has_hostscan = (firmver >= 0x8000a);
3275 priv->do_fw_download = 1;
3276 priv->broken_monitor = (firmver >= 0x80000);
3277 priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */
3278 priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */
3279 priv->has_wpa = (firmver >= 0x9002a);
3280 /* Tested with Agere firmware :
3281 * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
3282 * Tested CableTron firmware : 4.32 => Anton */
3283 break;
3284 case FIRMWARE_TYPE_SYMBOL:
3285 /* Symbol, 3Com AirConnect, Intel, Ericsson WLAN */
3286 /* Intel MAC : 00:02:B3:* */
3287 /* 3Com MAC : 00:50:DA:* */
3288 memset(tmp, 0, sizeof(tmp));
3289 /* Get the Symbol firmware version */
3290 err = hermes_read_ltv(hw, USER_BAP,
3291 HERMES_RID_SECONDARYVERSION_SYMBOL,
3292 SYMBOL_MAX_VER_LEN, NULL, &tmp);
3293 if (err) {
3294 printk(KERN_WARNING
3295 "%s: Error %d reading Symbol firmware info. Wildly guessing capabilities...\n",
3296 dev->name, err);
3297 firmver = 0;
3298 tmp[0] = '\0';
3299 } else {
3300 /* The firmware revision is a string, the format is
3301 * something like : "V2.20-01".
3302 * Quick and dirty parsing... - Jean II
3303 */
3304 firmver = ((tmp[1] - '0') << 16) | ((tmp[3] - '0') << 12)
3305 | ((tmp[4] - '0') << 8) | ((tmp[6] - '0') << 4)
3306 | (tmp[7] - '0');
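			/* Worked example: "V2.20-01" parses to
			 * (2<<16)|(2<<12)|(0<<8)|(0<<4)|1 = 0x22001, matching
			 * the tested 3Com value noted below. */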
3307
3308 tmp[SYMBOL_MAX_VER_LEN] = '\0';
3309 }
3310
3311 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
3312 "Symbol %s", tmp);
3313
3314 priv->has_ibss = (firmver >= 0x20000);
3315 priv->has_wep = (firmver >= 0x15012);
3316 priv->has_big_wep = (firmver >= 0x20000);
3317 priv->has_pm = (firmver >= 0x20000 && firmver < 0x22000) ||
3318 (firmver >= 0x29000 && firmver < 0x30000) ||
3319 firmver >= 0x31000;
3320 priv->has_preamble = (firmver >= 0x20000);
3321 priv->ibss_port = 4;
3322
3323 /* Symbol firmware is found on various cards, but
3324 * there has been no attempt to check firmware
3325 * download on non-spectrum_cs based cards.
3326 *
3327 * Given that the Agere firmware download works
3328 * differently, we should avoid doing a firmware
3329 * download with the Symbol algorithm on non-spectrum
3330 * cards.
3331 *
3332 * For now we can identify a spectrum_cs based card
3333 * because it has a firmware reset function.
3334 */
3335 priv->do_fw_download = (priv->stop_fw != NULL);
3336
3337 priv->broken_disableport = (firmver == 0x25013) ||
3338 (firmver >= 0x30000 && firmver <= 0x31000);
3339 priv->has_hostscan = (firmver >= 0x31001) ||
3340 (firmver >= 0x29057 && firmver < 0x30000);
3341 /* Tested with Intel firmware : 0x20015 => Jean II */
3342 /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
3343 break;
3344 case FIRMWARE_TYPE_INTERSIL:
3345 /* D-Link, Linksys, Addtron, ZoomAir, and many others...
3346 * Samsung, Compaq 100/200 and Proxim are slightly
3347 * different and less well tested */
3348 /* D-Link MAC : 00:40:05:* */
3349 /* Addtron MAC : 00:90:D1:* */
3350 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
3351 "Intersil %d.%d.%d", sta_id.major, sta_id.minor,
3352 sta_id.variant);
3353
3354 firmver = ((unsigned long)sta_id.major << 16) |
3355 ((unsigned long)sta_id.minor << 8) | sta_id.variant;
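		/* e.g. Intersil firmware 1.3.1 encodes as (1<<16)|(3<<8)|1 =
		 * 0x010301, the hostscan cutoff used below. */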
3356
3357 priv->has_ibss = (firmver >= 0x000700); /* FIXME */
3358 priv->has_big_wep = priv->has_wep = (firmver >= 0x000800);
3359 priv->has_pm = (firmver >= 0x000700);
3360 priv->has_hostscan = (firmver >= 0x010301);
3361
3362 if (firmver >= 0x000800)
3363 priv->ibss_port = 0;
3364 else {
3365 printk(KERN_NOTICE "%s: Intersil firmware earlier "
3366 "than v0.8.x - several features not supported\n",
3367 dev->name);
3368 priv->ibss_port = 1;
3369 }
3370 break;
3371 }
3372 printk(KERN_DEBUG "%s: Firmware determined as %s\n", dev->name,
3373 priv->fw_name);
3374
3375 return 0;
3376}
3377
3378static int orinoco_init(struct net_device *dev)
3379{
3380 struct orinoco_private *priv = netdev_priv(dev);
3381 hermes_t *hw = &priv->hw;
3382 int err = 0;
3383 struct hermes_idstring nickbuf;
3384 u16 reclen;
3385 int len;
3386
3387 /* No need to lock, the hw_unavailable flag is already set in
3388 * alloc_orinocodev() */
3389 priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
3390
3391 /* Initialize the firmware */
3392 err = hermes_init(hw);
3393 if (err != 0) {
3394 printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
3395 dev->name, err);
3396 goto out;
3397 }
3398
3399 err = determine_firmware(dev);
3400 if (err != 0) {
3401 printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
3402 dev->name);
3403 goto out;
3404 }
3405
3406 if (priv->do_fw_download) {
3407#ifdef CONFIG_HERMES_CACHE_FW_ON_INIT
3408 orinoco_cache_fw(priv, 0);
3409#endif
3410
3411 err = orinoco_download(priv);
3412 if (err)
3413 priv->do_fw_download = 0;
3414
3415 /* Check firmware version again */
3416 err = determine_firmware(dev);
3417 if (err != 0) {
3418 printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
3419 dev->name);
3420 goto out;
3421 }
3422 }
3423
3424 if (priv->has_port3)
3425 printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name);
3426 if (priv->has_ibss)
3427 printk(KERN_DEBUG "%s: IEEE standard IBSS ad-hoc mode supported\n",
3428 dev->name);
3429 if (priv->has_wep) {
3430 printk(KERN_DEBUG "%s: WEP supported, ", dev->name);
3431 if (priv->has_big_wep)
3432 printk("104-bit key\n");
3433 else
3434 printk("40-bit key\n");
3435 }
3436 if (priv->has_wpa) {
3437 printk(KERN_DEBUG "%s: WPA-PSK supported\n", dev->name);
3438 if (orinoco_mic_init(priv)) {
3439 printk(KERN_ERR "%s: Failed to setup MIC crypto "
3440 "algorithm. Disabling WPA support\n", dev->name);
3441 priv->has_wpa = 0;
3442 }
3443 }
3444
3445 /* Now we have the firmware capabilities, allocate appropriately
3446 * sized scan buffers */
3447 err = orinoco_bss_data_allocate(priv);
3448 if (err)
		goto out;
3449 orinoco_bss_data_init(priv);
3450
3451 /* Get the MAC address */
3452 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
3453 ETH_ALEN, NULL, dev->dev_addr);
3454 if (err) {
3455 printk(KERN_WARNING "%s: failed to read MAC address!\n",
3456 dev->name);
3457 goto out;
3458 }
3459
3460 printk(KERN_DEBUG "%s: MAC address %pM\n",
3461 dev->name, dev->dev_addr);
3462
3463 /* Get the station name */
3464 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
3465 sizeof(nickbuf), &reclen, &nickbuf);
3466 if (err) {
3467 printk(KERN_ERR "%s: failed to read station name\n",
3468 dev->name);
3469 goto out;
3470 }
3471 if (nickbuf.len)
3472 len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len));
3473 else
3474 len = min(IW_ESSID_MAX_SIZE, 2 * reclen);
3475 memcpy(priv->nick, &nickbuf.val, len);
3476 priv->nick[len] = '\0';
3477
3478 printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
3479
3480 err = orinoco_allocate_fid(dev);
3481 if (err) {
3482 printk(KERN_ERR "%s: failed to allocate NIC buffer!\n",
3483 dev->name);
3484 goto out;
3485 }
3486
3487 /* Get allowed channels */
3488 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
3489 &priv->channel_mask);
3490 if (err) {
3491 printk(KERN_ERR "%s: failed to read channel list!\n",
3492 dev->name);
3493 goto out;
3494 }
3495
3496 /* Get initial AP density */
3497 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
3498 &priv->ap_density);
3499 if (err || priv->ap_density < 1 || priv->ap_density > 3) {
3500 priv->has_sensitivity = 0;
3501 }
3502
3503 /* Get initial RTS threshold */
3504 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
3505 &priv->rts_thresh);
3506 if (err) {
3507 printk(KERN_ERR "%s: failed to read RTS threshold!\n",
3508 dev->name);
3509 goto out;
3510 }
3511
3512 /* Get initial fragmentation settings */
3513 if (priv->has_mwo)
3514 err = hermes_read_wordrec(hw, USER_BAP,
3515 HERMES_RID_CNFMWOROBUST_AGERE,
3516 &priv->mwo_robust);
3517 else
3518 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
3519 &priv->frag_thresh);
3520 if (err) {
3521 printk(KERN_ERR "%s: failed to read fragmentation settings!\n",
3522 dev->name);
3523 goto out;
3524 }
3525
3526 /* Power management setup */
3527 if (priv->has_pm) {
3528 priv->pm_on = 0;
3529 priv->pm_mcast = 1;
3530 err = hermes_read_wordrec(hw, USER_BAP,
3531 HERMES_RID_CNFMAXSLEEPDURATION,
3532 &priv->pm_period);
3533 if (err) {
3534 printk(KERN_ERR "%s: failed to read power management period!\n",
3535 dev->name);
3536 goto out;
3537 }
3538 err = hermes_read_wordrec(hw, USER_BAP,
3539 HERMES_RID_CNFPMHOLDOVERDURATION,
3540 &priv->pm_timeout);
3541 if (err) {
3542 printk(KERN_ERR "%s: failed to read power management timeout!\n",
3543 dev->name);
3544 goto out;
3545 }
3546 }
3547
3548 /* Preamble setup */
3549 if (priv->has_preamble) {
3550 err = hermes_read_wordrec(hw, USER_BAP,
3551 HERMES_RID_CNFPREAMBLE_SYMBOL,
3552 &priv->preamble);
3553 if (err)
3554 goto out;
3555 }
3556
3557 /* Set up the default configuration */
3558 priv->iw_mode = IW_MODE_INFRA;
3559 /* By default use IEEE/IBSS ad-hoc mode if we have it */
3560 priv->prefer_port3 = priv->has_port3 && (! priv->has_ibss);
3561 set_port_type(priv);
3562 priv->channel = 0; /* use firmware default */
3563
3564 priv->promiscuous = 0;
3565 priv->encode_alg = IW_ENCODE_ALG_NONE;
3566 priv->tx_key = 0;
3567 priv->wpa_enabled = 0;
3568 priv->tkip_cm_active = 0;
3569 priv->key_mgmt = 0;
3570 priv->wpa_ie_len = 0;
3571 priv->wpa_ie = NULL;
3572
3573 /* Make the hardware available, as long as it hasn't been
3574 * removed elsewhere (e.g. by PCMCIA hot unplug) */
3575 spin_lock_irq(&priv->lock);
3576 priv->hw_unavailable--;
3577 spin_unlock_irq(&priv->lock);
3578
3579 printk(KERN_DEBUG "%s: ready\n", dev->name);
3580
3581 out:
3582 return err;
3583}
3584
3585struct net_device
3586*alloc_orinocodev(int sizeof_card,
3587 struct device *device,
3588 int (*hard_reset)(struct orinoco_private *),
3589 int (*stop_fw)(struct orinoco_private *, int))
3590{
3591 struct net_device *dev;
3592 struct orinoco_private *priv;
3593
3594 dev = alloc_etherdev(sizeof(struct orinoco_private) + sizeof_card);
3595 if (! dev)
3596 return NULL;
3597 priv = netdev_priv(dev);
3598 priv->ndev = dev;
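	/* The card-specific private area (sizeof_card bytes) sits directly
	 * after struct orinoco_private inside the netdev private data
	 * allocated above. */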
3599 if (sizeof_card)
3600 priv->card = (void *)((unsigned long)priv
3601 + sizeof(struct orinoco_private));
3602 else
3603 priv->card = NULL;
3604 priv->dev = device;
3605
3606 /* Setup / override net_device fields */
3607 dev->init = orinoco_init;
3608 dev->hard_start_xmit = orinoco_xmit;
3609 dev->tx_timeout = orinoco_tx_timeout;
3610 dev->watchdog_timeo = HZ; /* 1 second timeout */
3611 dev->get_stats = orinoco_get_stats;
3612 dev->ethtool_ops = &orinoco_ethtool_ops;
3613 dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def;
3614#ifdef WIRELESS_SPY
3615 priv->wireless_data.spy_data = &priv->spy_data;
3616 dev->wireless_data = &priv->wireless_data;
3617#endif
3618 dev->change_mtu = orinoco_change_mtu;
3619 dev->set_multicast_list = orinoco_set_multicast_list;
3620 /* we use the default eth_mac_addr for setting the MAC addr */
3621
3622 /* Reserve space in skb for the SNAP header */
3623 dev->hard_header_len += ENCAPS_OVERHEAD;
3624
3625 /* Set up default callbacks */
3626 dev->open = orinoco_open;
3627 dev->stop = orinoco_stop;
3628 priv->hard_reset = hard_reset;
3629 priv->stop_fw = stop_fw;
3630
3631 spin_lock_init(&priv->lock);
3632 priv->open = 0;
3633 priv->hw_unavailable = 1; /* orinoco_init() must clear this
3634 * before anything else touches the
3635 * hardware */
3636 INIT_WORK(&priv->reset_work, orinoco_reset);
3637 INIT_WORK(&priv->join_work, orinoco_join_ap);
3638 INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
3639
3640 INIT_LIST_HEAD(&priv->rx_list);
3641 tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
3642 (unsigned long) dev);
3643
3644 netif_carrier_off(dev);
3645 priv->last_linkstatus = 0xffff;
3646
3647 priv->cached_pri_fw = NULL;
3648 priv->cached_fw = NULL;
3649
3650 /* Register PM notifiers */
3651 priv->pm_notifier.notifier_call = orinoco_pm_notifier;
3652 register_pm_notifier(&priv->pm_notifier);
3653
3654 return dev;
3655}
3656
3657void free_orinocodev(struct net_device *dev)
3658{
3659 struct orinoco_private *priv = netdev_priv(dev);
3660 struct orinoco_rx_data *rx_data, *temp;
3661
3662 /* If the tasklet is scheduled when we call tasklet_kill it
3663 * will run one final time. However the tasklet will only
3664 * drain priv->rx_list if the hw is still available. */
3665 tasklet_kill(&priv->rx_tasklet);
3666
3667 /* Explicitly drain priv->rx_list */
3668 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
3669 list_del(&rx_data->list);
3670
3671 dev_kfree_skb(rx_data->skb);
3672 kfree(rx_data->desc);
3673 kfree(rx_data);
3674 }
3675
3676 unregister_pm_notifier(&priv->pm_notifier);
3677 orinoco_uncache_fw(priv);
3678
3679 priv->wpa_ie_len = 0;
3680 kfree(priv->wpa_ie);
3681 orinoco_mic_free(priv);
3682 orinoco_bss_data_free(priv);
3683 free_netdev(dev);
3684}
3685
3686/********************************************************************/
3687/* Wireless extensions */
3688/********************************************************************/
3689
3690/* Return : < 0 -> error code ; >= 0 -> length */
3691static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
3692 char buf[IW_ESSID_MAX_SIZE+1])
3693{
3694 hermes_t *hw = &priv->hw;
3695 int err = 0;
3696 struct hermes_idstring essidbuf;
3697 char *p = (char *)(&essidbuf.val);
3698 int len;
3699 unsigned long flags;
3700
3701 if (orinoco_lock(priv, &flags) != 0)
3702 return -EBUSY;
3703
3704 if (strlen(priv->desired_essid) > 0) {
3705 /* We read the desired SSID from the hardware rather
3706 than from priv->desired_essid, just in case the
3707 firmware is allowed to change it on us. I'm not
3708 sure about this */
3709 /* My guess is that the OWNSSID should always be whatever
3710 * we set to the card, whereas CURRENT_SSID is the one that
3711 * may change... - Jean II */
3712 u16 rid;
3713
3714 *active = 1;
3715
3716 rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
3717 HERMES_RID_CNFDESIREDSSID;
3718
3719 err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
3720 NULL, &essidbuf);
3721 if (err)
3722 goto fail_unlock;
3723 } else {
3724 *active = 0;
3725
3726 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
3727 sizeof(essidbuf), NULL, &essidbuf);
3728 if (err)
3729 goto fail_unlock;
3730 }
3731
3732 len = le16_to_cpu(essidbuf.len);
3733 BUG_ON(len > IW_ESSID_MAX_SIZE);
3734
3735 memset(buf, 0, IW_ESSID_MAX_SIZE);
3736 memcpy(buf, p, len);
3737 err = len;
3738
3739 fail_unlock:
3740 orinoco_unlock(priv, &flags);
3741
3742 return err;
3743}
3744
3745static long orinoco_hw_get_freq(struct orinoco_private *priv)
3746{
3747
3748 hermes_t *hw = &priv->hw;
3749 int err = 0;
3750 u16 channel;
3751 long freq = 0;
3752 unsigned long flags;
3753
3754 if (orinoco_lock(priv, &flags) != 0)
3755 return -EBUSY;
3756
3757 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel);
3758 if (err)
3759 goto out;
3760
3761 /* Intersil firmware 1.3.5 returns 0 when the interface is down */
3762 if (channel == 0) {
3763 err = -EBUSY;
3764 goto out;
3765 }
3766
3767 if ( (channel < 1) || (channel > NUM_CHANNELS) ) {
3768 printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
3769 priv->ndev->name, channel);
3770 err = -EBUSY;
3771 goto out;
3772
3773 }
3774 freq = channel_frequency[channel-1] * 100000;
3775
3776 out:
3777 orinoco_unlock(priv, &flags);
3778
3779 if (err > 0)
3780 err = -EBUSY;
3781 return err ? err : freq;
3782}
3783
3784static int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
3785 int *numrates, s32 *rates, int max)
3786{
3787 hermes_t *hw = &priv->hw;
3788 struct hermes_idstring list;
3789 unsigned char *p = (unsigned char *)&list.val;
3790 int err = 0;
3791 int num;
3792 int i;
3793 unsigned long flags;
3794
3795 if (orinoco_lock(priv, &flags) != 0)
3796 return -EBUSY;
3797
3798 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
3799 sizeof(list), NULL, &list);
3800 orinoco_unlock(priv, &flags);
3801
3802 if (err)
3803 return err;
3804
3805 num = le16_to_cpu(list.len);
3806 *numrates = num;
3807 num = min(num, max);
3808
3809 for (i = 0; i < num; i++) {
3810 rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
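		/* Units of 500 kb/s with the top bit masked off (it marks a
		 * basic rate in the 802.11 supported-rates encoding);
		 * e.g. 0x16 -> 22 -> 11 Mb/s. */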
3811 }
3812
3813 return 0;
3814}
3815
3816static int orinoco_ioctl_getname(struct net_device *dev,
3817 struct iw_request_info *info,
3818 char *name,
3819 char *extra)
3820{
3821 struct orinoco_private *priv = netdev_priv(dev);
3822 int numrates;
3823 int err;
3824
3825 err = orinoco_hw_get_bitratelist(priv, &numrates, NULL, 0);
3826
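	/* Plain 802.11 DS offers only 1 and 2 Mb/s, so more than two
	 * supported rates implies the 5.5/11 Mb/s 802.11b set. */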
3827 if (!err && (numrates > 2))
3828 strcpy(name, "IEEE 802.11b");
3829 else
3830 strcpy(name, "IEEE 802.11-DS");
3831
3832 return 0;
3833}
3834
3835static int orinoco_ioctl_setwap(struct net_device *dev,
3836 struct iw_request_info *info,
3837 struct sockaddr *ap_addr,
3838 char *extra)
3839{
3840 struct orinoco_private *priv = netdev_priv(dev);
3841 int err = -EINPROGRESS; /* Call commit handler */
3842 unsigned long flags;
3843 static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
3844 static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3845
3846 if (orinoco_lock(priv, &flags) != 0)
3847 return -EBUSY;
3848
3849 /* Enable automatic roaming - no sanity checks are needed */
3850 if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 ||
3851 memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) {
3852 priv->bssid_fixed = 0;
3853 memset(priv->desired_bssid, 0, ETH_ALEN);
3854
3855 /* "off" means keep existing connection */
3856 if (ap_addr->sa_data[0] == 0) {
3857 __orinoco_hw_set_wap(priv);
3858 err = 0;
3859 }
3860 goto out;
3861 }
3862
3863 if (priv->firmware_type == FIRMWARE_TYPE_AGERE) {
3864 printk(KERN_WARNING "%s: Lucent/Agere firmware doesn't "
3865 "support manual roaming\n",
3866 dev->name);
3867 err = -EOPNOTSUPP;
3868 goto out;
3869 }
3870
3871 if (priv->iw_mode != IW_MODE_INFRA) {
3872 printk(KERN_WARNING "%s: Manual roaming supported only in "
3873 "managed mode\n", dev->name);
3874 err = -EOPNOTSUPP;
3875 goto out;
3876 }
3877
3878 /* Intersil firmware hangs without Desired ESSID */
3879 if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL &&
3880 strlen(priv->desired_essid) == 0) {
3881 printk(KERN_WARNING "%s: Desired ESSID must be set for "
3882 "manual roaming\n", dev->name);
3883 err = -EOPNOTSUPP;
3884 goto out;
3885 }
3886
3887 /* Finally, enable manual roaming */
3888 priv->bssid_fixed = 1;
3889 memcpy(priv->desired_bssid, &ap_addr->sa_data, ETH_ALEN);
3890
3891 out:
3892 orinoco_unlock(priv, &flags);
3893 return err;
3894}
3895
3896static int orinoco_ioctl_getwap(struct net_device *dev,
3897 struct iw_request_info *info,
3898 struct sockaddr *ap_addr,
3899 char *extra)
3900{
3901 struct orinoco_private *priv = netdev_priv(dev);
3902
3903 hermes_t *hw = &priv->hw;
3904 int err = 0;
3905 unsigned long flags;
3906
3907 if (orinoco_lock(priv, &flags) != 0)
3908 return -EBUSY;
3909
3910 ap_addr->sa_family = ARPHRD_ETHER;
3911 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
3912 ETH_ALEN, NULL, ap_addr->sa_data);
3913
3914 orinoco_unlock(priv, &flags);
3915
3916 return err;
3917}
3918
3919static int orinoco_ioctl_setmode(struct net_device *dev,
3920 struct iw_request_info *info,
3921 u32 *mode,
3922 char *extra)
3923{
3924 struct orinoco_private *priv = netdev_priv(dev);
3925 int err = -EINPROGRESS; /* Call commit handler */
3926 unsigned long flags;
3927
3928 if (priv->iw_mode == *mode)
3929 return 0;
3930
3931 if (orinoco_lock(priv, &flags) != 0)
3932 return -EBUSY;
3933
3934 switch (*mode) {
3935 case IW_MODE_ADHOC:
3936 if (!priv->has_ibss && !priv->has_port3)
3937 err = -EOPNOTSUPP;
3938 break;
3939
3940 case IW_MODE_INFRA:
3941 break;
3942
3943 case IW_MODE_MONITOR:
3944 if (priv->broken_monitor && !force_monitor) {
3945 printk(KERN_WARNING "%s: Monitor mode support is "
3946 "buggy in this firmware, not enabling\n",
3947 dev->name);
3948 err = -EOPNOTSUPP;
3949 }
3950 break;
3951
3952 default:
3953 err = -EOPNOTSUPP;
3954 break;
3955 }
3956
3957 if (err == -EINPROGRESS) {
3958 priv->iw_mode = *mode;
3959 set_port_type(priv);
3960 }
3961
3962 orinoco_unlock(priv, &flags);
3963
3964 return err;
3965}
3966
3967static int orinoco_ioctl_getmode(struct net_device *dev,
3968 struct iw_request_info *info,
3969 u32 *mode,
3970 char *extra)
3971{
3972 struct orinoco_private *priv = netdev_priv(dev);
3973
3974 *mode = priv->iw_mode;
3975 return 0;
3976}
3977
3978static int orinoco_ioctl_getiwrange(struct net_device *dev,
3979 struct iw_request_info *info,
3980 struct iw_point *rrq,
3981 char *extra)
3982{
3983 struct orinoco_private *priv = netdev_priv(dev);
3984 int err = 0;
3985 struct iw_range *range = (struct iw_range *) extra;
3986 int numrates;
3987 int i, k;
3988
3989 rrq->length = sizeof(struct iw_range);
3990 memset(range, 0, sizeof(struct iw_range));
3991
3992 range->we_version_compiled = WIRELESS_EXT;
3993 range->we_version_source = 22;
3994
3995 /* Set available channels/frequencies */
3996 range->num_channels = NUM_CHANNELS;
3997 k = 0;
3998 for (i = 0; i < NUM_CHANNELS; i++) {
3999 if (priv->channel_mask & (1 << i)) {
4000 range->freq[k].i = i + 1;
4001 range->freq[k].m = channel_frequency[i] * 100000;
4002 range->freq[k].e = 1;
4003 k++;
4004 }
4005
4006 if (k >= IW_MAX_FREQUENCIES)
4007 break;
4008 }
4009 range->num_frequency = k;
4010 range->sensitivity = 3;
4011
4012 if (priv->has_wep) {
4013 range->max_encoding_tokens = ORINOCO_MAX_KEYS;
4014 range->encoding_size[0] = SMALL_KEY_SIZE;
4015 range->num_encoding_sizes = 1;
4016
4017 if (priv->has_big_wep) {
4018 range->encoding_size[1] = LARGE_KEY_SIZE;
4019 range->num_encoding_sizes = 2;
4020 }
4021 }
4022
4023 if (priv->has_wpa)
4024 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_CIPHER_TKIP;
4025
4026 if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))) {
4027 /* Quality stats meaningless in ad-hoc mode */
4028 } else {
4029 range->max_qual.qual = 0x8b - 0x2f;
4030 range->max_qual.level = 0x2f - 0x95 - 1;
4031 range->max_qual.noise = 0x2f - 0x95 - 1;
4032 /* Need to get better values */
4033 range->avg_qual.qual = 0x24;
4034 range->avg_qual.level = 0xC2;
4035 range->avg_qual.noise = 0x9E;
4036 }
4037
4038 err = orinoco_hw_get_bitratelist(priv, &numrates,
4039 range->bitrate, IW_MAX_BITRATES);
4040 if (err)
4041 return err;
4042 range->num_bitrates = numrates;
4043
4044 /* Set an indication of the max TCP throughput in bit/s that we can
4045 * expect using this interface. May be used for QoS stuff...
4046 * Jean II */
4047 if (numrates > 2)
4048 range->throughput = 5 * 1000 * 1000; /* ~5 Mb/s */
4049 else
4050 range->throughput = 1.5 * 1000 * 1000; /* ~1.5 Mb/s */
4051
4052 range->min_rts = 0;
4053 range->max_rts = 2347;
4054 range->min_frag = 256;
4055 range->max_frag = 2346;
4056
4057 range->min_pmp = 0;
4058 range->max_pmp = 65535000;
4059 range->min_pmt = 0;
4060 range->max_pmt = 65535 * 1000; /* ??? */
4061 range->pmp_flags = IW_POWER_PERIOD;
4062 range->pmt_flags = IW_POWER_TIMEOUT;
4063 range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_UNICAST_R;
4064
4065 range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
4066 range->retry_flags = IW_RETRY_LIMIT;
4067 range->r_time_flags = IW_RETRY_LIFETIME;
4068 range->min_retry = 0;
4069 range->max_retry = 65535; /* ??? */
4070 range->min_r_time = 0;
4071 range->max_r_time = 65535 * 1000; /* ??? */
4072
4073 if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
4074 range->scan_capa = IW_SCAN_CAPA_ESSID;
4075 else
4076 range->scan_capa = IW_SCAN_CAPA_NONE;
4077
4078 /* Event capability (kernel) */
4079 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
4080 /* Event capability (driver) */
4081 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
4082 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
4083 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
4084 IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
4085
4086 return 0;
4087}
4088
4089static int orinoco_ioctl_setiwencode(struct net_device *dev,
4090 struct iw_request_info *info,
4091 struct iw_point *erq,
4092 char *keybuf)
4093{
4094 struct orinoco_private *priv = netdev_priv(dev);
4095 int index = (erq->flags & IW_ENCODE_INDEX) - 1;
4096 int setindex = priv->tx_key;
4097 int encode_alg = priv->encode_alg;
4098 int restricted = priv->wep_restrict;
4099 u16 xlen = 0;
4100 int err = -EINPROGRESS; /* Call commit handler */
4101 unsigned long flags;
4102
4103 if (! priv->has_wep)
4104 return -EOPNOTSUPP;
4105
4106 if (erq->pointer) {
4107 /* We actually have a key to set - check its length */
4108 if (erq->length > LARGE_KEY_SIZE)
4109 return -E2BIG;
4110
4111 if ( (erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep )
4112 return -E2BIG;
4113 }
4114
4115 if (orinoco_lock(priv, &flags) != 0)
4116 return -EBUSY;
4117
4118 /* Clear any TKIP key we have */
4119 if ((priv->has_wpa) && (priv->encode_alg == IW_ENCODE_ALG_TKIP))
4120 (void) orinoco_clear_tkip_key(priv, setindex);
4121
4122 if (erq->length > 0) {
4123 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
4124 index = priv->tx_key;
4125
4126 /* Adjust key length to a supported value */
4127 if (erq->length > SMALL_KEY_SIZE) {
4128 xlen = LARGE_KEY_SIZE;
4129 } else if (erq->length > 0) {
4130 xlen = SMALL_KEY_SIZE;
4131 } else
4132 xlen = 0;
4133
4134 /* Switch on WEP if off */
4135 if ((encode_alg != IW_ENCODE_ALG_WEP) && (xlen > 0)) {
4136 setindex = index;
4137 encode_alg = IW_ENCODE_ALG_WEP;
4138 }
4139 } else {
4140 /* Important note : if the user does "iwconfig eth0 enc off",
4141 * we will arrive here with an index of -1. This is valid
4142 * but needs to be taken care of... Jean II */
4143 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) {
4144 if((index != -1) || (erq->flags == 0)) {
4145 err = -EINVAL;
4146 goto out;
4147 }
4148 } else {
4149 /* Set the index : Check that the key is valid */
4150 if(priv->keys[index].len == 0) {
4151 err = -EINVAL;
4152 goto out;
4153 }
4154 setindex = index;
4155 }
4156 }
4157
4158 if (erq->flags & IW_ENCODE_DISABLED)
4159 encode_alg = IW_ENCODE_ALG_NONE;
4160 if (erq->flags & IW_ENCODE_OPEN)
4161 restricted = 0;
4162 if (erq->flags & IW_ENCODE_RESTRICTED)
4163 restricted = 1;
4164
4165 if (erq->pointer && erq->length > 0) {
4166 priv->keys[index].len = cpu_to_le16(xlen);
4167 memset(priv->keys[index].data, 0,
4168 sizeof(priv->keys[index].data));
4169 memcpy(priv->keys[index].data, keybuf, erq->length);
4170 }
4171 priv->tx_key = setindex;
4172
4173 /* Try fast key change if connected and only keys are changed */
4174 if ((priv->encode_alg == encode_alg) &&
4175 (priv->wep_restrict == restricted) &&
4176 netif_carrier_ok(dev)) {
4177 err = __orinoco_hw_setup_wepkeys(priv);
4178 /* No need to commit if successful */
4179 goto out;
4180 }
4181
4182 priv->encode_alg = encode_alg;
4183 priv->wep_restrict = restricted;
4184
4185 out:
4186 orinoco_unlock(priv, &flags);
4187
4188 return err;
4189}
4190
4191static int orinoco_ioctl_getiwencode(struct net_device *dev,
4192 struct iw_request_info *info,
4193 struct iw_point *erq,
4194 char *keybuf)
4195{
4196 struct orinoco_private *priv = netdev_priv(dev);
4197 int index = (erq->flags & IW_ENCODE_INDEX) - 1;
4198 u16 xlen = 0;
4199 unsigned long flags;
4200
4201 if (! priv->has_wep)
4202 return -EOPNOTSUPP;
4203
4204 if (orinoco_lock(priv, &flags) != 0)
4205 return -EBUSY;
4206
4207 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
4208 index = priv->tx_key;
4209
4210 erq->flags = 0;
4211 if (!priv->encode_alg)
4212 erq->flags |= IW_ENCODE_DISABLED;
4213 erq->flags |= index + 1;
4214
4215 if (priv->wep_restrict)
4216 erq->flags |= IW_ENCODE_RESTRICTED;
4217 else
4218 erq->flags |= IW_ENCODE_OPEN;
4219
4220 xlen = le16_to_cpu(priv->keys[index].len);
4221
4222 erq->length = xlen;
4223
4224 memcpy(keybuf, priv->keys[index].data, ORINOCO_MAX_KEY_SIZE);
4225
4226 orinoco_unlock(priv, &flags);
4227 return 0;
4228}
4229
4230static int orinoco_ioctl_setessid(struct net_device *dev,
4231 struct iw_request_info *info,
4232 struct iw_point *erq,
4233 char *essidbuf)
4234{
4235 struct orinoco_private *priv = netdev_priv(dev);
4236 unsigned long flags;
4237
4238 /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it
4239 * anyway... - Jean II */
4240
4241 /* Hum... Should not use Wireless Extension constant (may change),
4242 * should use our own... - Jean II */
4243 if (erq->length > IW_ESSID_MAX_SIZE)
4244 return -E2BIG;
4245
4246 if (orinoco_lock(priv, &flags) != 0)
4247 return -EBUSY;
4248
4249 /* NULL the string (for NULL termination & ESSID = ANY) - Jean II */
4250 memset(priv->desired_essid, 0, sizeof(priv->desired_essid));
4251
4252 /* If not ANY, get the new ESSID */
4253 if (erq->flags) {
4254 memcpy(priv->desired_essid, essidbuf, erq->length);
4255 }
4256
4257 orinoco_unlock(priv, &flags);
4258
4259 return -EINPROGRESS; /* Call commit handler */
4260}
4261
4262static int orinoco_ioctl_getessid(struct net_device *dev,
4263 struct iw_request_info *info,
4264 struct iw_point *erq,
4265 char *essidbuf)
4266{
4267 struct orinoco_private *priv = netdev_priv(dev);
4268 int active;
4269 int err = 0;
4270 unsigned long flags;
4271
4272 if (netif_running(dev)) {
4273 err = orinoco_hw_get_essid(priv, &active, essidbuf);
4274 if (err < 0)
4275 return err;
4276 erq->length = err;
4277 } else {
4278 if (orinoco_lock(priv, &flags) != 0)
4279 return -EBUSY;
4280 memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE);
4281 erq->length = strlen(priv->desired_essid);
4282 orinoco_unlock(priv, &flags);
4283 }
4284
4285 erq->flags = 1;
4286
4287 return 0;
4288}
4289
4290static int orinoco_ioctl_setnick(struct net_device *dev,
4291 struct iw_request_info *info,
4292 struct iw_point *nrq,
4293 char *nickbuf)
4294{
4295 struct orinoco_private *priv = netdev_priv(dev);
4296 unsigned long flags;
4297
4298 if (nrq->length > IW_ESSID_MAX_SIZE)
4299 return -E2BIG;
4300
4301 if (orinoco_lock(priv, &flags) != 0)
4302 return -EBUSY;
4303
4304 memset(priv->nick, 0, sizeof(priv->nick));
4305 memcpy(priv->nick, nickbuf, nrq->length);
4306
4307 orinoco_unlock(priv, &flags);
4308
4309 return -EINPROGRESS; /* Call commit handler */
4310}
4311
4312static int orinoco_ioctl_getnick(struct net_device *dev,
4313 struct iw_request_info *info,
4314 struct iw_point *nrq,
4315 char *nickbuf)
4316{
4317 struct orinoco_private *priv = netdev_priv(dev);
4318 unsigned long flags;
4319
4320 if (orinoco_lock(priv, &flags) != 0)
4321 return -EBUSY;
4322
4323 memcpy(nickbuf, priv->nick, IW_ESSID_MAX_SIZE);
4324 orinoco_unlock(priv, &flags);
4325
4326 nrq->length = strlen(priv->nick);
4327
4328 return 0;
4329}
4330
4331static int orinoco_ioctl_setfreq(struct net_device *dev,
4332 struct iw_request_info *info,
4333 struct iw_freq *frq,
4334 char *extra)
4335{
4336 struct orinoco_private *priv = netdev_priv(dev);
4337 int chan = -1;
4338 unsigned long flags;
4339 int err = -EINPROGRESS; /* Call commit handler */
4340
4341 /* In infrastructure mode the AP sets the channel */
4342 if (priv->iw_mode == IW_MODE_INFRA)
4343 return -EBUSY;
4344
4345 if ( (frq->e == 0) && (frq->m <= 1000) ) {
4346 /* Setting by channel number */
4347 chan = frq->m;
4348 } else {
4349 /* Setting by frequency - search the table */
4350 int mult = 1;
4351 int i;
4352
4353 for (i = 0; i < (6 - frq->e); i++)
4354 mult *= 10;
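		/* channel_frequency[] is in MHz (see getiwrange above), so mult
		 * rescales the user's mantissa; e.g. e=1 gives mult=100000 and
		 * 2.412 GHz arrives as m=241200000 = 2412 * 100000 -> channel 1. */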
4355
4356 for (i = 0; i < NUM_CHANNELS; i++)
4357 if (frq->m == (channel_frequency[i] * mult))
4358 chan = i+1;
4359 }
4360
4361 if ( (chan < 1) || (chan > NUM_CHANNELS) ||
4362 ! (priv->channel_mask & (1 << (chan-1)) ) )
4363 return -EINVAL;
4364
4365 if (orinoco_lock(priv, &flags) != 0)
4366 return -EBUSY;
4367
4368 priv->channel = chan;
4369 if (priv->iw_mode == IW_MODE_MONITOR) {
4370 /* Fast channel change - no commit if successful */
4371 hermes_t *hw = &priv->hw;
4372 err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
4373 HERMES_TEST_SET_CHANNEL,
4374 chan, NULL);
4375 }
4376 orinoco_unlock(priv, &flags);
4377
4378 return err;
4379}
4380
4381static int orinoco_ioctl_getfreq(struct net_device *dev,
4382 struct iw_request_info *info,
4383 struct iw_freq *frq,
4384 char *extra)
4385{
4386 struct orinoco_private *priv = netdev_priv(dev);
4387 int tmp;
4388
4389 /* Locking done in there */
4390 tmp = orinoco_hw_get_freq(priv);
4391 if (tmp < 0) {
4392 return tmp;
4393 }
4394
4395 frq->m = tmp;
4396 frq->e = 1;
4397
4398 return 0;
4399}
4400
4401static int orinoco_ioctl_getsens(struct net_device *dev,
4402 struct iw_request_info *info,
4403 struct iw_param *srq,
4404 char *extra)
4405{
4406 struct orinoco_private *priv = netdev_priv(dev);
4407 hermes_t *hw = &priv->hw;
4408 u16 val;
4409 int err;
4410 unsigned long flags;
4411
4412 if (!priv->has_sensitivity)
4413 return -EOPNOTSUPP;
4414
4415 if (orinoco_lock(priv, &flags) != 0)
4416 return -EBUSY;
4417 err = hermes_read_wordrec(hw, USER_BAP,
4418 HERMES_RID_CNFSYSTEMSCALE, &val);
4419 orinoco_unlock(priv, &flags);
4420
4421 if (err)
4422 return err;
4423
4424 srq->value = val;
4425 srq->fixed = 0; /* auto */
4426
4427 return 0;
4428}
4429
4430static int orinoco_ioctl_setsens(struct net_device *dev,
4431 struct iw_request_info *info,
4432 struct iw_param *srq,
4433 char *extra)
4434{
4435 struct orinoco_private *priv = netdev_priv(dev);
4436 int val = srq->value;
4437 unsigned long flags;
4438
4439 if (!priv->has_sensitivity)
4440 return -EOPNOTSUPP;
4441
4442 if ((val < 1) || (val > 3))
4443 return -EINVAL;
4444
4445 if (orinoco_lock(priv, &flags) != 0)
4446 return -EBUSY;
4447 priv->ap_density = val;
4448 orinoco_unlock(priv, &flags);
4449
4450 return -EINPROGRESS; /* Call commit handler */
4451}
4452
4453static int orinoco_ioctl_setrts(struct net_device *dev,
4454 struct iw_request_info *info,
4455 struct iw_param *rrq,
4456 char *extra)
4457{
4458 struct orinoco_private *priv = netdev_priv(dev);
4459 int val = rrq->value;
4460 unsigned long flags;
4461
4462 if (rrq->disabled)
4463 val = 2347;
4464
4465 if ( (val < 0) || (val > 2347) )
4466 return -EINVAL;
4467
4468 if (orinoco_lock(priv, &flags) != 0)
4469 return -EBUSY;
4470
4471 priv->rts_thresh = val;
4472 orinoco_unlock(priv, &flags);
4473
4474 return -EINPROGRESS; /* Call commit handler */
4475}
4476
4477static int orinoco_ioctl_getrts(struct net_device *dev,
4478 struct iw_request_info *info,
4479 struct iw_param *rrq,
4480 char *extra)
4481{
4482 struct orinoco_private *priv = netdev_priv(dev);
4483
4484 rrq->value = priv->rts_thresh;
4485 rrq->disabled = (rrq->value == 2347);
4486 rrq->fixed = 1;
4487
4488 return 0;
4489}
4490
4491static int orinoco_ioctl_setfrag(struct net_device *dev,
4492 struct iw_request_info *info,
4493 struct iw_param *frq,
4494 char *extra)
4495{
4496 struct orinoco_private *priv = netdev_priv(dev);
4497 int err = -EINPROGRESS; /* Call commit handler */
4498 unsigned long flags;
4499
4500 if (orinoco_lock(priv, &flags) != 0)
4501 return -EBUSY;
4502
4503 if (priv->has_mwo) {
4504 if (frq->disabled)
4505 priv->mwo_robust = 0;
4506 else {
4507 if (frq->fixed)
4508 printk(KERN_WARNING "%s: Fixed fragmentation is "
4509 "not supported on this firmware. "
4510 "Using MWO robust instead.\n", dev->name);
4511 priv->mwo_robust = 1;
4512 }
4513 } else {
4514 if (frq->disabled)
4515 priv->frag_thresh = 2346;
4516 else {
4517 if ( (frq->value < 256) || (frq->value > 2346) )
4518 err = -EINVAL;
4519 else
4520 priv->frag_thresh = frq->value & ~0x1; /* must be even */
4521 }
4522 }
4523
4524 orinoco_unlock(priv, &flags);
4525
4526 return err;
4527}
4528
4529static int orinoco_ioctl_getfrag(struct net_device *dev,
4530 struct iw_request_info *info,
4531 struct iw_param *frq,
4532 char *extra)
4533{
4534 struct orinoco_private *priv = netdev_priv(dev);
4535 hermes_t *hw = &priv->hw;
4536 int err;
4537 u16 val;
4538 unsigned long flags;
4539
4540 if (orinoco_lock(priv, &flags) != 0)
4541 return -EBUSY;
4542
4543 if (priv->has_mwo) {
4544 err = hermes_read_wordrec(hw, USER_BAP,
4545 HERMES_RID_CNFMWOROBUST_AGERE,
4546 &val);
4547 if (err)
4548 val = 0;
4549
4550 frq->value = val ? 2347 : 0;
4551 frq->disabled = ! val;
4552 frq->fixed = 0;
4553 } else {
4554 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
4555 &val);
4556 if (err)
4557 val = 0;
4558
4559 frq->value = val;
4560 frq->disabled = (val >= 2346);
4561 frq->fixed = 1;
4562 }
4563
4564 orinoco_unlock(priv, &flags);
4565
4566 return err;
4567}
4568
4569static int orinoco_ioctl_setrate(struct net_device *dev,
4570 struct iw_request_info *info,
4571 struct iw_param *rrq,
4572 char *extra)
4573{
4574 struct orinoco_private *priv = netdev_priv(dev);
4575 int ratemode = -1;
4576 int bitrate; /* 100s of kilobits */
4577 int i;
4578 unsigned long flags;
4579
4580 /* As the user space doesn't know our highest rate, it uses -1
4581 * to ask us to set the highest rate. Test it using "iwconfig
4582 * ethX rate auto" - Jean II */
4583 if (rrq->value == -1)
4584 bitrate = 110;
4585 else {
4586 if (rrq->value % 100000)
4587 return -EINVAL;
4588 bitrate = rrq->value / 100000;
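		/* rrq->value is in bit/s, so e.g. 11 Mb/s arrives as 11000000
		 * and becomes bitrate 110 (hundreds of kb/s, as declared above). */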
4589 }
4590
4591 if ( (bitrate != 10) && (bitrate != 20) &&
4592 (bitrate != 55) && (bitrate != 110) )
4593 return -EINVAL;
4594
4595 for (i = 0; i < BITRATE_TABLE_SIZE; i++)
4596 if ( (bitrate_table[i].bitrate == bitrate) &&
4597 (bitrate_table[i].automatic == ! rrq->fixed) ) {
4598 ratemode = i;
4599 break;
4600 }
4601
4602 if (ratemode == -1)
4603 return -EINVAL;
4604
4605 if (orinoco_lock(priv, &flags) != 0)
4606 return -EBUSY;
4607 priv->bitratemode = ratemode;
4608 orinoco_unlock(priv, &flags);
4609
4610 return -EINPROGRESS;
4611}
4612
4613static int orinoco_ioctl_getrate(struct net_device *dev,
4614 struct iw_request_info *info,
4615 struct iw_param *rrq,
4616 char *extra)
4617{
4618 struct orinoco_private *priv = netdev_priv(dev);
4619 hermes_t *hw = &priv->hw;
4620 int err = 0;
4621 int ratemode;
4622 int i;
4623 u16 val;
4624 unsigned long flags;
4625
4626 if (orinoco_lock(priv, &flags) != 0)
4627 return -EBUSY;
4628
4629 ratemode = priv->bitratemode;
4630
4631 BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE));
4632
4633 rrq->value = bitrate_table[ratemode].bitrate * 100000;
4634 rrq->fixed = ! bitrate_table[ratemode].automatic;
4635 rrq->disabled = 0;
4636
4637 /* If the interface is running we try to find more about the
4638 current mode */
4639 if (netif_running(dev)) {
4640 err = hermes_read_wordrec(hw, USER_BAP,
4641 HERMES_RID_CURRENTTXRATE, &val);
4642 if (err)
4643 goto out;
4644
4645 switch (priv->firmware_type) {
4646 case FIRMWARE_TYPE_AGERE: /* Lucent style rate */
4647 /* Note : in Lucent firmware, the return value of
4648 * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s,
4649 * and therefore is totally different from the
4650 * encoding of HERMES_RID_CNFTXRATECONTROL.
4651 * Don't forget that 6Mb/s is really 5.5Mb/s */
4652 if (val == 6)
4653 rrq->value = 5500000;
4654 else
4655 rrq->value = val * 1000000;
4656 break;
4657 case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
4658 case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
4659 for (i = 0; i < BITRATE_TABLE_SIZE; i++)
4660 if (bitrate_table[i].intersil_txratectrl == val) {
4661 ratemode = i;
4662 break;
4663 }
4664 if (i >= BITRATE_TABLE_SIZE)
4665 printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
4666 dev->name, val);
4667
4668 rrq->value = bitrate_table[ratemode].bitrate * 100000;
4669 break;
4670 default:
4671 BUG();
4672 }
4673 }
4674
4675 out:
4676 orinoco_unlock(priv, &flags);
4677
4678 return err;
4679}
4680
4681static int orinoco_ioctl_setpower(struct net_device *dev,
4682 struct iw_request_info *info,
4683 struct iw_param *prq,
4684 char *extra)
4685{
4686 struct orinoco_private *priv = netdev_priv(dev);
4687 int err = -EINPROGRESS; /* Call commit handler */
4688 unsigned long flags;
4689
4690 if (orinoco_lock(priv, &flags) != 0)
4691 return -EBUSY;
4692
4693 if (prq->disabled) {
4694 priv->pm_on = 0;
4695 } else {
4696 switch (prq->flags & IW_POWER_MODE) {
4697 case IW_POWER_UNICAST_R:
4698 priv->pm_mcast = 0;
4699 priv->pm_on = 1;
4700 break;
4701 case IW_POWER_ALL_R:
4702 priv->pm_mcast = 1;
4703 priv->pm_on = 1;
4704 break;
4705 case IW_POWER_ON:
4706 /* No flags : but we may have a value - Jean II */
4707 break;
4708 default:
4709 err = -EINVAL;
4710 goto out;
4711 }
4712
4713 if (prq->flags & IW_POWER_TIMEOUT) {
4714 priv->pm_on = 1;
4715 priv->pm_timeout = prq->value / 1000;
4716 }
4717 if (prq->flags & IW_POWER_PERIOD) {
4718 priv->pm_on = 1;
4719 priv->pm_period = prq->value / 1000;
4720 }
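		/* Wireless extensions hand us microseconds; storing value/1000
		 * fits the 16-bit firmware records and matches the *1000 in
		 * orinoco_ioctl_getpower() below (assumed unit mapping). */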
4721 /* It's valid to not have a value if we are just toggling
4722 * the flags... Jean II */
4723 if(!priv->pm_on) {
4724 err = -EINVAL;
4725 goto out;
4726 }
4727 }
4728
4729 out:
4730 orinoco_unlock(priv, &flags);
4731
4732 return err;
4733}
4734
4735static int orinoco_ioctl_getpower(struct net_device *dev,
4736 struct iw_request_info *info,
4737 struct iw_param *prq,
4738 char *extra)
4739{
4740 struct orinoco_private *priv = netdev_priv(dev);
4741 hermes_t *hw = &priv->hw;
4742 int err = 0;
4743 u16 enable, period, timeout, mcast;
4744 unsigned long flags;
4745
4746 if (orinoco_lock(priv, &flags) != 0)
4747 return -EBUSY;
4748
4749 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, &enable);
4750 if (err)
4751 goto out;
4752
4753 err = hermes_read_wordrec(hw, USER_BAP,
4754 HERMES_RID_CNFMAXSLEEPDURATION, &period);
4755 if (err)
4756 goto out;
4757
4758 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &timeout);
4759 if (err)
4760 goto out;
4761
4762 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, &mcast);
4763 if (err)
4764 goto out;
4765
4766 prq->disabled = !enable;
4767 /* Note : by default, display the period */
4768 if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
4769 prq->flags = IW_POWER_TIMEOUT;
4770 prq->value = timeout * 1000;
4771 } else {
4772 prq->flags = IW_POWER_PERIOD;
4773 prq->value = period * 1000;
4774 }
4775 if (mcast)
4776 prq->flags |= IW_POWER_ALL_R;
4777 else
4778 prq->flags |= IW_POWER_UNICAST_R;
4779
4780 out:
4781 orinoco_unlock(priv, &flags);
4782
4783 return err;
4784}
4785
4786static int orinoco_ioctl_set_encodeext(struct net_device *dev,
4787 struct iw_request_info *info,
4788 union iwreq_data *wrqu,
4789 char *extra)
4790{
4791 struct orinoco_private *priv = netdev_priv(dev);
4792 struct iw_point *encoding = &wrqu->encoding;
4793 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
4794 int idx, alg = ext->alg, set_key = 1;
4795 unsigned long flags;
4796 int err = -EINVAL;
4797 u16 key_len;
4798
4799 if (orinoco_lock(priv, &flags) != 0)
4800 return -EBUSY;
4801
4802 /* Determine and validate the key index */
4803 idx = encoding->flags & IW_ENCODE_INDEX;
4804 if (idx) {
4805 if ((idx < 1) || (idx > 4))
4806 goto out;
4807 idx--;
4808 } else
4809 idx = priv->tx_key;
4810
4811 if (encoding->flags & IW_ENCODE_DISABLED)
4812 alg = IW_ENCODE_ALG_NONE;
4813
4814 if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) {
4815 /* Clear any TKIP TX key we had */
4816 (void) orinoco_clear_tkip_key(priv, priv->tx_key);
4817 }
4818
4819 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
4820 priv->tx_key = idx;
4821 set_key = ((alg == IW_ENCODE_ALG_TKIP) ||
4822 (ext->key_len > 0)) ? 1 : 0;
4823 }
4824
4825 if (set_key) {
4826 /* Set the requested key first */
4827 switch (alg) {
4828 case IW_ENCODE_ALG_NONE:
4829 priv->encode_alg = alg;
4830 priv->keys[idx].len = 0;
4831 break;
4832
4833 case IW_ENCODE_ALG_WEP:
4834 if (ext->key_len > SMALL_KEY_SIZE)
4835 key_len = LARGE_KEY_SIZE;
4836 else if (ext->key_len > 0)
4837 key_len = SMALL_KEY_SIZE;
4838 else
4839 goto out;
4840
4841 priv->encode_alg = alg;
4842 priv->keys[idx].len = cpu_to_le16(key_len);
4843
4844 key_len = min(ext->key_len, key_len);
4845
4846 memset(priv->keys[idx].data, 0, ORINOCO_MAX_KEY_SIZE);
4847 memcpy(priv->keys[idx].data, ext->key, key_len);
4848 break;
4849
4850 case IW_ENCODE_ALG_TKIP:
4851 {
4852 hermes_t *hw = &priv->hw;
4853 u8 *tkip_iv = NULL;
4854
4855 if (!priv->has_wpa ||
4856 (ext->key_len > sizeof(priv->tkip_key[0])))
4857 goto out;
4858
4859 priv->encode_alg = alg;
4860 memset(&priv->tkip_key[idx], 0,
4861 sizeof(priv->tkip_key[idx]));
4862 memcpy(&priv->tkip_key[idx], ext->key, ext->key_len);
4863
4864 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
4865 tkip_iv = &ext->rx_seq[0];
4866
4867 err = __orinoco_hw_set_tkip_key(hw, idx,
4868 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
4869 (u8 *) &priv->tkip_key[idx],
4870 tkip_iv, NULL);
4871 if (err)
4872 printk(KERN_ERR "%s: Error %d setting TKIP key\n",
4873 dev->name, err);
4874
4875 goto out;
4876 }
4877 default:
4878 goto out;
4879 }
4880 }
4881 err = -EINPROGRESS;
4882 out:
4883 orinoco_unlock(priv, &flags);
4884
4885 return err;
4886}
4887
4888static int orinoco_ioctl_get_encodeext(struct net_device *dev,
4889 struct iw_request_info *info,
4890 union iwreq_data *wrqu,
4891 char *extra)
4892{
4893 struct orinoco_private *priv = netdev_priv(dev);
4894 struct iw_point *encoding = &wrqu->encoding;
4895 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
4896 int idx, max_key_len;
4897 unsigned long flags;
4898 int err;
4899
4900 if (orinoco_lock(priv, &flags) != 0)
4901 return -EBUSY;
4902
4903 err = -EINVAL;
4904 max_key_len = encoding->length - sizeof(*ext);
4905 if (max_key_len < 0)
4906 goto out;
4907
4908 idx = encoding->flags & IW_ENCODE_INDEX;
4909 if (idx) {
4910 if ((idx < 1) || (idx > 4))
4911 goto out;
4912 idx--;
4913 } else
4914 idx = priv->tx_key;
4915
4916 encoding->flags = idx + 1;
4917 memset(ext, 0, sizeof(*ext));
4918
4919 ext->alg = priv->encode_alg;
4920 switch (priv->encode_alg) {
4921 case IW_ENCODE_ALG_NONE:
4922 ext->key_len = 0;
4923 encoding->flags |= IW_ENCODE_DISABLED;
4924 break;
4925 case IW_ENCODE_ALG_WEP:
4926 ext->key_len = min_t(u16, le16_to_cpu(priv->keys[idx].len),
4927 max_key_len);
4928 memcpy(ext->key, priv->keys[idx].data, ext->key_len);
4929 encoding->flags |= IW_ENCODE_ENABLED;
4930 break;
4931 case IW_ENCODE_ALG_TKIP:
4932 ext->key_len = min_t(u16, sizeof(struct orinoco_tkip_key),
4933 max_key_len);
4934 memcpy(ext->key, &priv->tkip_key[idx], ext->key_len);
4935 encoding->flags |= IW_ENCODE_ENABLED;
4936 break;
4937 }
4938
4939 err = 0;
4940 out:
4941 orinoco_unlock(priv, &flags);
4942
4943 return err;
4944}
4945
4946static int orinoco_ioctl_set_auth(struct net_device *dev,
4947 struct iw_request_info *info,
4948 union iwreq_data *wrqu, char *extra)
4949{
4950 struct orinoco_private *priv = netdev_priv(dev);
4951 hermes_t *hw = &priv->hw;
4952 struct iw_param *param = &wrqu->param;
4953 unsigned long flags;
4954 int ret = -EINPROGRESS;
4955
4956 if (orinoco_lock(priv, &flags) != 0)
4957 return -EBUSY;
4958
4959 switch (param->flags & IW_AUTH_INDEX) {
4960 case IW_AUTH_WPA_VERSION:
4961 case IW_AUTH_CIPHER_PAIRWISE:
4962 case IW_AUTH_CIPHER_GROUP:
4963 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
4964 case IW_AUTH_PRIVACY_INVOKED:
4965 case IW_AUTH_DROP_UNENCRYPTED:
4966 /*
4967 * orinoco does not use these parameters
4968 */
4969 break;
4970
4971 case IW_AUTH_KEY_MGMT:
4972 /* wl_lkm implies value 2 == PSK for Hermes I
4973 * which ties in with WEXT
4974 * no other hints tho :(
4975 */
4976 priv->key_mgmt = param->value;
4977 break;
4978
4979 case IW_AUTH_TKIP_COUNTERMEASURES:
4980 /* When countermeasures are enabled, shut down the
4981 * card; when disabled, re-enable the card. This must
4982 * take effect immediately.
4983 *
4984 * TODO: Make sure that the EAPOL message is getting
4985 * out before the card is disabled
4986 */
4987 if (param->value) {
4988 priv->tkip_cm_active = 1;
4989 ret = hermes_disable_port(hw, 0);
4990 } else {
4991 priv->tkip_cm_active = 0;
4992 ret = hermes_enable_port(hw, 0);
4993 }
4994 break;
4995
4996 case IW_AUTH_80211_AUTH_ALG:
4997 if (param->value & IW_AUTH_ALG_SHARED_KEY)
4998 priv->wep_restrict = 1;
4999 else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
5000 priv->wep_restrict = 0;
5001 else
5002 ret = -EINVAL;
5003 break;
5004
5005 case IW_AUTH_WPA_ENABLED:
5006 if (priv->has_wpa) {
5007 priv->wpa_enabled = param->value ? 1 : 0;
5008 } else {
5009 if (param->value)
5010 ret = -EOPNOTSUPP;
5011 /* else silently accept disable of WPA */
5012 priv->wpa_enabled = 0;
5013 }
5014 break;
5015
5016 default:
5017 ret = -EOPNOTSUPP;
5018 }
5019
5020 orinoco_unlock(priv, &flags);
5021 return ret;
5022}
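
Handlers such as orinoco_ioctl_set_auth() above are reached from user space through the SIOCSIWAUTH ioctl carrying a struct iwreq. The fragment below is only a rough userspace sketch of that path, not part of the driver; the function name wpa_enable and the caller-supplied socket are illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/wireless.h>

/* Illustrative userspace helper: ask the driver to enable or disable WPA.
 * sock is an ordinary datagram socket, e.g. socket(AF_INET, SOCK_DGRAM, 0).
 * The wireless extensions core dispatches this to orinoco_ioctl_set_auth(). */
static int wpa_enable(int sock, const char *ifname, int enable)
{
	struct iwreq wrq;

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_ifrn.ifrn_name, ifname, IFNAMSIZ);
	wrq.u.param.flags = IW_AUTH_WPA_ENABLED;	/* selector within IW_AUTH_INDEX */
	wrq.u.param.value = enable ? 1 : 0;

	return ioctl(sock, SIOCSIWAUTH, &wrq);
}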
5023
5024static int orinoco_ioctl_get_auth(struct net_device *dev,
5025 struct iw_request_info *info,
5026 union iwreq_data *wrqu, char *extra)
5027{
5028 struct orinoco_private *priv = netdev_priv(dev);
5029 struct iw_param *param = &wrqu->param;
5030 unsigned long flags;
5031 int ret = 0;
5032
5033 if (orinoco_lock(priv, &flags) != 0)
5034 return -EBUSY;
5035
5036 switch (param->flags & IW_AUTH_INDEX) {
5037 case IW_AUTH_KEY_MGMT:
5038 param->value = priv->key_mgmt;
5039 break;
5040
5041 case IW_AUTH_TKIP_COUNTERMEASURES:
5042 param->value = priv->tkip_cm_active;
5043 break;
5044
5045 case IW_AUTH_80211_AUTH_ALG:
5046 if (priv->wep_restrict)
5047 param->value = IW_AUTH_ALG_SHARED_KEY;
5048 else
5049 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
5050 break;
5051
5052 case IW_AUTH_WPA_ENABLED:
5053 param->value = priv->wpa_enabled;
5054 break;
5055
5056 default:
5057 ret = -EOPNOTSUPP;
5058 }
5059
5060 orinoco_unlock(priv, &flags);
5061 return ret;
5062}
5063
5064static int orinoco_ioctl_set_genie(struct net_device *dev,
5065 struct iw_request_info *info,
5066 union iwreq_data *wrqu, char *extra)
5067{
5068 struct orinoco_private *priv = netdev_priv(dev);
5069 u8 *buf;
5070 unsigned long flags;
5071
5072 /* cut off at IEEE80211_MAX_DATA_LEN */
5073 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
5074 (wrqu->data.length && (extra == NULL)))
5075 return -EINVAL;
5076
5077 if (wrqu->data.length) {
5078 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
5079 if (buf == NULL)
5080 return -ENOMEM;
5081
5082 memcpy(buf, extra, wrqu->data.length);
5083 } else
5084 buf = NULL;
5085
5086 if (orinoco_lock(priv, &flags) != 0) {
5087 kfree(buf);
5088 return -EBUSY;
5089 }
5090
5091 kfree(priv->wpa_ie);
5092 priv->wpa_ie = buf;
5093 priv->wpa_ie_len = wrqu->data.length;
5094
5095 if (priv->wpa_ie) {
5096 /* Looks like wl_lkm wants to check the auth alg, and
5097 * somehow pass it to the firmware.
5098 * Instead it just calls the key mgmt rid
5099 * - we do this in set auth.
5100 */
5101 }
5102
5103 orinoco_unlock(priv, &flags);
5104 return 0;
5105}
5106
5107static int orinoco_ioctl_get_genie(struct net_device *dev,
5108 struct iw_request_info *info,
5109 union iwreq_data *wrqu, char *extra)
5110{
5111 struct orinoco_private *priv = netdev_priv(dev);
5112 unsigned long flags;
5113 int err = 0;
5114
5115 if (orinoco_lock(priv, &flags) != 0)
5116 return -EBUSY;
5117
5118 if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) {
5119 wrqu->data.length = 0;
5120 goto out;
5121 }
5122
5123 if (wrqu->data.length < priv->wpa_ie_len) {
5124 err = -E2BIG;
5125 goto out;
5126 }
5127
5128 wrqu->data.length = priv->wpa_ie_len;
5129 memcpy(extra, priv->wpa_ie, priv->wpa_ie_len);
5130
5131out:
5132 orinoco_unlock(priv, &flags);
5133 return err;
5134}
5135
5136static int orinoco_ioctl_set_mlme(struct net_device *dev,
5137 struct iw_request_info *info,
5138 union iwreq_data *wrqu, char *extra)
5139{
5140 struct orinoco_private *priv = netdev_priv(dev);
5141 hermes_t *hw = &priv->hw;
5142 struct iw_mlme *mlme = (struct iw_mlme *)extra;
5143 unsigned long flags;
5144 int ret = 0;
5145
5146 if (orinoco_lock(priv, &flags) != 0)
5147 return -EBUSY;
5148
5149 switch (mlme->cmd) {
5150 case IW_MLME_DEAUTH:
5151 /* silently ignore */
5152 break;
5153
5154 case IW_MLME_DISASSOC:
5155 {
5156 struct {
5157 u8 addr[ETH_ALEN];
5158 __le16 reason_code;
5159 } __attribute__ ((packed)) buf;
5160
5161 memcpy(buf.addr, mlme->addr.sa_data, ETH_ALEN);
5162 buf.reason_code = cpu_to_le16(mlme->reason_code);
5163 ret = HERMES_WRITE_RECORD(hw, USER_BAP,
5164 HERMES_RID_CNFDISASSOCIATE,
5165 &buf);
5166 break;
5167 }
5168 default:
5169 ret = -EOPNOTSUPP;
5170 }
5171
5172 orinoco_unlock(priv, &flags);
5173 return ret;
5174}
5175
5176static int orinoco_ioctl_getretry(struct net_device *dev,
5177 struct iw_request_info *info,
5178 struct iw_param *rrq,
5179 char *extra)
5180{
5181 struct orinoco_private *priv = netdev_priv(dev);
5182 hermes_t *hw = &priv->hw;
5183 int err = 0;
5184 u16 short_limit, long_limit, lifetime;
5185 unsigned long flags;
5186
5187 if (orinoco_lock(priv, &flags) != 0)
5188 return -EBUSY;
5189
5190 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
5191 &short_limit);
5192 if (err)
5193 goto out;
5194
5195 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
5196 &long_limit);
5197 if (err)
5198 goto out;
5199
5200 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
5201 &lifetime);
5202 if (err)
5203 goto out;
5204
5205 rrq->disabled = 0; /* Can't be disabled */
5206
5207 /* Note : by default, display the retry number */
5208 if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
5209 rrq->flags = IW_RETRY_LIFETIME;
5210 rrq->value = lifetime * 1000; /* ??? */
5211 } else {
5212 /* By default, display the min number */
5213 if ((rrq->flags & IW_RETRY_LONG)) {
5214 rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
5215 rrq->value = long_limit;
5216 } else {
5217 rrq->flags = IW_RETRY_LIMIT;
5218 rrq->value = short_limit;
5219 if (short_limit != long_limit)
5220 rrq->flags |= IW_RETRY_SHORT;
5221 }
5222 }
5223
5224 out:
5225 orinoco_unlock(priv, &flags);
5226
5227 return err;
5228}
5229
5230static int orinoco_ioctl_reset(struct net_device *dev,
5231 struct iw_request_info *info,
5232 void *wrqu,
5233 char *extra)
5234{
5235 struct orinoco_private *priv = netdev_priv(dev);
5236
5237 if (! capable(CAP_NET_ADMIN))
5238 return -EPERM;
5239
5240 if (info->cmd == (SIOCIWFIRSTPRIV + 0x1)) {
5241 printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
5242
5243 /* Firmware reset */
5244 orinoco_reset(&priv->reset_work);
5245 } else {
5246 printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
5247
5248 schedule_work(&priv->reset_work);
5249 }
5250
5251 return 0;
5252}
5253
5254static int orinoco_ioctl_setibssport(struct net_device *dev,
5255 struct iw_request_info *info,
5256 void *wrqu,
5257 char *extra)
5258
5259{
5260 struct orinoco_private *priv = netdev_priv(dev);
5261 int val = *( (int *) extra );
5262 unsigned long flags;
5263
5264 if (orinoco_lock(priv, &flags) != 0)
5265 return -EBUSY;
5266
5267 priv->ibss_port = val;
5268
5269 /* Actually update the mode we are using */
5270 set_port_type(priv);
5271
5272 orinoco_unlock(priv, &flags);
5273 return -EINPROGRESS; /* Call commit handler */
5274}
5275
5276static int orinoco_ioctl_getibssport(struct net_device *dev,
5277 struct iw_request_info *info,
5278 void *wrqu,
5279 char *extra)
5280{
5281 struct orinoco_private *priv = netdev_priv(dev);
5282 int *val = (int *) extra;
5283
5284 *val = priv->ibss_port;
5285 return 0;
5286}
5287
5288static int orinoco_ioctl_setport3(struct net_device *dev,
5289 struct iw_request_info *info,
5290 void *wrqu,
5291 char *extra)
5292{
5293 struct orinoco_private *priv = netdev_priv(dev);
5294 int val = *( (int *) extra );
5295 int err = 0;
5296 unsigned long flags;
5297
5298 if (orinoco_lock(priv, &flags) != 0)
5299 return -EBUSY;
5300
5301 switch (val) {
5302 case 0: /* Try to do IEEE ad-hoc mode */
5303 if (! priv->has_ibss) {
5304 err = -EINVAL;
5305 break;
5306 }
5307 priv->prefer_port3 = 0;
5308
5309 break;
5310
5311 case 1: /* Try to do Lucent proprietary ad-hoc mode */
5312 if (! priv->has_port3) {
5313 err = -EINVAL;
5314 break;
5315 }
5316 priv->prefer_port3 = 1;
5317 break;
5318
5319 default:
5320 err = -EINVAL;
5321 }
5322
5323 if (! err) {
5324 /* Actually update the mode we are using */
5325 set_port_type(priv);
5326 err = -EINPROGRESS;
5327 }
5328
5329 orinoco_unlock(priv, &flags);
5330
5331 return err;
5332}
5333
5334static int orinoco_ioctl_getport3(struct net_device *dev,
5335 struct iw_request_info *info,
5336 void *wrqu,
5337 char *extra)
5338{
5339 struct orinoco_private *priv = netdev_priv(dev);
5340 int *val = (int *) extra;
5341
5342 *val = priv->prefer_port3;
5343 return 0;
5344}
5345
5346static int orinoco_ioctl_setpreamble(struct net_device *dev,
5347 struct iw_request_info *info,
5348 void *wrqu,
5349 char *extra)
5350{
5351 struct orinoco_private *priv = netdev_priv(dev);
5352 unsigned long flags;
5353 int val;
5354
5355 if (! priv->has_preamble)
5356 return -EOPNOTSUPP;
5357
5358 /* 802.11b has recently defined some short preamble.
5359 * Basically, the Phy header has been reduced in size.
5360 * This increases performance, especially at high rates
5361 * (the preamble is transmitted at 1 Mb/s); unfortunately
5362 * this causes compatibility troubles... - Jean II */
5363 val = *( (int *) extra );
5364
5365 if (orinoco_lock(priv, &flags) != 0)
5366 return -EBUSY;
5367
5368 if (val)
5369 priv->preamble = 1;
5370 else
5371 priv->preamble = 0;
5372
5373 orinoco_unlock(priv, &flags);
5374
5375 return -EINPROGRESS; /* Call commit handler */
5376}
5377
5378static int orinoco_ioctl_getpreamble(struct net_device *dev,
5379 struct iw_request_info *info,
5380 void *wrqu,
5381 char *extra)
5382{
5383 struct orinoco_private *priv = netdev_priv(dev);
5384 int *val = (int *) extra;
5385
5386 if (! priv->has_preamble)
5387 return -EOPNOTSUPP;
5388
5389 *val = priv->preamble;
5390 return 0;
5391}
5392
5393/* ioctl interface to hermes_read_ltv()
5394 * To use with iwpriv, pass the RID as the token argument, e.g.
5395 * iwpriv get_rid [0xfc00]
5396 * At least Wireless Tools 25 is required to use iwpriv.
5397 * For Wireless Tools 25 and 26 append "dummy" at the end. */
5398static int orinoco_ioctl_getrid(struct net_device *dev,
5399 struct iw_request_info *info,
5400 struct iw_point *data,
5401 char *extra)
5402{
5403 struct orinoco_private *priv = netdev_priv(dev);
5404 hermes_t *hw = &priv->hw;
5405 int rid = data->flags;
5406 u16 length;
5407 int err;
5408 unsigned long flags;
5409
5410 /* It's a "get" function, but we don't want users to access the
5411 * WEP key and other raw firmware data */
5412 if (! capable(CAP_NET_ADMIN))
5413 return -EPERM;
5414
5415 if (rid < 0xfc00 || rid > 0xffff)
5416 return -EINVAL;
5417
5418 if (orinoco_lock(priv, &flags) != 0)
5419 return -EBUSY;
5420
5421 err = hermes_read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
5422 extra);
5423 if (err)
5424 goto out;
5425
5426 data->length = min_t(u16, HERMES_RECLEN_TO_BYTES(length),
5427 MAX_RID_LEN);
5428
5429 out:
5430 orinoco_unlock(priv, &flags);
5431 return err;
5432}
5433
5434/* Trigger a scan (look for other cells in the vicinity) */
5435static int orinoco_ioctl_setscan(struct net_device *dev,
5436 struct iw_request_info *info,
5437 struct iw_point *srq,
5438 char *extra)
5439{
5440 struct orinoco_private *priv = netdev_priv(dev);
5441 hermes_t *hw = &priv->hw;
5442 struct iw_scan_req *si = (struct iw_scan_req *) extra;
5443 int err = 0;
5444 unsigned long flags;
5445
5446 /* Note : you may have realised that, as this is a SET operation,
5447 * this is privileged and therefore a normal user can't
5448 * perform scanning.
5449 * This is not an error: while the device performs scanning,
5450 * traffic doesn't flow, so it's a perfect DoS...
5451 * Jean II */
5452
5453 if (orinoco_lock(priv, &flags) != 0)
5454 return -EBUSY;
5455
5456 /* Scanning with port 0 disabled would fail */
5457 if (!netif_running(dev)) {
5458 err = -ENETDOWN;
5459 goto out;
5460 }
5461
5462 /* In monitor mode, the scan results are always empty.
5463 * Probe responses are passed to the driver as received
5464 * frames and could be processed in software. */
5465 if (priv->iw_mode == IW_MODE_MONITOR) {
5466 err = -EOPNOTSUPP;
5467 goto out;
5468 }
5469
5470 /* Note : because we don't lock out the irq handler, the way
5471 * we access scan variables in priv is critical.
5472 * o scan_inprogress : not touched by irq handler
5473 * o scan_mode : not touched by irq handler
5474 * Before modifying anything on those variables, please think hard !
5475 * Jean II */
5476
5477 /* Save flags */
5478 priv->scan_mode = srq->flags;
5479
5480 /* Always trigger scanning, even if it's in progress.
5481 * This way, if the info frame gets lost, we will recover somewhat
5482 * gracefully - Jean II */
5483
5484 if (priv->has_hostscan) {
5485 switch (priv->firmware_type) {
5486 case FIRMWARE_TYPE_SYMBOL:
5487 err = hermes_write_wordrec(hw, USER_BAP,
5488 HERMES_RID_CNFHOSTSCAN_SYMBOL,
5489 HERMES_HOSTSCAN_SYMBOL_ONCE |
5490 HERMES_HOSTSCAN_SYMBOL_BCAST);
5491 break;
5492 case FIRMWARE_TYPE_INTERSIL: {
5493 __le16 req[3];
5494
5495 req[0] = cpu_to_le16(0x3fff); /* All channels */
5496 req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */
5497 req[2] = 0; /* Any ESSID */
5498 err = HERMES_WRITE_RECORD(hw, USER_BAP,
5499 HERMES_RID_CNFHOSTSCAN, &req);
5500 }
5501 break;
5502 case FIRMWARE_TYPE_AGERE:
5503 if (priv->scan_mode & IW_SCAN_THIS_ESSID) {
5504 struct hermes_idstring idbuf;
5505 size_t len = min(sizeof(idbuf.val),
5506 (size_t) si->essid_len);
5507 idbuf.len = cpu_to_le16(len);
5508 memcpy(idbuf.val, si->essid, len);
5509
5510 err = hermes_write_ltv(hw, USER_BAP,
5511 HERMES_RID_CNFSCANSSID_AGERE,
5512 HERMES_BYTES_TO_RECLEN(len + 2),
5513 &idbuf);
5514 } else
5515 err = hermes_write_wordrec(hw, USER_BAP,
5516 HERMES_RID_CNFSCANSSID_AGERE,
5517 0); /* Any ESSID */
5518 if (err)
5519 break;
5520
5521 if (priv->has_ext_scan) {
5522 /* Clear scan results at the start of
5523 * an extended scan */
5524 orinoco_clear_scan_results(priv,
5525 msecs_to_jiffies(15000));
5526
5527 /* TODO: Is this available on older firmware?
5528 * Can we use it to scan specific channels
5529 * for IW_SCAN_THIS_FREQ? */
5530 err = hermes_write_wordrec(hw, USER_BAP,
5531 HERMES_RID_CNFSCANCHANNELS2GHZ,
5532 0x7FFF);
5533 if (err)
5534 goto out;
5535
5536 err = hermes_inquire(hw,
5537 HERMES_INQ_CHANNELINFO);
5538 } else
5539 err = hermes_inquire(hw, HERMES_INQ_SCAN);
5540 break;
5541 }
5542 } else
5543 err = hermes_inquire(hw, HERMES_INQ_SCAN);
5544
5545 /* One more client */
5546 if (! err)
5547 priv->scan_inprogress = 1;
5548
5549 out:
5550 orinoco_unlock(priv, &flags);
5551 return err;
5552}
5553
5554#define MAX_CUSTOM_LEN 64
5555
5556/* Translate scan data returned from the card to a card independent
5557 * format that the Wireless Tools will understand - Jean II */
5558static inline char *orinoco_translate_scan(struct net_device *dev,
5559 struct iw_request_info *info,
5560 char *current_ev,
5561 char *end_buf,
5562 union hermes_scan_info *bss,
5563 unsigned long last_scanned)
5564{
5565 struct orinoco_private *priv = netdev_priv(dev);
5566 u16 capabilities;
5567 u16 channel;
5568 struct iw_event iwe; /* Temporary buffer */
5569 char custom[MAX_CUSTOM_LEN];
5570
5571 memset(&iwe, 0, sizeof(iwe));
5572
5573 /* First entry *MUST* be the AP MAC address */
5574 iwe.cmd = SIOCGIWAP;
5575 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
5576 memcpy(iwe.u.ap_addr.sa_data, bss->a.bssid, ETH_ALEN);
5577 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5578 &iwe, IW_EV_ADDR_LEN);
5579
5580 /* Other entries will be displayed in the order we give them */
5581
5582 /* Add the ESSID */
5583 iwe.u.data.length = le16_to_cpu(bss->a.essid_len);
5584 if (iwe.u.data.length > 32)
5585 iwe.u.data.length = 32;
5586 iwe.cmd = SIOCGIWESSID;
5587 iwe.u.data.flags = 1;
5588 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5589 &iwe, bss->a.essid);
5590
5591 /* Add mode */
5592 iwe.cmd = SIOCGIWMODE;
5593 capabilities = le16_to_cpu(bss->a.capabilities);
5594 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
5595 if (capabilities & WLAN_CAPABILITY_ESS)
5596 iwe.u.mode = IW_MODE_MASTER;
5597 else
5598 iwe.u.mode = IW_MODE_ADHOC;
5599 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5600 &iwe, IW_EV_UINT_LEN);
5601 }
5602
5603 channel = bss->s.channel;
5604 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
5605 /* Add channel and frequency */
5606 iwe.cmd = SIOCGIWFREQ;
5607 iwe.u.freq.m = channel;
5608 iwe.u.freq.e = 0;
5609 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5610 &iwe, IW_EV_FREQ_LEN);
5611
5612 iwe.u.freq.m = channel_frequency[channel-1] * 100000;
5613 iwe.u.freq.e = 1;
5614 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5615 &iwe, IW_EV_FREQ_LEN);
5616 }
5617
5618 /* Add quality statistics. level and noise in dB. No link quality */
5619 iwe.cmd = IWEVQUAL;
5620 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
5621 iwe.u.qual.level = (__u8) le16_to_cpu(bss->a.level) - 0x95;
5622 iwe.u.qual.noise = (__u8) le16_to_cpu(bss->a.noise) - 0x95;
5623 /* Wireless tools prior to 27.pre22 will show link quality
5624 * anyway, so we provide a reasonable value. */
5625 if (iwe.u.qual.level > iwe.u.qual.noise)
5626 iwe.u.qual.qual = iwe.u.qual.level - iwe.u.qual.noise;
5627 else
5628 iwe.u.qual.qual = 0;
5629 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5630 &iwe, IW_EV_QUAL_LEN);
5631
5632 /* Add encryption capability */
5633 iwe.cmd = SIOCGIWENCODE;
5634 if (capabilities & WLAN_CAPABILITY_PRIVACY)
5635 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
5636 else
5637 iwe.u.data.flags = IW_ENCODE_DISABLED;
5638 iwe.u.data.length = 0;
5639 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5640 &iwe, NULL);
5641
5642 /* Bit rate is not available in Lucent/Agere firmwares */
5643 if (priv->firmware_type != FIRMWARE_TYPE_AGERE) {
5644 char *current_val = current_ev + iwe_stream_lcp_len(info);
5645 int i;
5646 int step;
5647
5648 if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)
5649 step = 2;
5650 else
5651 step = 1;
5652
5653 iwe.cmd = SIOCGIWRATE;
5654 /* Those two flags are ignored... */
5655 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
5656 /* Max 10 values */
5657 for (i = 0; i < 10; i += step) {
5658 /* NULL terminated */
5659 if (bss->p.rates[i] == 0x0)
5660 break;
5661 /* Bit rate given in 500 kb/s units (+ 0x80) */
5662 iwe.u.bitrate.value =
5663 ((bss->p.rates[i] & 0x7f) * 500000);
5664 current_val = iwe_stream_add_value(info, current_ev,
5665 current_val,
5666 end_buf, &iwe,
5667 IW_EV_PARAM_LEN);
5668 }
5669 /* Check if we added any event */
5670 if ((current_val - current_ev) > iwe_stream_lcp_len(info))
5671 current_ev = current_val;
5672 }
5673
5674 /* Beacon interval */
5675 iwe.cmd = IWEVCUSTOM;
5676 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5677 "bcn_int=%d",
5678 le16_to_cpu(bss->a.beacon_interv));
5679 if (iwe.u.data.length)
5680 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5681 &iwe, custom);
5682
5683 /* Capabilities */
5684 iwe.cmd = IWEVCUSTOM;
5685 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5686 "capab=0x%04x",
5687 capabilities);
5688 if (iwe.u.data.length)
5689 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5690 &iwe, custom);
5691
5692 /* Add EXTRA: Age to display seconds since last beacon/probe response
5693 * for given network. */
5694 iwe.cmd = IWEVCUSTOM;
5695 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5696 " Last beacon: %dms ago",
5697 jiffies_to_msecs(jiffies - last_scanned));
5698 if (iwe.u.data.length)
5699 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5700 &iwe, custom);
5701
5702 return current_ev;
5703}
5704
5705static inline char *orinoco_translate_ext_scan(struct net_device *dev,
5706 struct iw_request_info *info,
5707 char *current_ev,
5708 char *end_buf,
5709 struct agere_ext_scan_info *bss,
5710 unsigned long last_scanned)
5711{
5712 u16 capabilities;
5713 u16 channel;
5714 struct iw_event iwe; /* Temporary buffer */
5715 char custom[MAX_CUSTOM_LEN];
5716 u8 *ie;
5717
5718 memset(&iwe, 0, sizeof(iwe));
5719
5720 /* First entry *MUST* be the AP MAC address */
5721 iwe.cmd = SIOCGIWAP;
5722 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
5723 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
5724 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5725 &iwe, IW_EV_ADDR_LEN);
5726
5727 /* Other entries will be displayed in the order we give them */
5728
5729 /* Add the ESSID */
5730 ie = bss->data;
5731 iwe.u.data.length = ie[1];
5732 if (iwe.u.data.length) {
5733 if (iwe.u.data.length > 32)
5734 iwe.u.data.length = 32;
5735 iwe.cmd = SIOCGIWESSID;
5736 iwe.u.data.flags = 1;
5737 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5738 &iwe, &ie[2]);
5739 }
5740
5741 /* Add mode */
5742 capabilities = le16_to_cpu(bss->capabilities);
5743 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
5744 iwe.cmd = SIOCGIWMODE;
5745 if (capabilities & WLAN_CAPABILITY_ESS)
5746 iwe.u.mode = IW_MODE_MASTER;
5747 else
5748 iwe.u.mode = IW_MODE_ADHOC;
5749 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5750 &iwe, IW_EV_UINT_LEN);
5751 }
5752
5753 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_DS_PARAMS);
5754 channel = ie ? ie[2] : 0;
5755 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
5756 /* Add channel and frequency */
5757 iwe.cmd = SIOCGIWFREQ;
5758 iwe.u.freq.m = channel;
5759 iwe.u.freq.e = 0;
5760 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5761 &iwe, IW_EV_FREQ_LEN);
5762
5763 iwe.u.freq.m = channel_frequency[channel-1] * 100000;
5764 iwe.u.freq.e = 1;
5765 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5766 &iwe, IW_EV_FREQ_LEN);
5767 }
5768
5769 /* Add quality statistics. level and noise in dB. No link quality */
5770 iwe.cmd = IWEVQUAL;
5771 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
5772 iwe.u.qual.level = bss->level - 0x95;
5773 iwe.u.qual.noise = bss->noise - 0x95;
5774 /* Wireless tools prior to 27.pre22 will show link quality
5775 * anyway, so we provide a reasonable value. */
5776 if (iwe.u.qual.level > iwe.u.qual.noise)
5777 iwe.u.qual.qual = iwe.u.qual.level - iwe.u.qual.noise;
5778 else
5779 iwe.u.qual.qual = 0;
5780 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5781 &iwe, IW_EV_QUAL_LEN);
5782
5783 /* Add encryption capability */
5784 iwe.cmd = SIOCGIWENCODE;
5785 if (capabilities & WLAN_CAPABILITY_PRIVACY)
5786 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
5787 else
5788 iwe.u.data.flags = IW_ENCODE_DISABLED;
5789 iwe.u.data.length = 0;
5790 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5791 &iwe, NULL);
5792
5793 /* WPA IE */
5794 ie = orinoco_get_wpa_ie(bss->data, sizeof(bss->data));
5795 if (ie) {
5796 iwe.cmd = IWEVGENIE;
5797 iwe.u.data.length = ie[1] + 2;
5798 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5799 &iwe, ie);
5800 }
5801
5802 /* RSN IE */
5803 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_RSN);
5804 if (ie) {
5805 iwe.cmd = IWEVGENIE;
5806 iwe.u.data.length = ie[1] + 2;
5807 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5808 &iwe, ie);
5809 }
5810
5811 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_SUPP_RATES);
5812 if (ie) {
5813 char *p = current_ev + iwe_stream_lcp_len(info);
5814 int i;
5815
5816 iwe.cmd = SIOCGIWRATE;
5817 /* Those two flags are ignored... */
5818 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
5819
5820 for (i = 2; i < (ie[1] + 2); i++) {
5821 iwe.u.bitrate.value = ((ie[i] & 0x7F) * 500000);
5822 p = iwe_stream_add_value(info, current_ev, p, end_buf,
5823 &iwe, IW_EV_PARAM_LEN);
5824 }
5825 /* Check if we added any event */
5826 if (p > (current_ev + iwe_stream_lcp_len(info)))
5827 current_ev = p;
5828 }
5829
5830 /* Timestamp */
5831 iwe.cmd = IWEVCUSTOM;
5832 iwe.u.data.length =
5833 snprintf(custom, MAX_CUSTOM_LEN, "tsf=%016llx",
5834 (unsigned long long) le64_to_cpu(bss->timestamp));
5835 if (iwe.u.data.length)
5836 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5837 &iwe, custom);
5838
5839 /* Beacon interval */
5840 iwe.cmd = IWEVCUSTOM;
5841 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5842 "bcn_int=%d",
5843 le16_to_cpu(bss->beacon_interval));
5844 if (iwe.u.data.length)
5845 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5846 &iwe, custom);
5847
5848 /* Capabilities */
5849 iwe.cmd = IWEVCUSTOM;
5850 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5851 "capab=0x%04x",
5852 capabilities);
5853 if (iwe.u.data.length)
5854 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5855 &iwe, custom);
5856
5857 /* Add EXTRA: Age to display seconds since last beacon/probe response
5858 * for given network. */
5859 iwe.cmd = IWEVCUSTOM;
5860 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5861 " Last beacon: %dms ago",
5862 jiffies_to_msecs(jiffies - last_scanned));
5863 if (iwe.u.data.length)
5864 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5865 &iwe, custom);
5866
5867 return current_ev;
5868}
5869
5870/* Return results of a scan */
5871static int orinoco_ioctl_getscan(struct net_device *dev,
5872 struct iw_request_info *info,
5873 struct iw_point *srq,
5874 char *extra)
5875{
5876 struct orinoco_private *priv = netdev_priv(dev);
5877 int err = 0;
5878 unsigned long flags;
5879 char *current_ev = extra;
5880
5881 if (orinoco_lock(priv, &flags) != 0)
5882 return -EBUSY;
5883
5884 if (priv->scan_inprogress) {
5885 /* Important note : we don't want to block the caller
5886 * until results are ready for various reasons.
5887 * First, managing wait queues is complex and racy.
5888 * Second, we grab some rtnetlink lock before coming
5889 * here (in dev_ioctl()).
5890 * Third, we generate a Wireless Event, so the
5891 * caller can wait on that itself - Jean II */
5892 err = -EAGAIN;
5893 goto out;
5894 }
5895
5896 if (priv->has_ext_scan) {
5897 struct xbss_element *bss;
5898
5899 list_for_each_entry(bss, &priv->bss_list, list) {
5900 /* Translate this entry to WE format */
5901 current_ev =
5902 orinoco_translate_ext_scan(dev, info,
5903 current_ev,
5904 extra + srq->length,
5905 &bss->bss,
5906 bss->last_scanned);
5907
5908 /* Check if there is space for one more entry */
5909 if ((extra + srq->length - current_ev)
5910 <= IW_EV_ADDR_LEN) {
5911 /* Ask user space to try again with a
5912 * bigger buffer */
5913 err = -E2BIG;
5914 goto out;
5915 }
5916 }
5917
5918 } else {
5919 struct bss_element *bss;
5920
5921 list_for_each_entry(bss, &priv->bss_list, list) {
5922 /* Translate this entry to WE format */
5923 current_ev = orinoco_translate_scan(dev, info,
5924 current_ev,
5925 extra + srq->length,
5926 &bss->bss,
5927 bss->last_scanned);
5928
5929 /* Check if there is space for one more entry */
5930 if ((extra + srq->length - current_ev)
5931 <= IW_EV_ADDR_LEN) {
5932 /* Ask user space to try again with a
5933 * bigger buffer */
5934 err = -E2BIG;
5935 goto out;
5936 }
5937 }
5938 }
5939
5940 srq->length = (current_ev - extra);
5941 srq->flags = (__u16) priv->scan_mode;
5942
5943out:
5944 orinoco_unlock(priv, &flags);
5945 return err;
5946}
5947
5948/* Commit handler, called after set operations */
5949static int orinoco_ioctl_commit(struct net_device *dev,
5950 struct iw_request_info *info,
5951 void *wrqu,
5952 char *extra)
5953{
5954 struct orinoco_private *priv = netdev_priv(dev);
5955 struct hermes *hw = &priv->hw;
5956 unsigned long flags;
5957 int err = 0;
5958
5959 if (!priv->open)
5960 return 0;
5961
5962 if (priv->broken_disableport) {
5963 orinoco_reset(&priv->reset_work);
5964 return 0;
5965 }
5966
5967 if (orinoco_lock(priv, &flags) != 0)
5968 return err;
5969
5970 err = hermes_disable_port(hw, 0);
5971 if (err) {
5972 printk(KERN_WARNING "%s: Unable to disable port "
5973 "while reconfiguring card\n", dev->name);
5974 priv->broken_disableport = 1;
5975 goto out;
5976 }
5977
5978 err = __orinoco_program_rids(dev);
5979 if (err) {
5980 printk(KERN_WARNING "%s: Unable to reconfigure card\n",
5981 dev->name);
5982 goto out;
5983 }
5984
5985 err = hermes_enable_port(hw, 0);
5986 if (err) {
5987 printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
5988 dev->name);
5989 goto out;
5990 }
5991
5992 out:
5993 if (err) {
5994 printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
5995 schedule_work(&priv->reset_work);
5996 err = 0;
5997 }
5998
5999 orinoco_unlock(priv, &flags);
6000 return err;
6001}
6002
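
/* The table below defines the driver's private ioctls as seen by the
 * iwpriv utility, e.g. (interface name illustrative):
 *   iwpriv eth1 card_reset
 *   iwpriv eth1 set_preamble 1
 *   iwpriv eth1 get_rid [0xfc00] dummy  (the trailing "dummy" is only
 *                                        needed with Wireless Tools 25/26,
 *                                        see the get_rid comment above)
 */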
6003static const struct iw_priv_args orinoco_privtab[] = {
6004 { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" },
6005 { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
6006 { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6007 0, "set_port3" },
6008 { SIOCIWFIRSTPRIV + 0x3, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6009 "get_port3" },
6010 { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6011 0, "set_preamble" },
6012 { SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6013 "get_preamble" },
6014 { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6015 0, "set_ibssport" },
6016 { SIOCIWFIRSTPRIV + 0x7, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6017 "get_ibssport" },
6018 { SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_BYTE | MAX_RID_LEN,
6019 "get_rid" },
6020};
6021
6022
6023/*
6024 * Structures to export the Wireless Handlers
6025 */
6026
6027#define STD_IW_HANDLER(id, func) \
6028 [IW_IOCTL_IDX(id)] = (iw_handler) func
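/* IW_IOCTL_IDX() is the ioctl number minus SIOCIWFIRST, so each standard
 * handler below sits at the array index of the ioctl it implements. */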
6029static const iw_handler orinoco_handler[] = {
6030 STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
6031 STD_IW_HANDLER(SIOCGIWNAME, orinoco_ioctl_getname),
6032 STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
6033 STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
6034 STD_IW_HANDLER(SIOCSIWMODE, orinoco_ioctl_setmode),
6035 STD_IW_HANDLER(SIOCGIWMODE, orinoco_ioctl_getmode),
6036 STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
6037 STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
6038 STD_IW_HANDLER(SIOCGIWRANGE, orinoco_ioctl_getiwrange),
6039 STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
6040 STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
6041 STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
6042 STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
6043 STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
6044 STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
6045 STD_IW_HANDLER(SIOCSIWSCAN, orinoco_ioctl_setscan),
6046 STD_IW_HANDLER(SIOCGIWSCAN, orinoco_ioctl_getscan),
6047 STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
6048 STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
6049 STD_IW_HANDLER(SIOCSIWNICKN, orinoco_ioctl_setnick),
6050 STD_IW_HANDLER(SIOCGIWNICKN, orinoco_ioctl_getnick),
6051 STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
6052 STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
6053 STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts),
6054 STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts),
6055 STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag),
6056 STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag),
6057 STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry),
6058 STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
6059 STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
6060 STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
6061 STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
6062 STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
6063 STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
6064 STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
6065 STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
6066 STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
6067 STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
6068 STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
6069};
6070
6071
6072/*
6073 Added typecasting since we no longer use iwreq_data -- Moustafa
6074 */
6075static const iw_handler orinoco_private_handler[] = {
6076 [0] = (iw_handler) orinoco_ioctl_reset,
6077 [1] = (iw_handler) orinoco_ioctl_reset,
6078 [2] = (iw_handler) orinoco_ioctl_setport3,
6079 [3] = (iw_handler) orinoco_ioctl_getport3,
6080 [4] = (iw_handler) orinoco_ioctl_setpreamble,
6081 [5] = (iw_handler) orinoco_ioctl_getpreamble,
6082 [6] = (iw_handler) orinoco_ioctl_setibssport,
6083 [7] = (iw_handler) orinoco_ioctl_getibssport,
6084 [9] = (iw_handler) orinoco_ioctl_getrid,
6085};
6086
6087static const struct iw_handler_def orinoco_handler_def = {
6088 .num_standard = ARRAY_SIZE(orinoco_handler),
6089 .num_private = ARRAY_SIZE(orinoco_private_handler),
6090 .num_private_args = ARRAY_SIZE(orinoco_privtab),
6091 .standard = orinoco_handler,
6092 .private = orinoco_private_handler,
6093 .private_args = orinoco_privtab,
6094 .get_wireless_stats = orinoco_get_wireless_stats,
6095};
6096
6097static void orinoco_get_drvinfo(struct net_device *dev,
6098 struct ethtool_drvinfo *info)
6099{
6100 struct orinoco_private *priv = netdev_priv(dev);
6101
6102 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
6103 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
6104 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
6105 if (dev->dev.parent)
6106 strncpy(info->bus_info, dev_name(dev->dev.parent),
6107 sizeof(info->bus_info) - 1);
6108 else
6109 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
6110 "PCMCIA %p", priv->hw.iobase);
6111}
6112
6113static const struct ethtool_ops orinoco_ethtool_ops = {
6114 .get_drvinfo = orinoco_get_drvinfo,
6115 .get_link = ethtool_op_get_link,
6116};
6117
6118/********************************************************************/
6119/* Module initialization */
6120/********************************************************************/
6121
6122EXPORT_SYMBOL(alloc_orinocodev);
6123EXPORT_SYMBOL(free_orinocodev);
6124
6125EXPORT_SYMBOL(__orinoco_up);
6126EXPORT_SYMBOL(__orinoco_down);
6127EXPORT_SYMBOL(orinoco_reinit_firmware);
6128
6129EXPORT_SYMBOL(orinoco_interrupt);
6130
6131/* Can't be declared "const" or the whole __initdata section will
6132 * become const */
6133static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
6134 " (David Gibson <hermes@gibson.dropbear.id.au>, "
6135 "Pavel Roskin <proski@gnu.org>, et al)";
6136
6137static int __init init_orinoco(void)
6138{
6139 printk(KERN_DEBUG "%s\n", version);
6140 return 0;
6141}
6142
6143static void __exit exit_orinoco(void)
6144{
6145}
6146
6147module_init(init_orinoco);
6148module_exit(exit_orinoco);
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 00750c8ba7d..f3f94b28ce6 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -1,5 +1,5 @@
1/* orinoco.h 1/* orinoco.h
2 * 2 *
3 * Common definitions to all pieces of the various orinoco 3 * Common definitions to all pieces of the various orinoco
4 * drivers 4 * drivers
5 */ 5 */
@@ -18,9 +18,9 @@
18#include "hermes.h" 18#include "hermes.h"
19 19
20/* To enable debug messages */ 20/* To enable debug messages */
21//#define ORINOCO_DEBUG 3 21/*#define ORINOCO_DEBUG 3*/
22 22
23#define WIRELESS_SPY // enable iwspy support 23#define WIRELESS_SPY /* enable iwspy support */
24 24
25#define MAX_SCAN_LEN 4096 25#define MAX_SCAN_LEN 4096
26 26
@@ -59,14 +59,6 @@ struct xbss_element {
59 struct list_head list; 59 struct list_head list;
60}; 60};
61 61
62struct hermes_rx_descriptor;
63
64struct orinoco_rx_data {
65 struct hermes_rx_descriptor *desc;
66 struct sk_buff *skb;
67 struct list_head list;
68};
69
70struct firmware; 62struct firmware;
71 63
72struct orinoco_private { 64struct orinoco_private {
@@ -83,7 +75,6 @@ struct orinoco_private {
83 /* Interrupt tasklets */ 75 /* Interrupt tasklets */
84 struct tasklet_struct rx_tasklet; 76 struct tasklet_struct rx_tasklet;
85 struct list_head rx_list; 77 struct list_head rx_list;
86 struct orinoco_rx_data *rx_data;
87 78
88 /* driver state */ 79 /* driver state */
89 int open; 80 int open;
@@ -130,7 +121,7 @@ struct orinoco_private {
130 u16 encode_alg, wep_restrict, tx_key; 121 u16 encode_alg, wep_restrict, tx_key;
131 struct orinoco_key keys[ORINOCO_MAX_KEYS]; 122 struct orinoco_key keys[ORINOCO_MAX_KEYS];
132 int bitratemode; 123 int bitratemode;
133 char nick[IW_ESSID_MAX_SIZE+1]; 124 char nick[IW_ESSID_MAX_SIZE+1];
134 char desired_essid[IW_ESSID_MAX_SIZE+1]; 125 char desired_essid[IW_ESSID_MAX_SIZE+1];
135 char desired_bssid[ETH_ALEN]; 126 char desired_bssid[ETH_ALEN];
136 int bssid_fixed; 127 int bssid_fixed;
@@ -140,7 +131,7 @@ struct orinoco_private {
140 u16 pm_on, pm_mcast, pm_period, pm_timeout; 131 u16 pm_on, pm_mcast, pm_period, pm_timeout;
141 u16 preamble; 132 u16 preamble;
142#ifdef WIRELESS_SPY 133#ifdef WIRELESS_SPY
143 struct iw_spy_data spy_data; /* iwspy support */ 134 struct iw_spy_data spy_data; /* iwspy support */
144 struct iw_public_data wireless_data; 135 struct iw_public_data wireless_data;
145#endif 136#endif
146 137
@@ -177,7 +168,10 @@ struct orinoco_private {
177 168
178#ifdef ORINOCO_DEBUG 169#ifdef ORINOCO_DEBUG
179extern int orinoco_debug; 170extern int orinoco_debug;
180#define DEBUG(n, args...) do { if (orinoco_debug>(n)) printk(KERN_DEBUG args); } while(0) 171#define DEBUG(n, args...) do { \
172 if (orinoco_debug > (n)) \
173 printk(KERN_DEBUG args); \
174} while (0)
181#else 175#else
182#define DEBUG(n, args...) do { } while (0) 176#define DEBUG(n, args...) do { } while (0)
183#endif /* ORINOCO_DEBUG */ 177#endif /* ORINOCO_DEBUG */
@@ -194,7 +188,7 @@ extern void free_orinocodev(struct net_device *dev);
194extern int __orinoco_up(struct net_device *dev); 188extern int __orinoco_up(struct net_device *dev);
195extern int __orinoco_down(struct net_device *dev); 189extern int __orinoco_down(struct net_device *dev);
196extern int orinoco_reinit_firmware(struct net_device *dev); 190extern int orinoco_reinit_firmware(struct net_device *dev);
197extern irqreturn_t orinoco_interrupt(int irq, void * dev_id); 191extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
198 192
199/********************************************************************/ 193/********************************************************************/
200/* Locking and synchronization functions */ 194/* Locking and synchronization functions */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 0b32215d3f5..b381aed24d7 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -6,8 +6,8 @@
6 * It should also be usable on various Prism II based cards such as the 6 * It should also be usable on various Prism II based cards such as the
7 * Linksys, D-Link and Farallon Skyline. It should also work on Symbol 7 * Linksys, D-Link and Farallon Skyline. It should also work on Symbol
8 * cards such as the 3Com AirConnect and Ericsson WLAN. 8 * cards such as the 3Com AirConnect and Ericsson WLAN.
9 * 9 *
10 * Copyright notice & release notes in file orinoco.c 10 * Copyright notice & release notes in file main.c
11 */ 11 */
12 12
13#define DRIVER_NAME "orinoco_cs" 13#define DRIVER_NAME "orinoco_cs"
@@ -30,7 +30,8 @@
30/********************************************************************/ 30/********************************************************************/
31 31
32MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>"); 32MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
33MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco, Prism II based and similar wireless cards"); 33MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco,"
34 " Prism II based and similar wireless cards");
34MODULE_LICENSE("Dual MPL/GPL"); 35MODULE_LICENSE("Dual MPL/GPL");
35 36
36/* Module parameters */ 37/* Module parameters */
@@ -53,8 +54,8 @@ struct orinoco_pccard {
53 54
54 /* Used to handle hard reset */ 55 /* Used to handle hard reset */
55 /* yuck, we need this hack to work around the insanity of the 56 /* yuck, we need this hack to work around the insanity of the
56 * PCMCIA layer */ 57 * PCMCIA layer */
57 unsigned long hard_reset_in_progress; 58 unsigned long hard_reset_in_progress;
58}; 59};
59 60
60 61
@@ -98,7 +99,7 @@ orinoco_cs_hard_reset(struct orinoco_private *priv)
98 * This creates an "instance" of the driver, allocating local data 99 * This creates an "instance" of the driver, allocating local data
99 * structures for one device. The device is registered with Card 100 * structures for one device. The device is registered with Card
100 * Services. 101 * Services.
101 * 102 *
102 * The dev_link structure is initialized, but we don't actually 103 * The dev_link structure is initialized, but we don't actually
103 * configure the card at this point -- we wait until we receive a card 104 * configure the card at this point -- we wait until we receive a card
104 * insertion event. */ 105 * insertion event. */
@@ -111,7 +112,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
111 112
112 dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), 113 dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
113 orinoco_cs_hard_reset, NULL); 114 orinoco_cs_hard_reset, NULL);
114 if (! dev) 115 if (!dev)
115 return -ENOMEM; 116 return -ENOMEM;
116 priv = netdev_priv(dev); 117 priv = netdev_priv(dev);
117 card = priv->card; 118 card = priv->card;
@@ -124,7 +125,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
124 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 125 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
125 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 126 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
126 link->irq.Handler = orinoco_interrupt; 127 link->irq.Handler = orinoco_interrupt;
127 link->irq.Instance = dev; 128 link->irq.Instance = dev;
128 129
129 /* General socket configuration defaults can go here. In this 130 /* General socket configuration defaults can go here. In this
130 * client, we assume very little, and rely on the CIS for 131 * client, we assume very little, and rely on the CIS for
@@ -162,8 +163,10 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
162 */ 163 */
163 164
164#define CS_CHECK(fn, ret) do { \ 165#define CS_CHECK(fn, ret) do { \
165 last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; \ 166 last_fn = (fn); \
166 } while (0) 167 if ((last_ret = (ret)) != 0) \
168 goto cs_failed; \
169} while (0)
167 170
168static int orinoco_cs_config_check(struct pcmcia_device *p_dev, 171static int orinoco_cs_config_check(struct pcmcia_device *p_dev,
169 cistpl_cftable_entry_t *cfg, 172 cistpl_cftable_entry_t *cfg,
@@ -307,8 +310,8 @@ orinoco_cs_config(struct pcmcia_device *link)
307 * initialized and arranged in a linked list at link->dev_node. */ 310 * initialized and arranged in a linked list at link->dev_node. */
308 strcpy(card->node.dev_name, dev->name); 311 strcpy(card->node.dev_name, dev->name);
309 link->dev_node = &card->node; /* link->dev_node being non-NULL is also 312 link->dev_node = &card->node; /* link->dev_node being non-NULL is also
310 used to indicate that the 313 * used to indicate that the
311 net_device has been registered */ 314 * net_device has been registered */
312 315
313 /* Finally, report what we've done */ 316 /* Finally, report what we've done */
314 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " 317 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
@@ -359,7 +362,7 @@ static int orinoco_cs_suspend(struct pcmcia_device *link)
359 /* This is probably racy, but I can't think of 362 /* This is probably racy, but I can't think of
360 a better way, short of rewriting the PCMCIA 363 a better way, short of rewriting the PCMCIA
361 layer to not suck :-( */ 364 layer to not suck :-( */
362 if (! test_bit(0, &card->hard_reset_in_progress)) { 365 if (!test_bit(0, &card->hard_reset_in_progress)) {
363 spin_lock_irqsave(&priv->lock, flags); 366 spin_lock_irqsave(&priv->lock, flags);
364 367
365 err = __orinoco_down(dev); 368 err = __orinoco_down(dev);
@@ -384,7 +387,7 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
384 int err = 0; 387 int err = 0;
385 unsigned long flags; 388 unsigned long flags;
386 389
387 if (! test_bit(0, &card->hard_reset_in_progress)) { 390 if (!test_bit(0, &card->hard_reset_in_progress)) {
388 err = orinoco_reinit_firmware(dev); 391 err = orinoco_reinit_firmware(dev);
389 if (err) { 392 if (err) {
390 printk(KERN_ERR "%s: Error %d re-initializing firmware\n", 393 printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
@@ -397,7 +400,7 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
397 netif_device_attach(dev); 400 netif_device_attach(dev);
398 priv->hw_unavailable--; 401 priv->hw_unavailable--;
399 402
400 if (priv->open && ! priv->hw_unavailable) { 403 if (priv->open && !priv->hw_unavailable) {
401 err = __orinoco_up(dev); 404 err = __orinoco_up(dev);
402 if (err) 405 if (err)
403 printk(KERN_ERR "%s: Error %d restarting card\n", 406 printk(KERN_ERR "%s: Error %d restarting card\n",
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 2fc86596302..b01726255c6 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -9,12 +9,12 @@
9 * 9 *
10 * Some of this code is borrowed from orinoco_plx.c 10 * Some of this code is borrowed from orinoco_plx.c
11 * Copyright (C) 2001 Daniel Barlow 11 * Copyright (C) 2001 Daniel Barlow
12 * Some of this code is borrowed from orinoco_pci.c 12 * Some of this code is borrowed from orinoco_pci.c
13 * Copyright (C) 2001 Jean Tourrilhes 13 * Copyright (C) 2001 Jean Tourrilhes
14 * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing 14 * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing
15 * has been copied from it. linux-wlan-ng-0.1.10 is originally : 15 * has been copied from it. linux-wlan-ng-0.1.10 is originally :
16 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 16 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
17 * 17 *
18 * The contents of this file are subject to the Mozilla Public License 18 * The contents of this file are subject to the Mozilla Public License
19 * Version 1.1 (the "License"); you may not use this file except in 19 * Version 1.1 (the "License"); you may not use this file except in
20 * compliance with the License. You may obtain a copy of the License 20 * compliance with the License. You may obtain a copy of the License
@@ -103,9 +103,8 @@ static int orinoco_nortel_hw_init(struct orinoco_pci_card *card)
103 iowrite16(0x8, card->bridge_io + 2); 103 iowrite16(0x8, card->bridge_io + 2);
104 for (i = 0; i < 30; i++) { 104 for (i = 0; i < 30; i++) {
105 mdelay(30); 105 mdelay(30);
106 if (ioread16(card->bridge_io) & 0x10) { 106 if (ioread16(card->bridge_io) & 0x10)
107 break; 107 break;
108 }
109 } 108 }
110 if (i == 30) { 109 if (i == 30) {
111 printk(KERN_ERR PFX "brg1 timed out\n"); 110 printk(KERN_ERR PFX "brg1 timed out\n");
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 4ebd638a073..78cafff1fb2 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -1,5 +1,5 @@
1/* orinoco_pci.c 1/* orinoco_pci.c
2 * 2 *
3 * Driver for Prism 2.5/3 devices that have a direct PCI interface 3 * Driver for Prism 2.5/3 devices that have a direct PCI interface
4 * (i.e. these are not PCMCIA cards in a PCMCIA-to-PCI bridge). 4 * (i.e. these are not PCMCIA cards in a PCMCIA-to-PCI bridge).
5 * The card contains only one PCI region, which contains all the usual 5 * The card contains only one PCI region, which contains all the usual
@@ -237,7 +237,8 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
237 " (Pavel Roskin <proski@gnu.org>," 237 " (Pavel Roskin <proski@gnu.org>,"
238 " David Gibson <hermes@gibson.dropbear.id.au> &" 238 " David Gibson <hermes@gibson.dropbear.id.au> &"
239 " Jean Tourrilhes <jt@hpl.hp.com>)"; 239 " Jean Tourrilhes <jt@hpl.hp.com>)";
240MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & David Gibson <hermes@gibson.dropbear.id.au>"); 240MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> &"
241 " David Gibson <hermes@gibson.dropbear.id.au>");
241MODULE_DESCRIPTION("Driver for wireless LAN cards using direct PCI interface"); 242MODULE_DESCRIPTION("Driver for wireless LAN cards using direct PCI interface");
242MODULE_LICENSE("Dual MPL/GPL"); 243MODULE_LICENSE("Dual MPL/GPL");
243 244
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.h b/drivers/net/wireless/orinoco/orinoco_pci.h
index f4e5e06760c..c655b4a3de1 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.h
+++ b/drivers/net/wireless/orinoco/orinoco_pci.h
@@ -1,10 +1,10 @@
1/* orinoco_pci.h 1/* orinoco_pci.h
2 * 2 *
3 * Common code for all Orinoco drivers for PCI devices, including 3 * Common code for all Orinoco drivers for PCI devices, including
4 * both native PCI and PCMCIA-to-PCI bridges. 4 * both native PCI and PCMCIA-to-PCI bridges.
5 * 5 *
6 * Copyright (C) 2005, Pavel Roskin. 6 * Copyright (C) 2005, Pavel Roskin.
7 * See orinoco.c for license. 7 * See main.c for license.
8 */ 8 */
9 9
10#ifndef _ORINOCO_PCI_H 10#ifndef _ORINOCO_PCI_H
@@ -37,11 +37,11 @@ static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state)
37 if (err) 37 if (err)
38 printk(KERN_WARNING "%s: error %d bringing interface down " 38 printk(KERN_WARNING "%s: error %d bringing interface down "
39 "for suspend\n", dev->name, err); 39 "for suspend\n", dev->name, err);
40 40
41 netif_device_detach(dev); 41 netif_device_detach(dev);
42 42
43 priv->hw_unavailable++; 43 priv->hw_unavailable++;
44 44
45 orinoco_unlock(priv, &flags); 45 orinoco_unlock(priv, &flags);
46 46
47 free_irq(pdev->irq, dev); 47 free_irq(pdev->irq, dev);
@@ -90,13 +90,13 @@ static int orinoco_pci_resume(struct pci_dev *pdev)
90 90
91 priv->hw_unavailable--; 91 priv->hw_unavailable--;
92 92
93 if (priv->open && (! priv->hw_unavailable)) { 93 if (priv->open && (!priv->hw_unavailable)) {
94 err = __orinoco_up(dev); 94 err = __orinoco_up(dev);
95 if (err) 95 if (err)
96 printk(KERN_ERR "%s: Error %d restarting card on resume\n", 96 printk(KERN_ERR "%s: Error %d restarting card on resume\n",
97 dev->name, err); 97 dev->name, err);
98 } 98 }
99 99
100 spin_unlock_irqrestore(&priv->lock, flags); 100 spin_unlock_irqrestore(&priv->lock, flags);
101 101
102 return 0; 102 return 0;
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index ef761857bb3..a2a4471c033 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -146,9 +146,8 @@ static int orinoco_plx_hw_init(struct orinoco_pci_card *card)
146 }; 146 };
147 147
148 printk(KERN_DEBUG PFX "CIS: "); 148 printk(KERN_DEBUG PFX "CIS: ");
149 for (i = 0; i < 16; i++) { 149 for (i = 0; i < 16; i++)
150 printk("%02X:", ioread8(card->attr_io + (i << 1))); 150 printk("%02X:", ioread8(card->attr_io + (i << 1)));
151 }
152 printk("\n"); 151 printk("\n");
153 152
154 /* Verify whether a supported PC card is present */ 153 /* Verify whether a supported PC card is present */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index ede24ec309c..cda0e6e4d7a 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -1,7 +1,7 @@
1/* orinoco_tmd.c 1/* orinoco_tmd.c
2 * 2 *
3 * Driver for Prism II devices which would usually be driven by orinoco_cs, 3 * Driver for Prism II devices which would usually be driven by orinoco_cs,
4 * but are connected to the PCI bus by a TMD7160. 4 * but are connected to the PCI bus by a TMD7160.
5 * 5 *
6 * Copyright (C) 2003 Joerg Dorchain <joerg AT dorchain.net> 6 * Copyright (C) 2003 Joerg Dorchain <joerg AT dorchain.net>
7 * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow 7 * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow
@@ -27,7 +27,7 @@
27 * provisions above, a recipient may use your version of this file 27 * provisions above, a recipient may use your version of this file
28 * under either the MPL or the GPL. 28 * under either the MPL or the GPL.
29 * 29 *
30 * The actual driving is done by orinoco.c, this is just resource 30 * The actual driving is done by main.c, this is just resource
31 * allocation stuff. 31 * allocation stuff.
32 * 32 *
33 * This driver is modeled after the orinoco_plx driver. The main 33 * This driver is modeled after the orinoco_plx driver. The main
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
new file mode 100644
index 00000000000..89d699d4dfe
--- /dev/null
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -0,0 +1,233 @@
1/* Helpers for managing scan queues
2 *
3 * See copyright notice in main.c
4 */
5
6#include <linux/kernel.h>
7#include <linux/string.h>
8#include <linux/etherdevice.h>
9
10#include "hermes.h"
11#include "orinoco.h"
12
13#include "scan.h"
14
15#define ORINOCO_MAX_BSS_COUNT 64
16
17#define PRIV_BSS ((struct bss_element *)priv->bss_xbss_data)
18#define PRIV_XBSS ((struct xbss_element *)priv->bss_xbss_data)
19
20int orinoco_bss_data_allocate(struct orinoco_private *priv)
21{
22 if (priv->bss_xbss_data)
23 return 0;
24
25 if (priv->has_ext_scan)
26 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
27 sizeof(struct xbss_element),
28 GFP_KERNEL);
29 else
30 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
31 sizeof(struct bss_element),
32 GFP_KERNEL);
33
34 if (!priv->bss_xbss_data) {
35 printk(KERN_WARNING "Out of memory allocating beacons\n");
36 return -ENOMEM;
37 }
38 return 0;
39}
40
41void orinoco_bss_data_free(struct orinoco_private *priv)
42{
43 kfree(priv->bss_xbss_data);
44 priv->bss_xbss_data = NULL;
45}
46
47void orinoco_bss_data_init(struct orinoco_private *priv)
48{
49 int i;
50
51 INIT_LIST_HEAD(&priv->bss_free_list);
52 INIT_LIST_HEAD(&priv->bss_list);
53 if (priv->has_ext_scan)
54 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
55 list_add_tail(&(PRIV_XBSS[i].list),
56 &priv->bss_free_list);
57 else
58 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
59 list_add_tail(&(PRIV_BSS[i].list),
60 &priv->bss_free_list);
61
62}
63
64void orinoco_clear_scan_results(struct orinoco_private *priv,
65 unsigned long scan_age)
66{
67 if (priv->has_ext_scan) {
68 struct xbss_element *bss;
69 struct xbss_element *tmp_bss;
70
71 /* Blow away current list of scan results */
72 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
73 if (!scan_age ||
74 time_after(jiffies, bss->last_scanned + scan_age)) {
75 list_move_tail(&bss->list,
76 &priv->bss_free_list);
77 /* Don't blow away ->list, just BSS data */
78 memset(&bss->bss, 0, sizeof(bss->bss));
79 bss->last_scanned = 0;
80 }
81 }
82 } else {
83 struct bss_element *bss;
84 struct bss_element *tmp_bss;
85
86 /* Blow away current list of scan results */
87 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
88 if (!scan_age ||
89 time_after(jiffies, bss->last_scanned + scan_age)) {
90 list_move_tail(&bss->list,
91 &priv->bss_free_list);
92 /* Don't blow away ->list, just BSS data */
93 memset(&bss->bss, 0, sizeof(bss->bss));
94 bss->last_scanned = 0;
95 }
96 }
97 }
98}
99
100void orinoco_add_ext_scan_result(struct orinoco_private *priv,
101 struct agere_ext_scan_info *atom)
102{
103 struct xbss_element *bss = NULL;
104 int found = 0;
105
106 /* Try to update an existing bss first */
107 list_for_each_entry(bss, &priv->bss_list, list) {
108 if (compare_ether_addr(bss->bss.bssid, atom->bssid))
109 continue;
110 /* ESSID lengths */
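/* (Assumed layout: data[] holds the SSID information element, so
 * data[1] is the IE length and data[2..] the SSID bytes, hence the
 * comparisons below.) */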
111 if (bss->bss.data[1] != atom->data[1])
112 continue;
113 if (memcmp(&bss->bss.data[2], &atom->data[2],
114 atom->data[1]))
115 continue;
116 found = 1;
117 break;
118 }
119
120 /* Grab a bss off the free list */
121 if (!found && !list_empty(&priv->bss_free_list)) {
122 bss = list_entry(priv->bss_free_list.next,
123 struct xbss_element, list);
124 list_del(priv->bss_free_list.next);
125
126 list_add_tail(&bss->list, &priv->bss_list);
127 }
128
129 if (bss) {
130 /* Always update the BSS to get latest beacon info */
131 memcpy(&bss->bss, atom, sizeof(bss->bss));
132 bss->last_scanned = jiffies;
133 }
134}
135
136int orinoco_process_scan_results(struct orinoco_private *priv,
137 unsigned char *buf,
138 int len)
139{
140 int offset; /* In the scan data */
141 union hermes_scan_info *atom;
142 int atom_len;
143
144 switch (priv->firmware_type) {
145 case FIRMWARE_TYPE_AGERE:
146 atom_len = sizeof(struct agere_scan_apinfo);
147 offset = 0;
148 break;
149 case FIRMWARE_TYPE_SYMBOL:
150 /* Lack of documentation necessitates this hack.
151 * Different firmwares have 68 or 76 byte long atoms.
152 * We try modulo first. If the length divides by both,
153 * we check what would be the channel in the second
154 * frame for a 68-byte atom. 76-byte atoms have 0 there.
155 * Valid channel cannot be 0. */
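/* (Note on the heuristic: a length divisible by both 68 and 76 must be
 * a multiple of lcm(68, 76) = 1292, so only buffers of at least 1292
 * bytes can be ambiguous; those are settled by the buf[68] check.) */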
156 if (len % 76)
157 atom_len = 68;
158 else if (len % 68)
159 atom_len = 76;
160 else if (len >= 1292 && buf[68] == 0)
161 atom_len = 76;
162 else
163 atom_len = 68;
164 offset = 0;
165 break;
166 case FIRMWARE_TYPE_INTERSIL:
167 offset = 4;
168 if (priv->has_hostscan) {
169 atom_len = le16_to_cpup((__le16 *)buf);
170 /* Sanity check for atom_len */
171 if (atom_len < sizeof(struct prism2_scan_apinfo)) {
172 printk(KERN_ERR "%s: Invalid atom_len in scan "
173 "data: %d\n", priv->ndev->name,
174 atom_len);
175 return -EIO;
176 }
177 } else
178 atom_len = offsetof(struct prism2_scan_apinfo, atim);
179 break;
180 default:
181 return -EOPNOTSUPP;
182 }
183
184 /* Check that we got a whole number of atoms */
185 if ((len - offset) % atom_len) {
186 printk(KERN_ERR "%s: Unexpected scan data length %d, "
187 "atom_len %d, offset %d\n", priv->ndev->name, len,
188 atom_len, offset);
189 return -EIO;
190 }
191
192 orinoco_clear_scan_results(priv, msecs_to_jiffies(15000));
193
194 /* Read the entries one by one */
195 for (; offset + atom_len <= len; offset += atom_len) {
196 int found = 0;
197 struct bss_element *bss = NULL;
198
199 /* Get next atom */
200 atom = (union hermes_scan_info *) (buf + offset);
201
202 /* Try to update an existing bss first */
203 list_for_each_entry(bss, &priv->bss_list, list) {
204 if (compare_ether_addr(bss->bss.a.bssid, atom->a.bssid))
205 continue;
206 if (le16_to_cpu(bss->bss.a.essid_len) !=
207 le16_to_cpu(atom->a.essid_len))
208 continue;
209 if (memcmp(bss->bss.a.essid, atom->a.essid,
210 le16_to_cpu(atom->a.essid_len)))
211 continue;
212 found = 1;
213 break;
214 }
215
216 /* Grab a bss off the free list */
217 if (!found && !list_empty(&priv->bss_free_list)) {
218 bss = list_entry(priv->bss_free_list.next,
219 struct bss_element, list);
220 list_del(priv->bss_free_list.next);
221
222 list_add_tail(&bss->list, &priv->bss_list);
223 }
224
225 if (bss) {
226 /* Always update the BSS to get latest beacon info */
227 memcpy(&bss->bss, atom, sizeof(bss->bss));
228 bss->last_scanned = jiffies;
229 }
230 }
231
232 return 0;
233}
diff --git a/drivers/net/wireless/orinoco/scan.h b/drivers/net/wireless/orinoco/scan.h
new file mode 100644
index 00000000000..f319f7466af
--- /dev/null
+++ b/drivers/net/wireless/orinoco/scan.h
@@ -0,0 +1,29 @@
1/* Helpers for managing scan queues
2 *
3 * See copyright notice in main.c
4 */
5#ifndef _ORINOCO_SCAN_H_
6#define _ORINOCO_SCAN_H_
7
8/* Forward declarations */
9struct orinoco_private;
10struct agere_ext_scan_info;
11
12/* Setup and free memory for scan results */
13int orinoco_bss_data_allocate(struct orinoco_private *priv);
14void orinoco_bss_data_free(struct orinoco_private *priv);
15void orinoco_bss_data_init(struct orinoco_private *priv);
16
17/* Add scan results */
18void orinoco_add_ext_scan_result(struct orinoco_private *priv,
19 struct agere_ext_scan_info *atom);
20int orinoco_process_scan_results(struct orinoco_private *dev,
21 unsigned char *buf,
22 int len);
23
24/* Clear scan results */
25void orinoco_clear_scan_results(struct orinoco_private *priv,
26 unsigned long scan_age);
27
28
29#endif /* _ORINOCO_SCAN_H_ */
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index b2ca2e39c2c..38e5198e44c 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -4,7 +4,7 @@
  * Communications and Intel PRO/Wireless 2011B.
  *
  * The driver implements Symbol firmware download. The rest is handled
- * in hermes.c and orinoco.c.
+ * in hermes.c and main.c.
  *
  * Utilities for downloading the Symbol firmware are available at
  * http://sourceforge.net/projects/orinoco/
@@ -15,7 +15,7 @@
  * Portions based on Spectrum24tDnld.c from original spectrum24 driver:
  * Copyright (C) Symbol Technologies.
  *
- * See copyright notice in file orinoco.c.
+ * See copyright notice in file main.c.
  */
 
 #define DRIVER_NAME "spectrum_cs"
@@ -133,7 +133,7 @@ spectrum_reset(struct pcmcia_device *link, int idle)
 	udelay(1000);
 	return 0;
 
- cs_failed:
+cs_failed:
 	cs_error(link, last_fn, last_ret);
 	return -ENODEV;
 }
@@ -171,7 +171,7 @@ spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle)
  * This creates an "instance" of the driver, allocating local data
  * structures for one device. The device is registered with Card
  * Services.
- * 
+ *
  * The dev_link structure is initialized, but we don't actually
  * configure the card at this point -- we wait until we receive a card
  * insertion event. */
@@ -185,7 +185,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
 	dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
 			       spectrum_cs_hard_reset,
 			       spectrum_cs_stop_firmware);
-	if (! dev)
+	if (!dev)
 		return -ENOMEM;
 	priv = netdev_priv(dev);
 	card = priv->card;
@@ -198,7 +198,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
 	link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
 	link->irq.IRQInfo1 = IRQ_LEVEL_ID;
 	link->irq.Handler = orinoco_interrupt;
 	link->irq.Instance = dev;
- 
+
 	/* General socket configuration defaults can go here. In this
 	 * client, we assume very little, and rely on the CIS for
@@ -367,9 +367,8 @@ spectrum_cs_config(struct pcmcia_device *link)
 	card->node.major = card->node.minor = 0;
 
 	/* Reset card */
-	if (spectrum_cs_hard_reset(priv) != 0) {
+	if (spectrum_cs_hard_reset(priv) != 0)
 		goto failed;
-	}
 
 	SET_NETDEV_DEV(dev, &handle_to_dev(link));
 	/* Tell the stack we exist */
@@ -382,8 +381,8 @@ spectrum_cs_config(struct pcmcia_device *link)
  * initialized and arranged in a linked list at link->dev_node. */
 	strcpy(card->node.dev_name, dev->name);
 	link->dev_node = &card->node; /* link->dev_node being non-NULL is also
-					  used to indicate that the
-					  net_device has been registered */
+					* used to indicate that the
+					* net_device has been registered */
 
 	/* Finally, report what we've done */
 	printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
new file mode 100644
index 00000000000..3f081423439
--- /dev/null
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -0,0 +1,2325 @@
1/* Wireless extensions support.
2 *
3 * See copyright notice in main.c
4 */
5#include <linux/kernel.h>
6#include <linux/if_arp.h>
7#include <linux/wireless.h>
8#include <linux/ieee80211.h>
9#include <net/iw_handler.h>
10
11#include "hermes.h"
12#include "hermes_rid.h"
13#include "orinoco.h"
14
15#include "hw.h"
16#include "mic.h"
17#include "scan.h"
18#include "main.h"
19
20#include "wext.h"
21
22#define MAX_RID_LEN 1024
23
24static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
25{
26 struct orinoco_private *priv = netdev_priv(dev);
27 hermes_t *hw = &priv->hw;
28 struct iw_statistics *wstats = &priv->wstats;
29 int err;
30 unsigned long flags;
31
32 if (!netif_device_present(dev)) {
33 printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
34 dev->name);
35 return NULL; /* FIXME: Can we do better than this? */
36 }
37
38 /* If busy, return the old stats. Returning NULL may cause
39 * the interface to disappear from /proc/net/wireless */
40 if (orinoco_lock(priv, &flags) != 0)
41 return wstats;
42
43 /* We can't really wait for the tallies inquiry command to
44 * complete, so we just use the previous results and trigger
45 * a new tallies inquiry command for next time - Jean II */
46 /* FIXME: Really we should wait for the inquiry to come back -
47 * as it is the stats we give don't make a whole lot of sense.
48 * Unfortunately, it's not clear how to do that within the
49 * wireless extensions framework: I think we're in user
50 * context, but a lock seems to be held by the time we get in
51 * here so we're not safe to sleep here. */
52 hermes_inquire(hw, HERMES_INQ_TALLIES);
53
54 if (priv->iw_mode == IW_MODE_ADHOC) {
55 memset(&wstats->qual, 0, sizeof(wstats->qual));
56 /* If a spy address is defined, we report stats of the
57 * first spy address - Jean II */
58 if (SPY_NUMBER(priv)) {
59 wstats->qual.qual = priv->spy_data.spy_stat[0].qual;
60 wstats->qual.level = priv->spy_data.spy_stat[0].level;
61 wstats->qual.noise = priv->spy_data.spy_stat[0].noise;
62 wstats->qual.updated =
63 priv->spy_data.spy_stat[0].updated;
64 }
65 } else {
66 struct {
67 __le16 qual, signal, noise, unused;
68 } __attribute__ ((packed)) cq;
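/* (Note: the raw Hermes quality values are offset; subtracting 0x95
 * below converts level and noise to dBm, which is why IW_QUAL_DBM is
 * reported.) */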
69
70 err = HERMES_READ_RECORD(hw, USER_BAP,
71 HERMES_RID_COMMSQUALITY, &cq);
72
73 if (!err) {
74 wstats->qual.qual = (int)le16_to_cpu(cq.qual);
75 wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
76 wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
77 wstats->qual.updated =
78 IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
79 }
80 }
81
82 orinoco_unlock(priv, &flags);
83 return wstats;
84}
85
86/********************************************************************/
87/* Wireless extensions */
88/********************************************************************/
89
90static int orinoco_ioctl_getname(struct net_device *dev,
91 struct iw_request_info *info,
92 char *name,
93 char *extra)
94{
95 struct orinoco_private *priv = netdev_priv(dev);
96 int numrates;
97 int err;
98
99 err = orinoco_hw_get_bitratelist(priv, &numrates, NULL, 0);
100
101 if (!err && (numrates > 2))
102 strcpy(name, "IEEE 802.11b");
103 else
104 strcpy(name, "IEEE 802.11-DS");
105
106 return 0;
107}
108
109static int orinoco_ioctl_setwap(struct net_device *dev,
110 struct iw_request_info *info,
111 struct sockaddr *ap_addr,
112 char *extra)
113{
114 struct orinoco_private *priv = netdev_priv(dev);
115 int err = -EINPROGRESS; /* Call commit handler */
116 unsigned long flags;
117 static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
118 static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
119
120 if (orinoco_lock(priv, &flags) != 0)
121 return -EBUSY;
122
123 /* Enable automatic roaming - no sanity checks are needed */
124 if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 ||
125 memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) {
126 priv->bssid_fixed = 0;
127 memset(priv->desired_bssid, 0, ETH_ALEN);
128
129 /* "off" means keep existing connection */
130 if (ap_addr->sa_data[0] == 0) {
131 __orinoco_hw_set_wap(priv);
132 err = 0;
133 }
134 goto out;
135 }
136
137 if (priv->firmware_type == FIRMWARE_TYPE_AGERE) {
138 printk(KERN_WARNING "%s: Lucent/Agere firmware doesn't "
139 "support manual roaming\n",
140 dev->name);
141 err = -EOPNOTSUPP;
142 goto out;
143 }
144
145 if (priv->iw_mode != IW_MODE_INFRA) {
146 printk(KERN_WARNING "%s: Manual roaming supported only in "
147 "managed mode\n", dev->name);
148 err = -EOPNOTSUPP;
149 goto out;
150 }
151
152 /* Intersil firmware hangs without Desired ESSID */
153 if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL &&
154 strlen(priv->desired_essid) == 0) {
155 printk(KERN_WARNING "%s: Desired ESSID must be set for "
156 "manual roaming\n", dev->name);
157 err = -EOPNOTSUPP;
158 goto out;
159 }
160
161 /* Finally, enable manual roaming */
162 priv->bssid_fixed = 1;
163 memcpy(priv->desired_bssid, &ap_addr->sa_data, ETH_ALEN);
164
165 out:
166 orinoco_unlock(priv, &flags);
167 return err;
168}
169
170static int orinoco_ioctl_getwap(struct net_device *dev,
171 struct iw_request_info *info,
172 struct sockaddr *ap_addr,
173 char *extra)
174{
175 struct orinoco_private *priv = netdev_priv(dev);
176
177 hermes_t *hw = &priv->hw;
178 int err = 0;
179 unsigned long flags;
180
181 if (orinoco_lock(priv, &flags) != 0)
182 return -EBUSY;
183
184 ap_addr->sa_family = ARPHRD_ETHER;
185 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
186 ETH_ALEN, NULL, ap_addr->sa_data);
187
188 orinoco_unlock(priv, &flags);
189
190 return err;
191}
192
193static int orinoco_ioctl_setmode(struct net_device *dev,
194 struct iw_request_info *info,
195 u32 *mode,
196 char *extra)
197{
198 struct orinoco_private *priv = netdev_priv(dev);
199 int err = -EINPROGRESS; /* Call commit handler */
200 unsigned long flags;
201
202 if (priv->iw_mode == *mode)
203 return 0;
204
205 if (orinoco_lock(priv, &flags) != 0)
206 return -EBUSY;
207
208 switch (*mode) {
209 case IW_MODE_ADHOC:
210 if (!priv->has_ibss && !priv->has_port3)
211 err = -EOPNOTSUPP;
212 break;
213
214 case IW_MODE_INFRA:
215 break;
216
217 case IW_MODE_MONITOR:
218 if (priv->broken_monitor && !force_monitor) {
219 printk(KERN_WARNING "%s: Monitor mode support is "
220 "buggy in this firmware, not enabling\n",
221 dev->name);
222 err = -EOPNOTSUPP;
223 }
224 break;
225
226 default:
227 err = -EOPNOTSUPP;
228 break;
229 }
230
231 if (err == -EINPROGRESS) {
232 priv->iw_mode = *mode;
233 set_port_type(priv);
234 }
235
236 orinoco_unlock(priv, &flags);
237
238 return err;
239}
240
241static int orinoco_ioctl_getmode(struct net_device *dev,
242 struct iw_request_info *info,
243 u32 *mode,
244 char *extra)
245{
246 struct orinoco_private *priv = netdev_priv(dev);
247
248 *mode = priv->iw_mode;
249 return 0;
250}
251
252static int orinoco_ioctl_getiwrange(struct net_device *dev,
253 struct iw_request_info *info,
254 struct iw_point *rrq,
255 char *extra)
256{
257 struct orinoco_private *priv = netdev_priv(dev);
258 int err = 0;
259 struct iw_range *range = (struct iw_range *) extra;
260 int numrates;
261 int i, k;
262
263 rrq->length = sizeof(struct iw_range);
264 memset(range, 0, sizeof(struct iw_range));
265
266 range->we_version_compiled = WIRELESS_EXT;
267 range->we_version_source = 22;
268
269 /* Set available channels/frequencies */
270 range->num_channels = NUM_CHANNELS;
271 k = 0;
272 for (i = 0; i < NUM_CHANNELS; i++) {
273 if (priv->channel_mask & (1 << i)) {
274 range->freq[k].i = i + 1;
275 range->freq[k].m = (ieee80211_dsss_chan_to_freq(i + 1) *
276 100000);
277 range->freq[k].e = 1;
278 k++;
279 }
280
281 if (k >= IW_MAX_FREQUENCIES)
282 break;
283 }
284 range->num_frequency = k;
285 range->sensitivity = 3;
286
287 if (priv->has_wep) {
288 range->max_encoding_tokens = ORINOCO_MAX_KEYS;
289 range->encoding_size[0] = SMALL_KEY_SIZE;
290 range->num_encoding_sizes = 1;
291
292 if (priv->has_big_wep) {
293 range->encoding_size[1] = LARGE_KEY_SIZE;
294 range->num_encoding_sizes = 2;
295 }
296 }
297
298 if (priv->has_wpa)
299 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_CIPHER_TKIP;
300
301 if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))) {
302 /* Quality stats meaningless in ad-hoc mode */
303 } else {
304 range->max_qual.qual = 0x8b - 0x2f;
305 range->max_qual.level = 0x2f - 0x95 - 1;
306 range->max_qual.noise = 0x2f - 0x95 - 1;
307 /* Need to get better values */
308 range->avg_qual.qual = 0x24;
309 range->avg_qual.level = 0xC2;
310 range->avg_qual.noise = 0x9E;
311 }
312
313 err = orinoco_hw_get_bitratelist(priv, &numrates,
314 range->bitrate, IW_MAX_BITRATES);
315 if (err)
316 return err;
317 range->num_bitrates = numrates;
318
319 /* Set an indication of the max TCP throughput in bit/s that we can
320 * expect using this interface. May be used for QoS stuff...
321 * Jean II */
322 if (numrates > 2)
323 range->throughput = 5 * 1000 * 1000; /* ~5 Mb/s */
324 else
325 range->throughput = 1.5 * 1000 * 1000; /* ~1.5 Mb/s */
326
327 range->min_rts = 0;
328 range->max_rts = 2347;
329 range->min_frag = 256;
330 range->max_frag = 2346;
331
332 range->min_pmp = 0;
333 range->max_pmp = 65535000;
334 range->min_pmt = 0;
335 range->max_pmt = 65535 * 1000; /* ??? */
336 range->pmp_flags = IW_POWER_PERIOD;
337 range->pmt_flags = IW_POWER_TIMEOUT;
338 range->pm_capa = (IW_POWER_PERIOD | IW_POWER_TIMEOUT |
339 IW_POWER_UNICAST_R);
340
341 range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
342 range->retry_flags = IW_RETRY_LIMIT;
343 range->r_time_flags = IW_RETRY_LIFETIME;
344 range->min_retry = 0;
345 range->max_retry = 65535; /* ??? */
346 range->min_r_time = 0;
347 range->max_r_time = 65535 * 1000; /* ??? */
348
349 if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
350 range->scan_capa = IW_SCAN_CAPA_ESSID;
351 else
352 range->scan_capa = IW_SCAN_CAPA_NONE;
353
354 /* Event capability (kernel) */
355 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
356 /* Event capability (driver) */
357 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
358 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
359 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
360 IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
361
362 return 0;
363}
364
365static int orinoco_ioctl_setiwencode(struct net_device *dev,
366 struct iw_request_info *info,
367 struct iw_point *erq,
368 char *keybuf)
369{
370 struct orinoco_private *priv = netdev_priv(dev);
371 int index = (erq->flags & IW_ENCODE_INDEX) - 1;
372 int setindex = priv->tx_key;
373 int encode_alg = priv->encode_alg;
374 int restricted = priv->wep_restrict;
375 u16 xlen = 0;
376 int err = -EINPROGRESS; /* Call commit handler */
377 unsigned long flags;
378
379 if (!priv->has_wep)
380 return -EOPNOTSUPP;
381
382 if (erq->pointer) {
383 /* We actually have a key to set - check its length */
384 if (erq->length > LARGE_KEY_SIZE)
385 return -E2BIG;
386
387 if ((erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep)
388 return -E2BIG;
389 }
390
391 if (orinoco_lock(priv, &flags) != 0)
392 return -EBUSY;
393
394 /* Clear any TKIP key we have */
395 if ((priv->has_wpa) && (priv->encode_alg == IW_ENCODE_ALG_TKIP))
396 (void) orinoco_clear_tkip_key(priv, setindex);
397
398 if (erq->length > 0) {
399 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
400 index = priv->tx_key;
401
402 /* Adjust key length to a supported value */
403 if (erq->length > SMALL_KEY_SIZE)
404 xlen = LARGE_KEY_SIZE;
405 else if (erq->length > 0)
406 xlen = SMALL_KEY_SIZE;
407 else
408 xlen = 0;
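/* (Assumption: SMALL_KEY_SIZE and LARGE_KEY_SIZE correspond to the
 * usual 5-byte/40-bit and 13-byte/104-bit WEP key lengths.) */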
409
410 /* Switch on WEP if off */
411 if ((encode_alg != IW_ENCODE_ALG_WEP) && (xlen > 0)) {
412 setindex = index;
413 encode_alg = IW_ENCODE_ALG_WEP;
414 }
415 } else {
416 /* Important note : if the user does "iwconfig eth0 enc off",
417 * we will arrive there with an index of -1. This is valid
418 * but needs to be taken care of... Jean II */
419 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) {
420 if ((index != -1) || (erq->flags == 0)) {
421 err = -EINVAL;
422 goto out;
423 }
424 } else {
425 /* Set the index : Check that the key is valid */
426 if (priv->keys[index].len == 0) {
427 err = -EINVAL;
428 goto out;
429 }
430 setindex = index;
431 }
432 }
433
434 if (erq->flags & IW_ENCODE_DISABLED)
435 encode_alg = IW_ENCODE_ALG_NONE;
436 if (erq->flags & IW_ENCODE_OPEN)
437 restricted = 0;
438 if (erq->flags & IW_ENCODE_RESTRICTED)
439 restricted = 1;
440
441 if (erq->pointer && erq->length > 0) {
442 priv->keys[index].len = cpu_to_le16(xlen);
443 memset(priv->keys[index].data, 0,
444 sizeof(priv->keys[index].data));
445 memcpy(priv->keys[index].data, keybuf, erq->length);
446 }
447 priv->tx_key = setindex;
448
449 /* Try fast key change if connected and only keys are changed */
450 if ((priv->encode_alg == encode_alg) &&
451 (priv->wep_restrict == restricted) &&
452 netif_carrier_ok(dev)) {
453 err = __orinoco_hw_setup_wepkeys(priv);
454 /* No need to commit if successful */
455 goto out;
456 }
457
458 priv->encode_alg = encode_alg;
459 priv->wep_restrict = restricted;
460
461 out:
462 orinoco_unlock(priv, &flags);
463
464 return err;
465}
466
467static int orinoco_ioctl_getiwencode(struct net_device *dev,
468 struct iw_request_info *info,
469 struct iw_point *erq,
470 char *keybuf)
471{
472 struct orinoco_private *priv = netdev_priv(dev);
473 int index = (erq->flags & IW_ENCODE_INDEX) - 1;
474 u16 xlen = 0;
475 unsigned long flags;
476
477 if (!priv->has_wep)
478 return -EOPNOTSUPP;
479
480 if (orinoco_lock(priv, &flags) != 0)
481 return -EBUSY;
482
483 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
484 index = priv->tx_key;
485
486 erq->flags = 0;
487 if (!priv->encode_alg)
488 erq->flags |= IW_ENCODE_DISABLED;
489 erq->flags |= index + 1;
490
491 if (priv->wep_restrict)
492 erq->flags |= IW_ENCODE_RESTRICTED;
493 else
494 erq->flags |= IW_ENCODE_OPEN;
495
496 xlen = le16_to_cpu(priv->keys[index].len);
497
498 erq->length = xlen;
499
500 memcpy(keybuf, priv->keys[index].data, ORINOCO_MAX_KEY_SIZE);
501
502 orinoco_unlock(priv, &flags);
503 return 0;
504}
505
506static int orinoco_ioctl_setessid(struct net_device *dev,
507 struct iw_request_info *info,
508 struct iw_point *erq,
509 char *essidbuf)
510{
511 struct orinoco_private *priv = netdev_priv(dev);
512 unsigned long flags;
513
514 /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it
515 * anyway... - Jean II */
516
517 /* Hum... Should not use Wireless Extension constant (may change),
518 * should use our own... - Jean II */
519 if (erq->length > IW_ESSID_MAX_SIZE)
520 return -E2BIG;
521
522 if (orinoco_lock(priv, &flags) != 0)
523 return -EBUSY;
524
525 /* NULL the string (for NULL termination & ESSID = ANY) - Jean II */
526 memset(priv->desired_essid, 0, sizeof(priv->desired_essid));
527
528 /* If not ANY, get the new ESSID */
529 if (erq->flags)
530 memcpy(priv->desired_essid, essidbuf, erq->length);
531
532 orinoco_unlock(priv, &flags);
533
534 return -EINPROGRESS; /* Call commit handler */
535}
536
537static int orinoco_ioctl_getessid(struct net_device *dev,
538 struct iw_request_info *info,
539 struct iw_point *erq,
540 char *essidbuf)
541{
542 struct orinoco_private *priv = netdev_priv(dev);
543 int active;
544 int err = 0;
545 unsigned long flags;
546
547 if (netif_running(dev)) {
548 err = orinoco_hw_get_essid(priv, &active, essidbuf);
549 if (err < 0)
550 return err;
551 erq->length = err;
552 } else {
553 if (orinoco_lock(priv, &flags) != 0)
554 return -EBUSY;
555 memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE);
556 erq->length = strlen(priv->desired_essid);
557 orinoco_unlock(priv, &flags);
558 }
559
560 erq->flags = 1;
561
562 return 0;
563}
564
565static int orinoco_ioctl_setnick(struct net_device *dev,
566 struct iw_request_info *info,
567 struct iw_point *nrq,
568 char *nickbuf)
569{
570 struct orinoco_private *priv = netdev_priv(dev);
571 unsigned long flags;
572
573 if (nrq->length > IW_ESSID_MAX_SIZE)
574 return -E2BIG;
575
576 if (orinoco_lock(priv, &flags) != 0)
577 return -EBUSY;
578
579 memset(priv->nick, 0, sizeof(priv->nick));
580 memcpy(priv->nick, nickbuf, nrq->length);
581
582 orinoco_unlock(priv, &flags);
583
584 return -EINPROGRESS; /* Call commit handler */
585}
586
587static int orinoco_ioctl_getnick(struct net_device *dev,
588 struct iw_request_info *info,
589 struct iw_point *nrq,
590 char *nickbuf)
591{
592 struct orinoco_private *priv = netdev_priv(dev);
593 unsigned long flags;
594
595 if (orinoco_lock(priv, &flags) != 0)
596 return -EBUSY;
597
598 memcpy(nickbuf, priv->nick, IW_ESSID_MAX_SIZE);
599 orinoco_unlock(priv, &flags);
600
601 nrq->length = strlen(priv->nick);
602
603 return 0;
604}
605
606static int orinoco_ioctl_setfreq(struct net_device *dev,
607 struct iw_request_info *info,
608 struct iw_freq *frq,
609 char *extra)
610{
611 struct orinoco_private *priv = netdev_priv(dev);
612 int chan = -1;
613 unsigned long flags;
614 int err = -EINPROGRESS; /* Call commit handler */
615
616 /* In infrastructure mode the AP sets the channel */
617 if (priv->iw_mode == IW_MODE_INFRA)
618 return -EBUSY;
619
620 if ((frq->e == 0) && (frq->m <= 1000)) {
621 /* Setting by channel number */
622 chan = frq->m;
623 } else {
624 /* Setting by frequency */
625 int denom = 1;
626 int i;
627
628 /* Calculate denominator to rescale to MHz */
629 for (i = 0; i < (6 - frq->e); i++)
630 denom *= 10;
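/* (Worked example: denom is now 10^(6 - frq->e), so frq->m / denom is
 * the frequency in MHz, iw_freq encoding m * 10^e Hz; m = 2437, e = 6
 * and m = 243700000, e = 1 both give 2437 MHz, i.e. channel 6.) */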
631
632 chan = ieee80211_freq_to_dsss_chan(frq->m / denom);
633 }
634
635 if ((chan < 1) || (chan > NUM_CHANNELS) ||
636 !(priv->channel_mask & (1 << (chan-1))))
637 return -EINVAL;
638
639 if (orinoco_lock(priv, &flags) != 0)
640 return -EBUSY;
641
642 priv->channel = chan;
643 if (priv->iw_mode == IW_MODE_MONITOR) {
644 /* Fast channel change - no commit if successful */
645 hermes_t *hw = &priv->hw;
646 err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
647 HERMES_TEST_SET_CHANNEL,
648 chan, NULL);
649 }
650 orinoco_unlock(priv, &flags);
651
652 return err;
653}
654
655static int orinoco_ioctl_getfreq(struct net_device *dev,
656 struct iw_request_info *info,
657 struct iw_freq *frq,
658 char *extra)
659{
660 struct orinoco_private *priv = netdev_priv(dev);
661 int tmp;
662
663 /* Locking done in there */
664 tmp = orinoco_hw_get_freq(priv);
665 if (tmp < 0)
666 return tmp;
667
668 frq->m = tmp * 100000;
669 frq->e = 1;
670
671 return 0;
672}
673
674static int orinoco_ioctl_getsens(struct net_device *dev,
675 struct iw_request_info *info,
676 struct iw_param *srq,
677 char *extra)
678{
679 struct orinoco_private *priv = netdev_priv(dev);
680 hermes_t *hw = &priv->hw;
681 u16 val;
682 int err;
683 unsigned long flags;
684
685 if (!priv->has_sensitivity)
686 return -EOPNOTSUPP;
687
688 if (orinoco_lock(priv, &flags) != 0)
689 return -EBUSY;
690 err = hermes_read_wordrec(hw, USER_BAP,
691 HERMES_RID_CNFSYSTEMSCALE, &val);
692 orinoco_unlock(priv, &flags);
693
694 if (err)
695 return err;
696
697 srq->value = val;
698 srq->fixed = 0; /* auto */
699
700 return 0;
701}
702
703static int orinoco_ioctl_setsens(struct net_device *dev,
704 struct iw_request_info *info,
705 struct iw_param *srq,
706 char *extra)
707{
708 struct orinoco_private *priv = netdev_priv(dev);
709 int val = srq->value;
710 unsigned long flags;
711
712 if (!priv->has_sensitivity)
713 return -EOPNOTSUPP;
714
715 if ((val < 1) || (val > 3))
716 return -EINVAL;
717
718 if (orinoco_lock(priv, &flags) != 0)
719 return -EBUSY;
720 priv->ap_density = val;
721 orinoco_unlock(priv, &flags);
722
723 return -EINPROGRESS; /* Call commit handler */
724}
725
726static int orinoco_ioctl_setrts(struct net_device *dev,
727 struct iw_request_info *info,
728 struct iw_param *rrq,
729 char *extra)
730{
731 struct orinoco_private *priv = netdev_priv(dev);
732 int val = rrq->value;
733 unsigned long flags;
734
735 if (rrq->disabled)
736 val = 2347;
737
738 if ((val < 0) || (val > 2347))
739 return -EINVAL;
740
741 if (orinoco_lock(priv, &flags) != 0)
742 return -EBUSY;
743
744 priv->rts_thresh = val;
745 orinoco_unlock(priv, &flags);
746
747 return -EINPROGRESS; /* Call commit handler */
748}
749
750static int orinoco_ioctl_getrts(struct net_device *dev,
751 struct iw_request_info *info,
752 struct iw_param *rrq,
753 char *extra)
754{
755 struct orinoco_private *priv = netdev_priv(dev);
756
757 rrq->value = priv->rts_thresh;
758 rrq->disabled = (rrq->value == 2347);
759 rrq->fixed = 1;
760
761 return 0;
762}
763
764static int orinoco_ioctl_setfrag(struct net_device *dev,
765 struct iw_request_info *info,
766 struct iw_param *frq,
767 char *extra)
768{
769 struct orinoco_private *priv = netdev_priv(dev);
770 int err = -EINPROGRESS; /* Call commit handler */
771 unsigned long flags;
772
773 if (orinoco_lock(priv, &flags) != 0)
774 return -EBUSY;
775
776 if (priv->has_mwo) {
777 if (frq->disabled)
778 priv->mwo_robust = 0;
779 else {
780 if (frq->fixed)
781 printk(KERN_WARNING "%s: Fixed fragmentation "
782 "is not supported on this firmware. "
783 "Using MWO robust instead.\n",
784 dev->name);
785 priv->mwo_robust = 1;
786 }
787 } else {
788 if (frq->disabled)
789 priv->frag_thresh = 2346;
790 else {
791 if ((frq->value < 256) || (frq->value > 2346))
792 err = -EINVAL;
793 else
794 /* must be even */
795 priv->frag_thresh = frq->value & ~0x1;
796 }
797 }
798
799 orinoco_unlock(priv, &flags);
800
801 return err;
802}
803
804static int orinoco_ioctl_getfrag(struct net_device *dev,
805 struct iw_request_info *info,
806 struct iw_param *frq,
807 char *extra)
808{
809 struct orinoco_private *priv = netdev_priv(dev);
810 hermes_t *hw = &priv->hw;
811 int err;
812 u16 val;
813 unsigned long flags;
814
815 if (orinoco_lock(priv, &flags) != 0)
816 return -EBUSY;
817
818 if (priv->has_mwo) {
819 err = hermes_read_wordrec(hw, USER_BAP,
820 HERMES_RID_CNFMWOROBUST_AGERE,
821 &val);
822 if (err)
823 val = 0;
824
825 frq->value = val ? 2347 : 0;
826 frq->disabled = !val;
827 frq->fixed = 0;
828 } else {
829 err = hermes_read_wordrec(hw, USER_BAP,
830 HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
831 &val);
832 if (err)
833 val = 0;
834
835 frq->value = val;
836 frq->disabled = (val >= 2346);
837 frq->fixed = 1;
838 }
839
840 orinoco_unlock(priv, &flags);
841
842 return err;
843}
844
845static int orinoco_ioctl_setrate(struct net_device *dev,
846 struct iw_request_info *info,
847 struct iw_param *rrq,
848 char *extra)
849{
850 struct orinoco_private *priv = netdev_priv(dev);
851 int ratemode;
852 int bitrate; /* 100s of kilobits */
853 unsigned long flags;
854
855 /* As the user space doesn't know our highest rate, it uses -1
856 * to ask us to set the highest rate. Test it using "iwconfig
857 * ethX rate auto" - Jean II */
858 if (rrq->value == -1)
859 bitrate = 110;
860 else {
861 if (rrq->value % 100000)
862 return -EINVAL;
863 bitrate = rrq->value / 100000;
864 }
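/* (Worked example: "iwconfig ethX rate 11M" arrives as
 * rrq->value = 11000000 and becomes bitrate = 110, in hundreds of
 * kbit/s; "rate auto" arrives as -1 and likewise selects the top
 * rate, 110.) */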
865
866 ratemode = orinoco_get_bitratemode(bitrate, !rrq->fixed);
867
868 if (ratemode == -1)
869 return -EINVAL;
870
871 if (orinoco_lock(priv, &flags) != 0)
872 return -EBUSY;
873 priv->bitratemode = ratemode;
874 orinoco_unlock(priv, &flags);
875
876 return -EINPROGRESS;
877}
878
879static int orinoco_ioctl_getrate(struct net_device *dev,
880 struct iw_request_info *info,
881 struct iw_param *rrq,
882 char *extra)
883{
884 struct orinoco_private *priv = netdev_priv(dev);
885 int err = 0;
886 int bitrate, automatic;
887 unsigned long flags;
888
889 if (orinoco_lock(priv, &flags) != 0)
890 return -EBUSY;
891
892 orinoco_get_ratemode_cfg(priv->bitratemode, &bitrate, &automatic);
893
894 /* If the interface is running we try to find more about the
895 current mode */
896 if (netif_running(dev))
897 err = orinoco_hw_get_act_bitrate(priv, &bitrate);
898
899 orinoco_unlock(priv, &flags);
900
901 rrq->value = bitrate;
902 rrq->fixed = !automatic;
903 rrq->disabled = 0;
904
905 return err;
906}
907
908static int orinoco_ioctl_setpower(struct net_device *dev,
909 struct iw_request_info *info,
910 struct iw_param *prq,
911 char *extra)
912{
913 struct orinoco_private *priv = netdev_priv(dev);
914 int err = -EINPROGRESS; /* Call commit handler */
915 unsigned long flags;
916
917 if (orinoco_lock(priv, &flags) != 0)
918 return -EBUSY;
919
920 if (prq->disabled) {
921 priv->pm_on = 0;
922 } else {
923 switch (prq->flags & IW_POWER_MODE) {
924 case IW_POWER_UNICAST_R:
925 priv->pm_mcast = 0;
926 priv->pm_on = 1;
927 break;
928 case IW_POWER_ALL_R:
929 priv->pm_mcast = 1;
930 priv->pm_on = 1;
931 break;
932 case IW_POWER_ON:
933 /* No flags : but we may have a value - Jean II */
934 break;
935 default:
936 err = -EINVAL;
937 goto out;
938 }
939
940 if (prq->flags & IW_POWER_TIMEOUT) {
941 priv->pm_on = 1;
942 priv->pm_timeout = prq->value / 1000;
943 }
944 if (prq->flags & IW_POWER_PERIOD) {
945 priv->pm_on = 1;
946 priv->pm_period = prq->value / 1000;
947 }
948 /* It's valid to not have a value if we are just toggling
949 * the flags... Jean II */
950 if (!priv->pm_on) {
951 err = -EINVAL;
952 goto out;
953 }
954 }
955
956 out:
957 orinoco_unlock(priv, &flags);
958
959 return err;
960}
961
962static int orinoco_ioctl_getpower(struct net_device *dev,
963 struct iw_request_info *info,
964 struct iw_param *prq,
965 char *extra)
966{
967 struct orinoco_private *priv = netdev_priv(dev);
968 hermes_t *hw = &priv->hw;
969 int err = 0;
970 u16 enable, period, timeout, mcast;
971 unsigned long flags;
972
973 if (orinoco_lock(priv, &flags) != 0)
974 return -EBUSY;
975
976 err = hermes_read_wordrec(hw, USER_BAP,
977 HERMES_RID_CNFPMENABLED, &enable);
978 if (err)
979 goto out;
980
981 err = hermes_read_wordrec(hw, USER_BAP,
982 HERMES_RID_CNFMAXSLEEPDURATION, &period);
983 if (err)
984 goto out;
985
986 err = hermes_read_wordrec(hw, USER_BAP,
987 HERMES_RID_CNFPMHOLDOVERDURATION, &timeout);
988 if (err)
989 goto out;
990
991 err = hermes_read_wordrec(hw, USER_BAP,
992 HERMES_RID_CNFMULTICASTRECEIVE, &mcast);
993 if (err)
994 goto out;
995
996 prq->disabled = !enable;
997 /* Note : by default, display the period */
998 if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
999 prq->flags = IW_POWER_TIMEOUT;
1000 prq->value = timeout * 1000;
1001 } else {
1002 prq->flags = IW_POWER_PERIOD;
1003 prq->value = period * 1000;
1004 }
1005 if (mcast)
1006 prq->flags |= IW_POWER_ALL_R;
1007 else
1008 prq->flags |= IW_POWER_UNICAST_R;
1009
1010 out:
1011 orinoco_unlock(priv, &flags);
1012
1013 return err;
1014}
1015
1016static int orinoco_ioctl_set_encodeext(struct net_device *dev,
1017 struct iw_request_info *info,
1018 union iwreq_data *wrqu,
1019 char *extra)
1020{
1021 struct orinoco_private *priv = netdev_priv(dev);
1022 struct iw_point *encoding = &wrqu->encoding;
1023 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1024 int idx, alg = ext->alg, set_key = 1;
1025 unsigned long flags;
1026 int err = -EINVAL;
1027 u16 key_len;
1028
1029 if (orinoco_lock(priv, &flags) != 0)
1030 return -EBUSY;
1031
1032 /* Determine and validate the key index */
1033 idx = encoding->flags & IW_ENCODE_INDEX;
1034 if (idx) {
1035 if ((idx < 1) || (idx > 4))
1036 goto out;
1037 idx--;
1038 } else
1039 idx = priv->tx_key;
1040
1041 if (encoding->flags & IW_ENCODE_DISABLED)
1042 alg = IW_ENCODE_ALG_NONE;
1043
1044 if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) {
1045 /* Clear any TKIP TX key we had */
1046 (void) orinoco_clear_tkip_key(priv, priv->tx_key);
1047 }
1048
1049 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
1050 priv->tx_key = idx;
1051 set_key = ((alg == IW_ENCODE_ALG_TKIP) ||
1052 (ext->key_len > 0)) ? 1 : 0;
1053 }
1054
1055 if (set_key) {
1056 /* Set the requested key first */
1057 switch (alg) {
1058 case IW_ENCODE_ALG_NONE:
1059 priv->encode_alg = alg;
1060 priv->keys[idx].len = 0;
1061 break;
1062
1063 case IW_ENCODE_ALG_WEP:
1064 if (ext->key_len > SMALL_KEY_SIZE)
1065 key_len = LARGE_KEY_SIZE;
1066 else if (ext->key_len > 0)
1067 key_len = SMALL_KEY_SIZE;
1068 else
1069 goto out;
1070
1071 priv->encode_alg = alg;
1072 priv->keys[idx].len = cpu_to_le16(key_len);
1073
1074 key_len = min(ext->key_len, key_len);
1075
1076 memset(priv->keys[idx].data, 0, ORINOCO_MAX_KEY_SIZE);
1077 memcpy(priv->keys[idx].data, ext->key, key_len);
1078 break;
1079
1080 case IW_ENCODE_ALG_TKIP:
1081 {
1082 hermes_t *hw = &priv->hw;
1083 u8 *tkip_iv = NULL;
1084
1085 if (!priv->has_wpa ||
1086 (ext->key_len > sizeof(priv->tkip_key[0])))
1087 goto out;
1088
1089 priv->encode_alg = alg;
1090 memset(&priv->tkip_key[idx], 0,
1091 sizeof(priv->tkip_key[idx]));
1092 memcpy(&priv->tkip_key[idx], ext->key, ext->key_len);
1093
1094 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
1095 tkip_iv = &ext->rx_seq[0];
1096
1097 err = __orinoco_hw_set_tkip_key(hw, idx,
1098 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
1099 (u8 *) &priv->tkip_key[idx],
1100 tkip_iv, NULL);
1101 if (err)
1102 printk(KERN_ERR "%s: Error %d setting TKIP key"
1103 "\n", dev->name, err);
1104
1105 goto out;
1106 }
1107 default:
1108 goto out;
1109 }
1110 }
1111 err = -EINPROGRESS;
1112 out:
1113 orinoco_unlock(priv, &flags);
1114
1115 return err;
1116}
1117
1118static int orinoco_ioctl_get_encodeext(struct net_device *dev,
1119 struct iw_request_info *info,
1120 union iwreq_data *wrqu,
1121 char *extra)
1122{
1123 struct orinoco_private *priv = netdev_priv(dev);
1124 struct iw_point *encoding = &wrqu->encoding;
1125 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1126 int idx, max_key_len;
1127 unsigned long flags;
1128 int err;
1129
1130 if (orinoco_lock(priv, &flags) != 0)
1131 return -EBUSY;
1132
1133 err = -EINVAL;
1134 max_key_len = encoding->length - sizeof(*ext);
1135 if (max_key_len < 0)
1136 goto out;
1137
1138 idx = encoding->flags & IW_ENCODE_INDEX;
1139 if (idx) {
1140 if ((idx < 1) || (idx > 4))
1141 goto out;
1142 idx--;
1143 } else
1144 idx = priv->tx_key;
1145
1146 encoding->flags = idx + 1;
1147 memset(ext, 0, sizeof(*ext));
1148
1149 ext->alg = priv->encode_alg;
1150 switch (priv->encode_alg) {
1151 case IW_ENCODE_ALG_NONE:
1152 ext->key_len = 0;
1153 encoding->flags |= IW_ENCODE_DISABLED;
1154 break;
1155 case IW_ENCODE_ALG_WEP:
1156 ext->key_len = min_t(u16, le16_to_cpu(priv->keys[idx].len),
1157 max_key_len);
1158 memcpy(ext->key, priv->keys[idx].data, ext->key_len);
1159 encoding->flags |= IW_ENCODE_ENABLED;
1160 break;
1161 case IW_ENCODE_ALG_TKIP:
1162 ext->key_len = min_t(u16, sizeof(struct orinoco_tkip_key),
1163 max_key_len);
1164 memcpy(ext->key, &priv->tkip_key[idx], ext->key_len);
1165 encoding->flags |= IW_ENCODE_ENABLED;
1166 break;
1167 }
1168
1169 err = 0;
1170 out:
1171 orinoco_unlock(priv, &flags);
1172
1173 return err;
1174}
1175
1176static int orinoco_ioctl_set_auth(struct net_device *dev,
1177 struct iw_request_info *info,
1178 union iwreq_data *wrqu, char *extra)
1179{
1180 struct orinoco_private *priv = netdev_priv(dev);
1181 hermes_t *hw = &priv->hw;
1182 struct iw_param *param = &wrqu->param;
1183 unsigned long flags;
1184 int ret = -EINPROGRESS;
1185
1186 if (orinoco_lock(priv, &flags) != 0)
1187 return -EBUSY;
1188
1189 switch (param->flags & IW_AUTH_INDEX) {
1190 case IW_AUTH_WPA_VERSION:
1191 case IW_AUTH_CIPHER_PAIRWISE:
1192 case IW_AUTH_CIPHER_GROUP:
1193 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
1194 case IW_AUTH_PRIVACY_INVOKED:
1195 case IW_AUTH_DROP_UNENCRYPTED:
1196 /*
1197 * orinoco does not use these parameters
1198 */
1199 break;
1200
1201 case IW_AUTH_KEY_MGMT:
1202 /* wl_lkm implies value 2 == PSK for Hermes I
1203 * which ties in with WEXT
1204 * no other hints tho :(
1205 */
1206 priv->key_mgmt = param->value;
1207 break;
1208
1209 case IW_AUTH_TKIP_COUNTERMEASURES:
1210 /* When countermeasures are enabled, shut down the
1211 * card; when disabled, re-enable the card. This must
1212 * take effect immediately.
1213 *
1214 * TODO: Make sure that the EAPOL message is getting
1215 * out before card disabled
1216 */
1217 if (param->value) {
1218 priv->tkip_cm_active = 1;
1219 ret = hermes_enable_port(hw, 0);
1220 } else {
1221 priv->tkip_cm_active = 0;
1222 ret = hermes_disable_port(hw, 0);
1223 }
1224 break;
1225
1226 case IW_AUTH_80211_AUTH_ALG:
1227 if (param->value & IW_AUTH_ALG_SHARED_KEY)
1228 priv->wep_restrict = 1;
1229 else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
1230 priv->wep_restrict = 0;
1231 else
1232 ret = -EINVAL;
1233 break;
1234
1235 case IW_AUTH_WPA_ENABLED:
1236 if (priv->has_wpa) {
1237 priv->wpa_enabled = param->value ? 1 : 0;
1238 } else {
1239 if (param->value)
1240 ret = -EOPNOTSUPP;
1241 /* else silently accept disable of WPA */
1242 priv->wpa_enabled = 0;
1243 }
1244 break;
1245
1246 default:
1247 ret = -EOPNOTSUPP;
1248 }
1249
1250 orinoco_unlock(priv, &flags);
1251 return ret;
1252}
1253
1254static int orinoco_ioctl_get_auth(struct net_device *dev,
1255 struct iw_request_info *info,
1256 union iwreq_data *wrqu, char *extra)
1257{
1258 struct orinoco_private *priv = netdev_priv(dev);
1259 struct iw_param *param = &wrqu->param;
1260 unsigned long flags;
1261 int ret = 0;
1262
1263 if (orinoco_lock(priv, &flags) != 0)
1264 return -EBUSY;
1265
1266 switch (param->flags & IW_AUTH_INDEX) {
1267 case IW_AUTH_KEY_MGMT:
1268 param->value = priv->key_mgmt;
1269 break;
1270
1271 case IW_AUTH_TKIP_COUNTERMEASURES:
1272 param->value = priv->tkip_cm_active;
1273 break;
1274
1275 case IW_AUTH_80211_AUTH_ALG:
1276 if (priv->wep_restrict)
1277 param->value = IW_AUTH_ALG_SHARED_KEY;
1278 else
1279 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
1280 break;
1281
1282 case IW_AUTH_WPA_ENABLED:
1283 param->value = priv->wpa_enabled;
1284 break;
1285
1286 default:
1287 ret = -EOPNOTSUPP;
1288 }
1289
1290 orinoco_unlock(priv, &flags);
1291 return ret;
1292}
1293
1294static int orinoco_ioctl_set_genie(struct net_device *dev,
1295 struct iw_request_info *info,
1296 union iwreq_data *wrqu, char *extra)
1297{
1298 struct orinoco_private *priv = netdev_priv(dev);
1299 u8 *buf;
1300 unsigned long flags;
1301
1302 /* cut off at IEEE80211_MAX_DATA_LEN */
1303 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
1304 (wrqu->data.length && (extra == NULL)))
1305 return -EINVAL;
1306
1307 if (wrqu->data.length) {
1308 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
1309 if (buf == NULL)
1310 return -ENOMEM;
1311
1312 memcpy(buf, extra, wrqu->data.length);
1313 } else
1314 buf = NULL;
1315
1316 if (orinoco_lock(priv, &flags) != 0) {
1317 kfree(buf);
1318 return -EBUSY;
1319 }
1320
1321 kfree(priv->wpa_ie);
1322 priv->wpa_ie = buf;
1323 priv->wpa_ie_len = wrqu->data.length;
1324
1325 if (priv->wpa_ie) {
1326 /* Looks like wl_lkm wants to check the auth alg, and
1327 * somehow pass it to the firmware.
1328 * Instead it just calls the key mgmt rid
1329 * - we do this in set auth.
1330 */
1331 }
1332
1333 orinoco_unlock(priv, &flags);
1334 return 0;
1335}
1336
1337static int orinoco_ioctl_get_genie(struct net_device *dev,
1338 struct iw_request_info *info,
1339 union iwreq_data *wrqu, char *extra)
1340{
1341 struct orinoco_private *priv = netdev_priv(dev);
1342 unsigned long flags;
1343 int err = 0;
1344
1345 if (orinoco_lock(priv, &flags) != 0)
1346 return -EBUSY;
1347
1348 if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) {
1349 wrqu->data.length = 0;
1350 goto out;
1351 }
1352
1353 if (wrqu->data.length < priv->wpa_ie_len) {
1354 err = -E2BIG;
1355 goto out;
1356 }
1357
1358 wrqu->data.length = priv->wpa_ie_len;
1359 memcpy(extra, priv->wpa_ie, priv->wpa_ie_len);
1360
1361out:
1362 orinoco_unlock(priv, &flags);
1363 return err;
1364}
1365
1366static int orinoco_ioctl_set_mlme(struct net_device *dev,
1367 struct iw_request_info *info,
1368 union iwreq_data *wrqu, char *extra)
1369{
1370 struct orinoco_private *priv = netdev_priv(dev);
1371 hermes_t *hw = &priv->hw;
1372 struct iw_mlme *mlme = (struct iw_mlme *)extra;
1373 unsigned long flags;
1374 int ret = 0;
1375
1376 if (orinoco_lock(priv, &flags) != 0)
1377 return -EBUSY;
1378
1379 switch (mlme->cmd) {
1380 case IW_MLME_DEAUTH:
1381 /* silently ignore */
1382 break;
1383
1384 case IW_MLME_DISASSOC:
1385 {
1386 struct {
1387 u8 addr[ETH_ALEN];
1388 __le16 reason_code;
1389 } __attribute__ ((packed)) buf;
1390
1391 memcpy(buf.addr, mlme->addr.sa_data, ETH_ALEN);
1392 buf.reason_code = cpu_to_le16(mlme->reason_code);
1393 ret = HERMES_WRITE_RECORD(hw, USER_BAP,
1394 HERMES_RID_CNFDISASSOCIATE,
1395 &buf);
1396 break;
1397 }
1398 default:
1399 ret = -EOPNOTSUPP;
1400 }
1401
1402 orinoco_unlock(priv, &flags);
1403 return ret;
1404}
1405
1406static int orinoco_ioctl_getretry(struct net_device *dev,
1407 struct iw_request_info *info,
1408 struct iw_param *rrq,
1409 char *extra)
1410{
1411 struct orinoco_private *priv = netdev_priv(dev);
1412 hermes_t *hw = &priv->hw;
1413 int err = 0;
1414 u16 short_limit, long_limit, lifetime;
1415 unsigned long flags;
1416
1417 if (orinoco_lock(priv, &flags) != 0)
1418 return -EBUSY;
1419
1420 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
1421 &short_limit);
1422 if (err)
1423 goto out;
1424
1425 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
1426 &long_limit);
1427 if (err)
1428 goto out;
1429
1430 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
1431 &lifetime);
1432 if (err)
1433 goto out;
1434
1435 rrq->disabled = 0; /* Can't be disabled */
1436
1437 /* Note : by default, display the retry number */
1438 if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
1439 rrq->flags = IW_RETRY_LIFETIME;
1440 rrq->value = lifetime * 1000; /* ??? */
1441 } else {
1442 /* By default, display the min number */
1443 if ((rrq->flags & IW_RETRY_LONG)) {
1444 rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
1445 rrq->value = long_limit;
1446 } else {
1447 rrq->flags = IW_RETRY_LIMIT;
1448 rrq->value = short_limit;
1449 if (short_limit != long_limit)
1450 rrq->flags |= IW_RETRY_SHORT;
1451 }
1452 }
1453
1454 out:
1455 orinoco_unlock(priv, &flags);
1456
1457 return err;
1458}
1459
1460static int orinoco_ioctl_reset(struct net_device *dev,
1461 struct iw_request_info *info,
1462 void *wrqu,
1463 char *extra)
1464{
1465 struct orinoco_private *priv = netdev_priv(dev);
1466
1467 if (!capable(CAP_NET_ADMIN))
1468 return -EPERM;
1469
1470 if (info->cmd == (SIOCIWFIRSTPRIV + 0x1)) {
1471 printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
1472
1473 /* Firmware reset */
1474 orinoco_reset(&priv->reset_work);
1475 } else {
1476 printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
1477
1478 schedule_work(&priv->reset_work);
1479 }
1480
1481 return 0;
1482}
1483
1484static int orinoco_ioctl_setibssport(struct net_device *dev,
1485 struct iw_request_info *info,
1486 void *wrqu,
1487 char *extra)
1488
1489{
1490 struct orinoco_private *priv = netdev_priv(dev);
1491 int val = *((int *) extra);
1492 unsigned long flags;
1493
1494 if (orinoco_lock(priv, &flags) != 0)
1495 return -EBUSY;
1496
1497 priv->ibss_port = val;
1498
1499 /* Actually update the mode we are using */
1500 set_port_type(priv);
1501
1502 orinoco_unlock(priv, &flags);
1503 return -EINPROGRESS; /* Call commit handler */
1504}
1505
1506static int orinoco_ioctl_getibssport(struct net_device *dev,
1507 struct iw_request_info *info,
1508 void *wrqu,
1509 char *extra)
1510{
1511 struct orinoco_private *priv = netdev_priv(dev);
1512 int *val = (int *) extra;
1513
1514 *val = priv->ibss_port;
1515 return 0;
1516}
1517
1518static int orinoco_ioctl_setport3(struct net_device *dev,
1519 struct iw_request_info *info,
1520 void *wrqu,
1521 char *extra)
1522{
1523 struct orinoco_private *priv = netdev_priv(dev);
1524 int val = *((int *) extra);
1525 int err = 0;
1526 unsigned long flags;
1527
1528 if (orinoco_lock(priv, &flags) != 0)
1529 return -EBUSY;
1530
1531 switch (val) {
1532 case 0: /* Try to do IEEE ad-hoc mode */
1533 if (!priv->has_ibss) {
1534 err = -EINVAL;
1535 break;
1536 }
1537 priv->prefer_port3 = 0;
1538
1539 break;
1540
1541 case 1: /* Try to do Lucent proprietary ad-hoc mode */
1542 if (!priv->has_port3) {
1543 err = -EINVAL;
1544 break;
1545 }
1546 priv->prefer_port3 = 1;
1547 break;
1548
1549 default:
1550 err = -EINVAL;
1551 }
1552
1553 if (!err) {
1554 /* Actually update the mode we are using */
1555 set_port_type(priv);
1556 err = -EINPROGRESS;
1557 }
1558
1559 orinoco_unlock(priv, &flags);
1560
1561 return err;
1562}
1563
1564static int orinoco_ioctl_getport3(struct net_device *dev,
1565 struct iw_request_info *info,
1566 void *wrqu,
1567 char *extra)
1568{
1569 struct orinoco_private *priv = netdev_priv(dev);
1570 int *val = (int *) extra;
1571
1572 *val = priv->prefer_port3;
1573 return 0;
1574}
1575
1576static int orinoco_ioctl_setpreamble(struct net_device *dev,
1577 struct iw_request_info *info,
1578 void *wrqu,
1579 char *extra)
1580{
1581 struct orinoco_private *priv = netdev_priv(dev);
1582 unsigned long flags;
1583 int val;
1584
1585 if (!priv->has_preamble)
1586 return -EOPNOTSUPP;
1587
1588 /* 802.11b has recently defined some short preamble.
1589 * Basically, the Phy header has been reduced in size.
1590 * This increases performance, especially at high rates
1591 * (the preamble is transmitted at 1Mb/s); unfortunately
1592 * this gives compatibility troubles... - Jean II */
1593 val = *((int *) extra);
1594
1595 if (orinoco_lock(priv, &flags) != 0)
1596 return -EBUSY;
1597
1598 if (val)
1599 priv->preamble = 1;
1600 else
1601 priv->preamble = 0;
1602
1603 orinoco_unlock(priv, &flags);
1604
1605 return -EINPROGRESS; /* Call commit handler */
1606}
1607
1608static int orinoco_ioctl_getpreamble(struct net_device *dev,
1609 struct iw_request_info *info,
1610 void *wrqu,
1611 char *extra)
1612{
1613 struct orinoco_private *priv = netdev_priv(dev);
1614 int *val = (int *) extra;
1615
1616 if (!priv->has_preamble)
1617 return -EOPNOTSUPP;
1618
1619 *val = priv->preamble;
1620 return 0;
1621}
1622
1623/* ioctl interface to hermes_read_ltv()
1624 * To use with iwpriv, pass the RID as the token argument, e.g.
1625 * iwpriv get_rid [0xfc00]
1626 * At least Wireless Tools 25 is required to use iwpriv.
1627 * For Wireless Tools 25 and 26 append "dummy" at the end. */
1628static int orinoco_ioctl_getrid(struct net_device *dev,
1629 struct iw_request_info *info,
1630 struct iw_point *data,
1631 char *extra)
1632{
1633 struct orinoco_private *priv = netdev_priv(dev);
1634 hermes_t *hw = &priv->hw;
1635 int rid = data->flags;
1636 u16 length;
1637 int err;
1638 unsigned long flags;
1639
1640 /* It's a "get" function, but we don't want users to access the
1641 * WEP key and other raw firmware data */
1642 if (!capable(CAP_NET_ADMIN))
1643 return -EPERM;
1644
1645 if (rid < 0xfc00 || rid > 0xffff)
1646 return -EINVAL;
1647
1648 if (orinoco_lock(priv, &flags) != 0)
1649 return -EBUSY;
1650
1651 err = hermes_read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
1652 extra);
1653 if (err)
1654 goto out;
1655
1656 data->length = min_t(u16, HERMES_RECLEN_TO_BYTES(length),
1657 MAX_RID_LEN);
1658
1659 out:
1660 orinoco_unlock(priv, &flags);
1661 return err;
1662}
1663
1664/* Trigger a scan (look for other cells in the vicinity) */
1665static int orinoco_ioctl_setscan(struct net_device *dev,
1666 struct iw_request_info *info,
1667 struct iw_point *srq,
1668 char *extra)
1669{
1670 struct orinoco_private *priv = netdev_priv(dev);
1671 hermes_t *hw = &priv->hw;
1672 struct iw_scan_req *si = (struct iw_scan_req *) extra;
1673 int err = 0;
1674 unsigned long flags;
1675
1676 /* Note : you may have realised that, as this is a SET operation,
1677 * this is privileged and therefore a normal user can't
1678 * perform scanning.
1679 * This is not an error: while the device performs scanning,
1680 * traffic doesn't flow, so it's a perfect DoS...
1681 * Jean II */
1682
1683 if (orinoco_lock(priv, &flags) != 0)
1684 return -EBUSY;
1685
1686 /* Scanning with port 0 disabled would fail */
1687 if (!netif_running(dev)) {
1688 err = -ENETDOWN;
1689 goto out;
1690 }
1691
1692 /* In monitor mode, the scan results are always empty.
1693 * Probe responses are passed to the driver as received
1694 * frames and could be processed in software. */
1695 if (priv->iw_mode == IW_MODE_MONITOR) {
1696 err = -EOPNOTSUPP;
1697 goto out;
1698 }
1699
1700 /* Note : because we don't lock out the irq handler, the way
1701 * we access scan variables in priv is critical.
1702 * o scan_inprogress : not touched by irq handler
1703 * o scan_mode : not touched by irq handler
1704 * Before modifying anything on those variables, please think hard !
1705 * Jean II */
1706
1707 /* Save flags */
1708 priv->scan_mode = srq->flags;
1709
1710 /* Always trigger scanning, even if it's in progress.
1711 * This way, if the info frame gets lost, we will recover somewhat
1712 * gracefully - Jean II */
1713
1714 if (priv->has_hostscan) {
1715 switch (priv->firmware_type) {
1716 case FIRMWARE_TYPE_SYMBOL:
1717 err = hermes_write_wordrec(hw, USER_BAP,
1718 HERMES_RID_CNFHOSTSCAN_SYMBOL,
1719 HERMES_HOSTSCAN_SYMBOL_ONCE |
1720 HERMES_HOSTSCAN_SYMBOL_BCAST);
1721 break;
1722 case FIRMWARE_TYPE_INTERSIL: {
1723 __le16 req[3];
1724
1725 req[0] = cpu_to_le16(0x3fff); /* All channels */
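/* (Presumably one bit per channel: 0x3fff has bits 0-13 set, i.e.
 * channels 1-14.) */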
1726 req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */
1727 req[2] = 0; /* Any ESSID */
1728 err = HERMES_WRITE_RECORD(hw, USER_BAP,
1729 HERMES_RID_CNFHOSTSCAN, &req);
1730 }
1731 break;
1732 case FIRMWARE_TYPE_AGERE:
1733 if (priv->scan_mode & IW_SCAN_THIS_ESSID) {
1734 struct hermes_idstring idbuf;
1735 size_t len = min(sizeof(idbuf.val),
1736 (size_t) si->essid_len);
1737 idbuf.len = cpu_to_le16(len);
1738 memcpy(idbuf.val, si->essid, len);
1739
1740 err = hermes_write_ltv(hw, USER_BAP,
1741 HERMES_RID_CNFSCANSSID_AGERE,
1742 HERMES_BYTES_TO_RECLEN(len + 2),
1743 &idbuf);
1744 } else
1745 err = hermes_write_wordrec(hw, USER_BAP,
1746 HERMES_RID_CNFSCANSSID_AGERE,
1747 0); /* Any ESSID */
1748 if (err)
1749 break;
1750
1751 if (priv->has_ext_scan) {
1752 /* Clear scan results at the start of
1753 * an extended scan */
1754 orinoco_clear_scan_results(priv,
1755 msecs_to_jiffies(15000));
1756
1757 /* TODO: Is this available on older firmware?
1758 * Can we use it to scan specific channels
1759 * for IW_SCAN_THIS_FREQ? */
1760 err = hermes_write_wordrec(hw, USER_BAP,
1761 HERMES_RID_CNFSCANCHANNELS2GHZ,
1762 0x7FFF);
1763 if (err)
1764 goto out;
1765
1766 err = hermes_inquire(hw,
1767 HERMES_INQ_CHANNELINFO);
1768 } else
1769 err = hermes_inquire(hw, HERMES_INQ_SCAN);
1770 break;
1771 }
1772 } else
1773 err = hermes_inquire(hw, HERMES_INQ_SCAN);
1774
1775 /* One more client */
1776 if (!err)
1777 priv->scan_inprogress = 1;
1778
1779 out:
1780 orinoco_unlock(priv, &flags);
1781 return err;
1782}
1783
1784#define MAX_CUSTOM_LEN 64
1785
1786/* Translate scan data returned from the card to a card independent
1787 * format that the Wireless Tools will understand - Jean II */
1788static inline char *orinoco_translate_scan(struct net_device *dev,
1789 struct iw_request_info *info,
1790 char *current_ev,
1791 char *end_buf,
1792 union hermes_scan_info *bss,
1793 unsigned long last_scanned)
1794{
1795 struct orinoco_private *priv = netdev_priv(dev);
1796 u16 capabilities;
1797 u16 channel;
1798 struct iw_event iwe; /* Temporary buffer */
1799 char custom[MAX_CUSTOM_LEN];
1800
1801 memset(&iwe, 0, sizeof(iwe));
1802
1803 /* First entry *MUST* be the AP MAC address */
1804 iwe.cmd = SIOCGIWAP;
1805 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
1806 memcpy(iwe.u.ap_addr.sa_data, bss->a.bssid, ETH_ALEN);
1807 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1808 &iwe, IW_EV_ADDR_LEN);
1809
1810 /* Other entries will be displayed in the order we give them */
1811
1812 /* Add the ESSID */
1813 iwe.u.data.length = le16_to_cpu(bss->a.essid_len);
1814 if (iwe.u.data.length > 32)
1815 iwe.u.data.length = 32;
1816 iwe.cmd = SIOCGIWESSID;
1817 iwe.u.data.flags = 1;
1818 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1819 &iwe, bss->a.essid);
1820
1821 /* Add mode */
1822 iwe.cmd = SIOCGIWMODE;
1823 capabilities = le16_to_cpu(bss->a.capabilities);
1824 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
1825 if (capabilities & WLAN_CAPABILITY_ESS)
1826 iwe.u.mode = IW_MODE_MASTER;
1827 else
1828 iwe.u.mode = IW_MODE_ADHOC;
1829 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1830 &iwe, IW_EV_UINT_LEN);
1831 }
1832
1833 channel = bss->s.channel;
1834 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
1835 /* Add channel and frequency */
1836 iwe.cmd = SIOCGIWFREQ;
1837 iwe.u.freq.m = channel;
1838 iwe.u.freq.e = 0;
1839 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1840 &iwe, IW_EV_FREQ_LEN);
1841
1842 iwe.u.freq.m = ieee80211_dsss_chan_to_freq(channel) * 100000;
1843 iwe.u.freq.e = 1;
1844 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1845 &iwe, IW_EV_FREQ_LEN);
1846 }
1847
1848 /* Add quality statistics: level and noise in dBm, no link quality */
1849 iwe.cmd = IWEVQUAL;
1850 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
1851 iwe.u.qual.level = (__u8) le16_to_cpu(bss->a.level) - 0x95;
1852 iwe.u.qual.noise = (__u8) le16_to_cpu(bss->a.noise) - 0x95;
1853 /* Wireless tools prior to 27.pre22 will show link quality
1854 * anyway, so we provide a reasonable value. */
1855 if (iwe.u.qual.level > iwe.u.qual.noise)
1856 iwe.u.qual.qual = iwe.u.qual.level - iwe.u.qual.noise;
1857 else
1858 iwe.u.qual.qual = 0;
1859 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1860 &iwe, IW_EV_QUAL_LEN);
1861
1862 /* Add encryption capability */
1863 iwe.cmd = SIOCGIWENCODE;
1864 if (capabilities & WLAN_CAPABILITY_PRIVACY)
1865 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
1866 else
1867 iwe.u.data.flags = IW_ENCODE_DISABLED;
1868 iwe.u.data.length = 0;
1869 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1870 &iwe, NULL);
1871
1872 /* Bit rate is not available in Lucent/Agere firmwares */
1873 if (priv->firmware_type != FIRMWARE_TYPE_AGERE) {
1874 char *current_val = current_ev + iwe_stream_lcp_len(info);
1875 int i;
1876 int step;
1877
1878 if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)
1879 step = 2;
1880 else
1881 step = 1;
1882
1883 iwe.cmd = SIOCGIWRATE;
1884 /* Those two flags are ignored... */
1885 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
1886 /* Max 10 values */
1887 for (i = 0; i < 10; i += step) {
1888 /* NULL terminated */
1889 if (bss->p.rates[i] == 0x0)
1890 break;
1891 /* Bit rate given in 500 kb/s units (+ 0x80) */
1892 iwe.u.bitrate.value =
1893 ((bss->p.rates[i] & 0x7f) * 500000);
1894 current_val = iwe_stream_add_value(info, current_ev,
1895 current_val,
1896 end_buf, &iwe,
1897 IW_EV_PARAM_LEN);
1898 }
1899 /* Check if we added any event */
1900 if ((current_val - current_ev) > iwe_stream_lcp_len(info))
1901 current_ev = current_val;
1902 }
1903
1904 /* Beacon interval */
1905 iwe.cmd = IWEVCUSTOM;
1906 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
1907 "bcn_int=%d",
1908 le16_to_cpu(bss->a.beacon_interv));
1909 if (iwe.u.data.length)
1910 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1911 &iwe, custom);
1912
1913 /* Capabilities */
1914 iwe.cmd = IWEVCUSTOM;
1915 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
1916 "capab=0x%04x",
1917 capabilities);
1918 if (iwe.u.data.length)
1919 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1920 &iwe, custom);
1921
1922 /* Add EXTRA: age, i.e. how long ago the last beacon/probe response
1923 * from this network was received. */
1924 iwe.cmd = IWEVCUSTOM;
1925 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
1926 " Last beacon: %dms ago",
1927 jiffies_to_msecs(jiffies - last_scanned));
1928 if (iwe.u.data.length)
1929 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1930 &iwe, custom);
1931
1932 return current_ev;
1933}
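
The quality block in orinoco_translate_scan() above reduces to simple arithmetic: the firmware reports level and noise offset by 0x95, so subtracting the offset yields dBm, and because the hardware provides no real link-quality figure the driver reports the signal-to-noise difference instead. A minimal, stand-alone sketch of that arithmetic (helper name and sample values invented for illustration):

/* Illustration only, not part of the driver: the same level/noise
 * handling as above, with invented sample values. */
#include <stdio.h>

static void convert_qual(unsigned int raw_level, unsigned int raw_noise)
{
	int level = (int) raw_level - 0x95;	/* firmware offset -> dBm */
	int noise = (int) raw_noise - 0x95;
	int qual = (level > noise) ? level - noise : 0;	/* synthetic SNR */

	printf("level=%d dBm noise=%d dBm qual=%d\n", level, noise, qual);
}

int main(void)
{
	convert_qual(0x3c, 0x1a);	/* -> level=-89, noise=-123, qual=34 */
	return 0;
}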
1934
1935static inline char *orinoco_translate_ext_scan(struct net_device *dev,
1936 struct iw_request_info *info,
1937 char *current_ev,
1938 char *end_buf,
1939 struct agere_ext_scan_info *bss,
1940 unsigned long last_scanned)
1941{
1942 u16 capabilities;
1943 u16 channel;
1944 struct iw_event iwe; /* Temporary buffer */
1945 char custom[MAX_CUSTOM_LEN];
1946 u8 *ie;
1947
1948 memset(&iwe, 0, sizeof(iwe));
1949
1950 /* First entry *MUST* be the AP MAC address */
1951 iwe.cmd = SIOCGIWAP;
1952 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
1953 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
1954 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1955 &iwe, IW_EV_ADDR_LEN);
1956
1957 /* Other entries will be displayed in the order we give them */
1958
1959 /* Add the ESSID */
1960 ie = bss->data;
1961 iwe.u.data.length = ie[1];
1962 if (iwe.u.data.length) {
1963 if (iwe.u.data.length > 32)
1964 iwe.u.data.length = 32;
1965 iwe.cmd = SIOCGIWESSID;
1966 iwe.u.data.flags = 1;
1967 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1968 &iwe, &ie[2]);
1969 }
1970
1971 /* Add mode */
1972 capabilities = le16_to_cpu(bss->capabilities);
1973 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
1974 iwe.cmd = SIOCGIWMODE;
1975 if (capabilities & WLAN_CAPABILITY_ESS)
1976 iwe.u.mode = IW_MODE_MASTER;
1977 else
1978 iwe.u.mode = IW_MODE_ADHOC;
1979 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1980 &iwe, IW_EV_UINT_LEN);
1981 }
1982
1983 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_DS_PARAMS);
1984 channel = ie ? ie[2] : 0;
1985 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
1986 /* Add channel and frequency */
1987 iwe.cmd = SIOCGIWFREQ;
1988 iwe.u.freq.m = channel;
1989 iwe.u.freq.e = 0;
1990 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1991 &iwe, IW_EV_FREQ_LEN);
1992
1993 iwe.u.freq.m = ieee80211_dsss_chan_to_freq(channel) * 100000;
1994 iwe.u.freq.e = 1;
1995 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
1996 &iwe, IW_EV_FREQ_LEN);
1997 }
1998
1999 /* Add quality statistics: level and noise in dBm, no link quality */
2000 iwe.cmd = IWEVQUAL;
2001 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
2002 iwe.u.qual.level = bss->level - 0x95;
2003 iwe.u.qual.noise = bss->noise - 0x95;
2004 /* Wireless tools prior to 27.pre22 will show link quality
2005 * anyway, so we provide a reasonable value. */
2006 if (iwe.u.qual.level > iwe.u.qual.noise)
2007 iwe.u.qual.qual = iwe.u.qual.level - iwe.u.qual.noise;
2008 else
2009 iwe.u.qual.qual = 0;
2010 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
2011 &iwe, IW_EV_QUAL_LEN);
2012
2013 /* Add encryption capability */
2014 iwe.cmd = SIOCGIWENCODE;
2015 if (capabilities & WLAN_CAPABILITY_PRIVACY)
2016 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
2017 else
2018 iwe.u.data.flags = IW_ENCODE_DISABLED;
2019 iwe.u.data.length = 0;
2020 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
2021 &iwe, NULL);
2022
2023 /* WPA IE */
2024 ie = orinoco_get_wpa_ie(bss->data, sizeof(bss->data));
2025 if (ie) {
2026 iwe.cmd = IWEVGENIE;
2027 iwe.u.data.length = ie[1] + 2;
2028 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
2029 &iwe, ie);
2030 }
2031
2032 /* RSN IE */
2033 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_RSN);
2034 if (ie) {
2035 iwe.cmd = IWEVGENIE;
2036 iwe.u.data.length = ie[1] + 2;
2037 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
2038 &iwe, ie);
2039 }
2040
2041 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_SUPP_RATES);
2042 if (ie) {
2043 char *p = current_ev + iwe_stream_lcp_len(info);
2044 int i;
2045
2046 iwe.cmd = SIOCGIWRATE;
2047 /* Those two flags are ignored... */
2048 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
2049
2050 for (i = 2; i < (ie[1] + 2); i++) {
2051 iwe.u.bitrate.value = ((ie[i] & 0x7F) * 500000);
2052 p = iwe_stream_add_value(info, current_ev, p, end_buf,
2053 &iwe, IW_EV_PARAM_LEN);
2054 }
2055 /* Check if we added any event */
2056 if (p > (current_ev + iwe_stream_lcp_len(info)))
2057 current_ev = p;
2058 }
2059
2060 /* Timestamp */
2061 iwe.cmd = IWEVCUSTOM;
2062 iwe.u.data.length =
2063 snprintf(custom, MAX_CUSTOM_LEN, "tsf=%016llx",
2064 (unsigned long long) le64_to_cpu(bss->timestamp));
2065 if (iwe.u.data.length)
2066 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
2067 &iwe, custom);
2068
2069 /* Beacon interval */
2070 iwe.cmd = IWEVCUSTOM;
2071 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
2072 "bcn_int=%d",
2073 le16_to_cpu(bss->beacon_interval));
2074 if (iwe.u.data.length)
2075 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
2076 &iwe, custom);
2077
2078 /* Capabilities */
2079 iwe.cmd = IWEVCUSTOM;
2080 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
2081 "capab=0x%04x",
2082 capabilities);
2083 if (iwe.u.data.length)
2084 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
2085 &iwe, custom);
2086
2087 /* Add EXTRA: age, i.e. how long ago the last beacon/probe response
2088 * from this network was received. */
2089 iwe.cmd = IWEVCUSTOM;
2090 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
2091 " Last beacon: %dms ago",
2092 jiffies_to_msecs(jiffies - last_scanned));
2093 if (iwe.u.data.length)
2094 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
2095 &iwe, custom);
2096
2097 return current_ev;
2098}
2099
2100/* Return results of a scan */
2101static int orinoco_ioctl_getscan(struct net_device *dev,
2102 struct iw_request_info *info,
2103 struct iw_point *srq,
2104 char *extra)
2105{
2106 struct orinoco_private *priv = netdev_priv(dev);
2107 int err = 0;
2108 unsigned long flags;
2109 char *current_ev = extra;
2110
2111 if (orinoco_lock(priv, &flags) != 0)
2112 return -EBUSY;
2113
2114 if (priv->scan_inprogress) {
2115 /* Important note: we don't want to block the caller
2116 * until results are ready, for several reasons.
2117 * First, managing wait queues is complex and racy.
2118 * Second, we grab the rtnetlink lock before coming
2119 * here (in dev_ioctl()).
2120 * Third, we generate a Wireless Event, so the
2121 * caller can wait on that itself - Jean II */
2122 err = -EAGAIN;
2123 goto out;
2124 }
2125
2126 if (priv->has_ext_scan) {
2127 struct xbss_element *bss;
2128
2129 list_for_each_entry(bss, &priv->bss_list, list) {
2130 /* Translate this entry to WE format */
2131 current_ev =
2132 orinoco_translate_ext_scan(dev, info,
2133 current_ev,
2134 extra + srq->length,
2135 &bss->bss,
2136 bss->last_scanned);
2137
2138 /* Check if there is space for one more entry */
2139 if ((extra + srq->length - current_ev)
2140 <= IW_EV_ADDR_LEN) {
2141 /* Ask user space to try again with a
2142 * bigger buffer */
2143 err = -E2BIG;
2144 goto out;
2145 }
2146 }
2147
2148 } else {
2149 struct bss_element *bss;
2150
2151 list_for_each_entry(bss, &priv->bss_list, list) {
2152 /* Translate this entry to WE format */
2153 current_ev = orinoco_translate_scan(dev, info,
2154 current_ev,
2155 extra + srq->length,
2156 &bss->bss,
2157 bss->last_scanned);
2158
2159 /* Check if there is space for one more entry */
2160 if ((extra + srq->length - current_ev)
2161 <= IW_EV_ADDR_LEN) {
2162 /* Ask user space to try again with a
2163 * bigger buffer */
2164 err = -E2BIG;
2165 goto out;
2166 }
2167 }
2168 }
2169
2170 srq->length = (current_ev - extra);
2171 srq->flags = (__u16) priv->scan_mode;
2172
2173out:
2174 orinoco_unlock(priv, &flags);
2175 return err;
2176}
2177
2178/* Commit handler, called after set operations */
2179static int orinoco_ioctl_commit(struct net_device *dev,
2180 struct iw_request_info *info,
2181 void *wrqu,
2182 char *extra)
2183{
2184 struct orinoco_private *priv = netdev_priv(dev);
2185 struct hermes *hw = &priv->hw;
2186 unsigned long flags;
2187 int err = 0;
2188
2189 if (!priv->open)
2190 return 0;
2191
2192 if (priv->broken_disableport) {
2193 orinoco_reset(&priv->reset_work);
2194 return 0;
2195 }
2196
2197 if (orinoco_lock(priv, &flags) != 0)
2198 return err;
2199
2200 err = hermes_disable_port(hw, 0);
2201 if (err) {
2202 printk(KERN_WARNING "%s: Unable to disable port "
2203 "while reconfiguring card\n", dev->name);
2204 priv->broken_disableport = 1;
2205 goto out;
2206 }
2207
2208 err = __orinoco_program_rids(dev);
2209 if (err) {
2210 printk(KERN_WARNING "%s: Unable to reconfigure card\n",
2211 dev->name);
2212 goto out;
2213 }
2214
2215 err = hermes_enable_port(hw, 0);
2216 if (err) {
2217 printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
2218 dev->name);
2219 goto out;
2220 }
2221
2222 out:
2223 if (err) {
2224 printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
2225 schedule_work(&priv->reset_work);
2226 err = 0;
2227 }
2228
2229 orinoco_unlock(priv, &flags);
2230 return err;
2231}
2232
2233static const struct iw_priv_args orinoco_privtab[] = {
2234 { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" },
2235 { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
2236 { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2237 0, "set_port3" },
2238 { SIOCIWFIRSTPRIV + 0x3, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2239 "get_port3" },
2240 { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2241 0, "set_preamble" },
2242 { SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2243 "get_preamble" },
2244 { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2245 0, "set_ibssport" },
2246 { SIOCIWFIRSTPRIV + 0x7, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2247 "get_ibssport" },
2248 { SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_BYTE | MAX_RID_LEN,
2249 "get_rid" },
2250};
2251
2252
2253/*
2254 * Structures to export the Wireless Handlers
2255 */
2256
2257#define STD_IW_HANDLER(id, func) \
2258 [IW_IOCTL_IDX(id)] = (iw_handler) func
2259static const iw_handler orinoco_handler[] = {
2260 STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
2261 STD_IW_HANDLER(SIOCGIWNAME, orinoco_ioctl_getname),
2262 STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
2263 STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
2264 STD_IW_HANDLER(SIOCSIWMODE, orinoco_ioctl_setmode),
2265 STD_IW_HANDLER(SIOCGIWMODE, orinoco_ioctl_getmode),
2266 STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
2267 STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
2268 STD_IW_HANDLER(SIOCGIWRANGE, orinoco_ioctl_getiwrange),
2269 STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
2270 STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
2271 STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
2272 STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
2273 STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
2274 STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
2275 STD_IW_HANDLER(SIOCSIWSCAN, orinoco_ioctl_setscan),
2276 STD_IW_HANDLER(SIOCGIWSCAN, orinoco_ioctl_getscan),
2277 STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
2278 STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
2279 STD_IW_HANDLER(SIOCSIWNICKN, orinoco_ioctl_setnick),
2280 STD_IW_HANDLER(SIOCGIWNICKN, orinoco_ioctl_getnick),
2281 STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
2282 STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
2283 STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts),
2284 STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts),
2285 STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag),
2286 STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag),
2287 STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry),
2288 STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
2289 STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
2290 STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
2291 STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
2292 STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
2293 STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
2294 STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
2295 STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
2296 STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
2297 STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
2298 STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
2299};
2300
2301
2302/*
2303 Added typecasting since we no longer use iwreq_data -- Moustafa
2304 */
2305static const iw_handler orinoco_private_handler[] = {
2306 [0] = (iw_handler) orinoco_ioctl_reset,
2307 [1] = (iw_handler) orinoco_ioctl_reset,
2308 [2] = (iw_handler) orinoco_ioctl_setport3,
2309 [3] = (iw_handler) orinoco_ioctl_getport3,
2310 [4] = (iw_handler) orinoco_ioctl_setpreamble,
2311 [5] = (iw_handler) orinoco_ioctl_getpreamble,
2312 [6] = (iw_handler) orinoco_ioctl_setibssport,
2313 [7] = (iw_handler) orinoco_ioctl_getibssport,
2314 [9] = (iw_handler) orinoco_ioctl_getrid,
2315};
2316
2317const struct iw_handler_def orinoco_handler_def = {
2318 .num_standard = ARRAY_SIZE(orinoco_handler),
2319 .num_private = ARRAY_SIZE(orinoco_private_handler),
2320 .num_private_args = ARRAY_SIZE(orinoco_privtab),
2321 .standard = orinoco_handler,
2322 .private = orinoco_private_handler,
2323 .private_args = orinoco_privtab,
2324 .get_wireless_stats = orinoco_get_wireless_stats,
2325};
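
orinoco_handler_def above bundles the standard and private WEXT handlers together with orinoco_privtab; wext.h (added below) simply exports it. For context, a driver of this era attaches such a table to its net_device so the WEXT core can dispatch the ioctls. A minimal sketch, assuming the usual wireless_handlers hook and a hypothetical helper name:

/* Sketch only: how a WEXT-era driver typically attaches the handler table
 * while setting up its net_device. The helper name is hypothetical; the
 * wireless_handlers field is assumed to be the standard attachment point. */
static void orinoco_attach_wext(struct net_device *dev)
{
	dev->wireless_handlers = &orinoco_handler_def;
}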
diff --git a/drivers/net/wireless/orinoco/wext.h b/drivers/net/wireless/orinoco/wext.h
new file mode 100644
index 00000000000..1479f4e26dd
--- /dev/null
+++ b/drivers/net/wireless/orinoco/wext.h
@@ -0,0 +1,13 @@
1/* Wireless extensions support.
2 *
3 * See copyright notice in main.c
4 */
5#ifndef _ORINOCO_WEXT_H_
6#define _ORINOCO_WEXT_H_
7
8#include <net/iw_handler.h>
9
10/* Structure defining all our WEXT handlers */
11extern const struct iw_handler_def orinoco_handler_def;
12
13#endif /* _ORINOCO_WEXT_H_ */
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index d3469d08f96..cfc5f41aa13 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -61,3 +61,13 @@ config P54_PCI
61 http://prism54.org/ 61 http://prism54.org/
62 62
63 If you choose to build a module, it'll be called p54pci. 63 If you choose to build a module, it'll be called p54pci.
64
65config P54_SPI
66 tristate "Prism54 SPI (stlc45xx) support"
67 depends on P54_COMMON && SPI_MASTER
68 ---help---
69 This driver is for stlc4550 or stlc4560 based wireless chips.
70 This driver is experimental, untested and will probably only work on
71 Nokia's N800/N810 Portable Internet Tablet.
72
73 If you choose to build a module, it'll be called p54spi.
diff --git a/drivers/net/wireless/p54/Makefile b/drivers/net/wireless/p54/Makefile
index 4fa9ce71736..c2050dee629 100644
--- a/drivers/net/wireless/p54/Makefile
+++ b/drivers/net/wireless/p54/Makefile
@@ -1,3 +1,4 @@
1obj-$(CONFIG_P54_COMMON) += p54common.o 1obj-$(CONFIG_P54_COMMON) += p54common.o
2obj-$(CONFIG_P54_USB) += p54usb.o 2obj-$(CONFIG_P54_USB) += p54usb.o
3obj-$(CONFIG_P54_PCI) += p54pci.o 3obj-$(CONFIG_P54_PCI) += p54pci.o
4obj-$(CONFIG_P54_SPI) += p54spi.o
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index ab79e32f0b2..94c3acd1fca 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -44,6 +44,18 @@ enum p54_control_frame_types {
44 P54_CONTROL_TYPE_BT_OPTIONS = 35 44 P54_CONTROL_TYPE_BT_OPTIONS = 35
45}; 45};
46 46
47/* provide 16 bytes for the transport back-end */
48#define P54_TX_INFO_DATA_SIZE 16
49
50/* stored in ieee80211_tx_info's rate_driver_data */
51struct p54_tx_info {
52 u32 start_addr;
53 u32 end_addr;
54 void *data[P54_TX_INFO_DATA_SIZE / sizeof(void *)];
55};
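
The new struct p54_tx_info is carved out of the rate_driver_data scratch space in mac80211's per-skb control block (P54_TX_INFO_DATA_SIZE reserves 16 bytes of it for the transport back-end), so the allocator bookkeeping travels with each queued frame. A short sketch of reading it back, mirroring the reworked p54_free_skb()/p54_rx_frame_sent() hunks further down (the helper name is hypothetical):

/* Sketch (hypothetical helper): recover the per-frame bookkeeping from the
 * mac80211 control block, as the reworked p54_free_skb() and
 * p54_rx_frame_sent() below do. */
static u32 p54_frame_start_addr(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct p54_tx_info *range = (void *) info->rate_driver_data;

	return range->start_addr;
}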
56
57#define P54_MAX_CTRL_FRAME_LEN 0x1000
58
47#define P54_HDR_FLAG_CONTROL BIT(15) 59#define P54_HDR_FLAG_CONTROL BIT(15)
48#define P54_HDR_FLAG_CONTROL_OPSET (BIT(15) + BIT(0)) 60#define P54_HDR_FLAG_CONTROL_OPSET (BIT(15) + BIT(0))
49 61
@@ -75,6 +87,14 @@ struct p54_rssi_linear_approximation {
75 s16 longbow_unk2; 87 s16 longbow_unk2;
76}; 88};
77 89
90struct p54_cal_database {
91 size_t entries;
92 size_t entry_size;
93 size_t offset;
94 size_t len;
95 u8 data[0];
96};
97
78#define EEPROM_READBACK_LEN 0x3fc 98#define EEPROM_READBACK_LEN 0x3fc
79 99
80#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000 100#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000
@@ -84,6 +104,14 @@ struct p54_rssi_linear_approximation {
84#define FW_LM87 0x4c4d3837 104#define FW_LM87 0x4c4d3837
85#define FW_LM20 0x4c4d3230 105#define FW_LM20 0x4c4d3230
86 106
107enum fw_state {
108 FW_STATE_OFF,
109 FW_STATE_BOOTING,
110 FW_STATE_READY,
111 FW_STATE_RESET,
112 FW_STATE_RESETTING,
113};
114
87struct p54_common { 115struct p54_common {
88 struct ieee80211_hw *hw; 116 struct ieee80211_hw *hw;
89 u32 rx_start; 117 u32 rx_start;
@@ -99,11 +127,12 @@ struct p54_common {
99 struct mutex conf_mutex; 127 struct mutex conf_mutex;
100 u8 mac_addr[ETH_ALEN]; 128 u8 mac_addr[ETH_ALEN];
101 u8 bssid[ETH_ALEN]; 129 u8 bssid[ETH_ALEN];
130 u8 rx_diversity_mask;
131 u8 tx_diversity_mask;
102 struct pda_iq_autocal_entry *iq_autocal; 132 struct pda_iq_autocal_entry *iq_autocal;
103 unsigned int iq_autocal_len; 133 unsigned int iq_autocal_len;
104 struct pda_channel_output_limit *output_limit; 134 struct p54_cal_database *output_limit;
105 unsigned int output_limit_len; 135 struct p54_cal_database *curve_data;
106 struct pda_pa_curve_data *curve_data;
107 struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS]; 136 struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS];
108 unsigned int filter_flags; 137 unsigned int filter_flags;
109 bool use_short_slot; 138 bool use_short_slot;
@@ -115,7 +144,7 @@ struct p54_common {
115 unsigned int output_power; 144 unsigned int output_power;
116 u32 tsf_low32; 145 u32 tsf_low32;
117 u32 tsf_high32; 146 u32 tsf_high32;
118 u64 basic_rate_mask; 147 u32 basic_rate_mask;
119 u16 wakeup_timer; 148 u16 wakeup_timer;
120 u16 aid; 149 u16 aid;
121 struct ieee80211_tx_queue_stats tx_stats[8]; 150 struct ieee80211_tx_queue_stats tx_stats[8];
@@ -133,6 +162,7 @@ struct p54_common {
133int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); 162int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
134void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb); 163void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb);
135int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw); 164int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw);
165int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len);
136int p54_read_eeprom(struct ieee80211_hw *dev); 166int p54_read_eeprom(struct ieee80211_hw *dev);
137struct ieee80211_hw *p54_init_common(size_t priv_data_len); 167struct ieee80211_hw *p54_init_common(size_t priv_data_len);
138void p54_free_common(struct ieee80211_hw *dev); 168void p54_free_common(struct ieee80211_hw *dev);
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 34561e6e816..45c2e7ad3ac 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -239,11 +239,11 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
239 239
240 if (priv->fw_var >= 0x300) { 240 if (priv->fw_var >= 0x300) {
241 /* Firmware supports QoS, use it! */ 241 /* Firmware supports QoS, use it! */
242 priv->tx_stats[4].limit = 3; /* AC_VO */ 242 priv->tx_stats[P54_QUEUE_AC_VO].limit = 3;
243 priv->tx_stats[5].limit = 4; /* AC_VI */ 243 priv->tx_stats[P54_QUEUE_AC_VI].limit = 4;
244 priv->tx_stats[6].limit = 3; /* AC_BE */ 244 priv->tx_stats[P54_QUEUE_AC_BE].limit = 3;
245 priv->tx_stats[7].limit = 2; /* AC_BK */ 245 priv->tx_stats[P54_QUEUE_AC_BK].limit = 2;
246 dev->queues = 4; 246 dev->queues = P54_QUEUE_AC_NUM;
247 } 247 }
248 248
249 if (!modparam_nohwcrypt) 249 if (!modparam_nohwcrypt)
@@ -272,13 +272,19 @@ static int p54_convert_rev0(struct ieee80211_hw *dev,
272 unsigned int i, j; 272 unsigned int i, j;
273 void *source, *target; 273 void *source, *target;
274 274
275 priv->curve_data = kmalloc(cd_len, GFP_KERNEL); 275 priv->curve_data = kmalloc(sizeof(*priv->curve_data) + cd_len,
276 GFP_KERNEL);
276 if (!priv->curve_data) 277 if (!priv->curve_data)
277 return -ENOMEM; 278 return -ENOMEM;
278 279
279 memcpy(priv->curve_data, curve_data, sizeof(*curve_data)); 280 priv->curve_data->entries = curve_data->channels;
281 priv->curve_data->entry_size = sizeof(__le16) +
282 sizeof(*dst) * curve_data->points_per_channel;
283 priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data);
284 priv->curve_data->len = cd_len;
285 memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data));
280 source = curve_data->data; 286 source = curve_data->data;
281 target = priv->curve_data->data; 287 target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data;
282 for (i = 0; i < curve_data->channels; i++) { 288 for (i = 0; i < curve_data->channels; i++) {
283 __le16 *freq = source; 289 __le16 *freq = source;
284 source += sizeof(__le16); 290 source += sizeof(__le16);
@@ -318,13 +324,19 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
318 unsigned int i, j; 324 unsigned int i, j;
319 void *source, *target; 325 void *source, *target;
320 326
321 priv->curve_data = kmalloc(cd_len, GFP_KERNEL); 327 priv->curve_data = kzalloc(cd_len + sizeof(*priv->curve_data),
328 GFP_KERNEL);
322 if (!priv->curve_data) 329 if (!priv->curve_data)
323 return -ENOMEM; 330 return -ENOMEM;
324 331
325 memcpy(priv->curve_data, curve_data, sizeof(*curve_data)); 332 priv->curve_data->entries = curve_data->channels;
333 priv->curve_data->entry_size = sizeof(__le16) +
334 sizeof(*dst) * curve_data->points_per_channel;
335 priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data);
336 priv->curve_data->len = cd_len;
337 memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data));
326 source = curve_data->data; 338 source = curve_data->data;
327 target = priv->curve_data->data; 339 target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data;
328 for (i = 0; i < curve_data->channels; i++) { 340 for (i = 0; i < curve_data->channels; i++) {
329 __le16 *freq = source; 341 __le16 *freq = source;
330 source += sizeof(__le16); 342 source += sizeof(__le16);
@@ -376,7 +388,102 @@ static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len,
376 } 388 }
377} 389}
378 390
379static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) 391static void p54_parse_default_country(struct ieee80211_hw *dev,
392 void *data, int len)
393{
394 struct pda_country *country;
395
396 if (len != sizeof(*country)) {
397 printk(KERN_ERR "%s: found possible invalid default country "
398 "eeprom entry. (entry size: %d)\n",
399 wiphy_name(dev->wiphy), len);
400
401 print_hex_dump_bytes("country:", DUMP_PREFIX_NONE,
402 data, len);
403
404 printk(KERN_ERR "%s: please report this issue.\n",
405 wiphy_name(dev->wiphy));
406 return;
407 }
408
409 country = (struct pda_country *) data;
410 if (country->flags == PDR_COUNTRY_CERT_CODE_PSEUDO)
411 regulatory_hint(dev->wiphy, country->alpha2);
412 else {
413 /* TODO:
414 * write a shared/common function that converts
415 * "Regulatory domain codes" (802.11-2007 14.8.2.2)
416 * into ISO/IEC 3166-1 alpha2 for regulatory_hint.
417 */
418 }
419}
420
421static int p54_convert_output_limits(struct ieee80211_hw *dev,
422 u8 *data, size_t len)
423{
424 struct p54_common *priv = dev->priv;
425
426 if (len < 2)
427 return -EINVAL;
428
429 if (data[0] != 0) {
430 printk(KERN_ERR "%s: unknown output power db revision:%x\n",
431 wiphy_name(dev->wiphy), data[0]);
432 return -EINVAL;
433 }
434
435 if (2 + data[1] * sizeof(struct pda_channel_output_limit) > len)
436 return -EINVAL;
437
438 priv->output_limit = kmalloc(data[1] *
439 sizeof(struct pda_channel_output_limit) +
440 sizeof(*priv->output_limit), GFP_KERNEL);
441
442 if (!priv->output_limit)
443 return -ENOMEM;
444
445 priv->output_limit->offset = 0;
446 priv->output_limit->entries = data[1];
447 priv->output_limit->entry_size =
448 sizeof(struct pda_channel_output_limit);
449 priv->output_limit->len = priv->output_limit->entry_size *
450 priv->output_limit->entries +
451 priv->output_limit->offset;
452
453 memcpy(priv->output_limit->data, &data[2],
454 data[1] * sizeof(struct pda_channel_output_limit));
455
456 return 0;
457}
458
459static struct p54_cal_database *p54_convert_db(struct pda_custom_wrapper *src,
460 size_t total_len)
461{
462 struct p54_cal_database *dst;
463 size_t payload_len, entries, entry_size, offset;
464
465 payload_len = le16_to_cpu(src->len);
466 entries = le16_to_cpu(src->entries);
467 entry_size = le16_to_cpu(src->entry_size);
468 offset = le16_to_cpu(src->offset);
469 if (((entries * entry_size + offset) != payload_len) ||
470 (payload_len + sizeof(*src) != total_len))
471 return NULL;
472
473 dst = kmalloc(sizeof(*dst) + payload_len, GFP_KERNEL);
474 if (!dst)
475 return NULL;
476
477 dst->entries = entries;
478 dst->entry_size = entry_size;
479 dst->offset = offset;
480 dst->len = payload_len;
481
482 memcpy(dst->data, src->data, payload_len);
483 return dst;
484}
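
p54_convert_db() above only accepts a custom-wrapper PDA entry whose advertised geometry is self-consistent: the record table plus the in-payload offset must account for the whole payload, and the payload plus the wrapper header must account for the whole EEPROM entry. A tiny stand-alone illustration of those two checks (the wrapper struct and the numbers are invented):

/* Illustration only: the layout invariants p54_convert_db() enforces before
 * copying. The wrapper struct and the sample numbers here are invented. */
#include <assert.h>
#include <stddef.h>

struct fake_wrapper {
	unsigned short entries;		/* number of records */
	unsigned short entry_size;	/* bytes per record */
	unsigned short offset;		/* header bytes inside the payload */
	unsigned short len;		/* payload length */
};

int main(void)
{
	struct fake_wrapper w = { .entries = 14, .entry_size = 6,
				  .offset = 2, .len = 14 * 6 + 2 };
	size_t total_len = sizeof(w) + w.len;	/* whole EEPROM entry */

	assert(w.entries * w.entry_size + w.offset == w.len);
	assert(w.len + sizeof(w) == total_len);
	return 0;
}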
485
486int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
380{ 487{
381 struct p54_common *priv = dev->priv; 488 struct p54_common *priv = dev->priv;
382 struct eeprom_pda_wrap *wrap = NULL; 489 struct eeprom_pda_wrap *wrap = NULL;
@@ -401,30 +508,17 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
401 508
402 switch (le16_to_cpu(entry->code)) { 509 switch (le16_to_cpu(entry->code)) {
403 case PDR_MAC_ADDRESS: 510 case PDR_MAC_ADDRESS:
511 if (data_len != ETH_ALEN)
512 break;
404 SET_IEEE80211_PERM_ADDR(dev, entry->data); 513 SET_IEEE80211_PERM_ADDR(dev, entry->data);
405 break; 514 break;
406 case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS: 515 case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS:
407 if (data_len < 2) { 516 if (priv->output_limit)
408 err = -EINVAL; 517 break;
409 goto err; 518 err = p54_convert_output_limits(dev, entry->data,
410 } 519 data_len);
411 520 if (err)
412 if (2 + entry->data[1]*sizeof(*priv->output_limit) > data_len) {
413 err = -EINVAL;
414 goto err;
415 }
416
417 priv->output_limit = kmalloc(entry->data[1] *
418 sizeof(*priv->output_limit), GFP_KERNEL);
419
420 if (!priv->output_limit) {
421 err = -ENOMEM;
422 goto err; 521 goto err;
423 }
424
425 memcpy(priv->output_limit, &entry->data[2],
426 entry->data[1]*sizeof(*priv->output_limit));
427 priv->output_limit_len = entry->data[1];
428 break; 522 break;
429 case PDR_PRISM_PA_CAL_CURVE_DATA: { 523 case PDR_PRISM_PA_CAL_CURVE_DATA: {
430 struct pda_pa_curve_data *curve_data = 524 struct pda_pa_curve_data *curve_data =
@@ -463,6 +557,9 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
463 memcpy(priv->iq_autocal, entry->data, data_len); 557 memcpy(priv->iq_autocal, entry->data, data_len);
464 priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); 558 priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry);
465 break; 559 break;
560 case PDR_DEFAULT_COUNTRY:
561 p54_parse_default_country(dev, entry->data, data_len);
562 break;
466 case PDR_INTERFACE_LIST: 563 case PDR_INTERFACE_LIST:
467 tmp = entry->data; 564 tmp = entry->data;
468 while ((u8 *)tmp < entry->data + data_len) { 565 while ((u8 *)tmp < entry->data + data_len) {
@@ -473,6 +570,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
473 } 570 }
474 break; 571 break;
475 case PDR_HARDWARE_PLATFORM_COMPONENT_ID: 572 case PDR_HARDWARE_PLATFORM_COMPONENT_ID:
573 if (data_len < 2)
574 break;
476 priv->version = *(u8 *)(entry->data + 1); 575 priv->version = *(u8 *)(entry->data + 1);
477 break; 576 break;
478 case PDR_RSSI_LINEAR_APPROXIMATION: 577 case PDR_RSSI_LINEAR_APPROXIMATION:
@@ -481,6 +580,34 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
481 p54_parse_rssical(dev, entry->data, data_len, 580 p54_parse_rssical(dev, entry->data, data_len,
482 le16_to_cpu(entry->code)); 581 le16_to_cpu(entry->code));
483 break; 582 break;
583 case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: {
584 __le16 *src = (void *) entry->data;
585 s16 *dst = (void *) &priv->rssical_db;
586 int i;
587
588 if (data_len != sizeof(priv->rssical_db)) {
589 err = -EINVAL;
590 goto err;
591 }
592 for (i = 0; i < sizeof(priv->rssical_db) /
593 sizeof(*src); i++)
594 *(dst++) = (s16) le16_to_cpu(*(src++));
595 }
596 break;
597 case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
598 struct pda_custom_wrapper *pda = (void *) entry->data;
599 if (priv->output_limit || data_len < sizeof(*pda))
600 break;
601 priv->output_limit = p54_convert_db(pda, data_len);
602 }
603 break;
604 case PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM: {
605 struct pda_custom_wrapper *pda = (void *) entry->data;
606 if (priv->curve_data || data_len < sizeof(*pda))
607 break;
608 priv->curve_data = p54_convert_db(pda, data_len);
609 }
610 break;
484 case PDR_END: 611 case PDR_END:
485 /* make it overrun */ 612 /* make it overrun */
486 entry_len = len; 613 entry_len = len;
@@ -497,7 +624,6 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
497 case PDR_UTF8_OEM_NAME: 624 case PDR_UTF8_OEM_NAME:
498 case PDR_UTF8_PRODUCT_NAME: 625 case PDR_UTF8_PRODUCT_NAME:
499 case PDR_COUNTRY_LIST: 626 case PDR_COUNTRY_LIST:
500 case PDR_DEFAULT_COUNTRY:
501 case PDR_ANTENNA_GAIN: 627 case PDR_ANTENNA_GAIN:
502 case PDR_PRISM_INDIGO_PA_CALIBRATION_DATA: 628 case PDR_PRISM_INDIGO_PA_CALIBRATION_DATA:
503 case PDR_REGULATORY_POWER_LIMITS: 629 case PDR_REGULATORY_POWER_LIMITS:
@@ -525,12 +651,16 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
525 } 651 }
526 652
527 priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK; 653 priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
528 if (priv->rxhw == 4) 654 if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
529 p54_init_xbow_synth(dev); 655 p54_init_xbow_synth(dev);
530 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) 656 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
531 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; 657 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
532 if (!(synth & PDR_SYNTH_5_GHZ_DISABLED)) 658 if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
533 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz; 659 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
660 if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED)
661 priv->rx_diversity_mask = 3;
662 if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED)
663 priv->tx_diversity_mask = 3;
534 664
535 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 665 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
536 u8 perm_addr[ETH_ALEN]; 666 u8 perm_addr[ETH_ALEN];
@@ -568,13 +698,21 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
568 wiphy_name(dev->wiphy)); 698 wiphy_name(dev->wiphy));
569 return err; 699 return err;
570} 700}
701EXPORT_SYMBOL_GPL(p54_parse_eeprom);
571 702
572static int p54_rssi_to_dbm(struct ieee80211_hw *dev, int rssi) 703static int p54_rssi_to_dbm(struct ieee80211_hw *dev, int rssi)
573{ 704{
574 struct p54_common *priv = dev->priv; 705 struct p54_common *priv = dev->priv;
575 int band = dev->conf.channel->band; 706 int band = dev->conf.channel->band;
576 707
577 return ((rssi * priv->rssical_db[band].mul) / 64 + 708 if (priv->rxhw != PDR_SYNTH_FRONTEND_LONGBOW)
709 return ((rssi * priv->rssical_db[band].mul) / 64 +
710 priv->rssical_db[band].add) / 4;
711 else
712 /*
713 * TODO: find the correct formula
714 */
715 return ((rssi * priv->rssical_db[band].mul) / 64 +
578 priv->rssical_db[band].add) / 4; 716 priv->rssical_db[band].add) / 4;
579} 717}
580 718
@@ -655,7 +793,8 @@ static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
655 return ; 793 return ;
656 794
657 for (i = 0; i < dev->queues; i++) 795 for (i = 0; i < dev->queues; i++)
658 if (priv->tx_stats[i + 4].len < priv->tx_stats[i + 4].limit) 796 if (priv->tx_stats[i + P54_QUEUE_DATA].len <
797 priv->tx_stats[i + P54_QUEUE_DATA].limit)
659 ieee80211_wake_queue(dev, i); 798 ieee80211_wake_queue(dev, i);
660} 799}
661 800
@@ -663,7 +802,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
663{ 802{
664 struct p54_common *priv = dev->priv; 803 struct p54_common *priv = dev->priv;
665 struct ieee80211_tx_info *info; 804 struct ieee80211_tx_info *info;
666 struct memrecord *range; 805 struct p54_tx_info *range;
667 unsigned long flags; 806 unsigned long flags;
668 u32 freed = 0, last_addr = priv->rx_start; 807 u32 freed = 0, last_addr = priv->rx_start;
669 808
@@ -681,18 +820,18 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
681 range = (void *)info->rate_driver_data; 820 range = (void *)info->rate_driver_data;
682 if (skb->prev != (struct sk_buff *)&priv->tx_queue) { 821 if (skb->prev != (struct sk_buff *)&priv->tx_queue) {
683 struct ieee80211_tx_info *ni; 822 struct ieee80211_tx_info *ni;
684 struct memrecord *mr; 823 struct p54_tx_info *mr;
685 824
686 ni = IEEE80211_SKB_CB(skb->prev); 825 ni = IEEE80211_SKB_CB(skb->prev);
687 mr = (struct memrecord *)ni->rate_driver_data; 826 mr = (struct p54_tx_info *)ni->rate_driver_data;
688 last_addr = mr->end_addr; 827 last_addr = mr->end_addr;
689 } 828 }
690 if (skb->next != (struct sk_buff *)&priv->tx_queue) { 829 if (skb->next != (struct sk_buff *)&priv->tx_queue) {
691 struct ieee80211_tx_info *ni; 830 struct ieee80211_tx_info *ni;
692 struct memrecord *mr; 831 struct p54_tx_info *mr;
693 832
694 ni = IEEE80211_SKB_CB(skb->next); 833 ni = IEEE80211_SKB_CB(skb->next);
695 mr = (struct memrecord *)ni->rate_driver_data; 834 mr = (struct p54_tx_info *)ni->rate_driver_data;
696 freed = mr->start_addr - last_addr; 835 freed = mr->start_addr - last_addr;
697 } else 836 } else
698 freed = priv->rx_end - last_addr; 837 freed = priv->rx_end - last_addr;
@@ -734,7 +873,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
734 struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data; 873 struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
735 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next; 874 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next;
736 u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom; 875 u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
737 struct memrecord *range = NULL; 876 struct p54_tx_info *range = NULL;
738 u32 freed = 0; 877 u32 freed = 0;
739 u32 last_addr = priv->rx_start; 878 u32 last_addr = priv->rx_start;
740 unsigned long flags; 879 unsigned long flags;
@@ -756,10 +895,10 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
756 895
757 if (entry->next != (struct sk_buff *)&priv->tx_queue) { 896 if (entry->next != (struct sk_buff *)&priv->tx_queue) {
758 struct ieee80211_tx_info *ni; 897 struct ieee80211_tx_info *ni;
759 struct memrecord *mr; 898 struct p54_tx_info *mr;
760 899
761 ni = IEEE80211_SKB_CB(entry->next); 900 ni = IEEE80211_SKB_CB(entry->next);
762 mr = (struct memrecord *)ni->rate_driver_data; 901 mr = (struct p54_tx_info *)ni->rate_driver_data;
763 freed = mr->start_addr - last_addr; 902 freed = mr->start_addr - last_addr;
764 } else 903 } else
765 freed = priv->rx_end - last_addr; 904 freed = priv->rx_end - last_addr;
@@ -774,9 +913,16 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
774 priv->tx_stats[entry_data->hw_queue].len--; 913 priv->tx_stats[entry_data->hw_queue].len--;
775 priv->stats.dot11ACKFailureCount += payload->tries - 1; 914 priv->stats.dot11ACKFailureCount += payload->tries - 1;
776 915
777 if (unlikely(entry == priv->cached_beacon)) { 916 /*
917 * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
918 * generated by the driver. Therefore tx_status is bogus
919 * and we don't want to confuse the mac80211 stack.
920 */
921 if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) {
922 if (entry_data->hw_queue == P54_QUEUE_BEACON)
923 priv->cached_beacon = NULL;
924
778 kfree_skb(entry); 925 kfree_skb(entry);
779 priv->cached_beacon = NULL;
780 goto out; 926 goto out;
781 } 927 }
782 928
@@ -969,8 +1115,8 @@ EXPORT_SYMBOL_GPL(p54_rx);
969 * can find some unused memory to upload our packets to. However, data that we 1115 * can find some unused memory to upload our packets to. However, data that we
970 * want the card to TX needs to stay intact until the card has told us that 1116 * want the card to TX needs to stay intact until the card has told us that
971 * it is done with it. This function finds empty places we can upload to and 1117 * it is done with it. This function finds empty places we can upload to and
972 * marks allocated areas as reserved if necessary. p54_rx_frame_sent frees 1118 * marks allocated areas as reserved if necessary. p54_rx_frame_sent or
973 * allocated areas. 1119 * p54_free_skb frees allocated areas.
974 */ 1120 */
975static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb, 1121static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
976 struct p54_hdr *data, u32 len) 1122 struct p54_hdr *data, u32 len)
@@ -979,7 +1125,7 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
979 struct sk_buff *entry = priv->tx_queue.next; 1125 struct sk_buff *entry = priv->tx_queue.next;
980 struct sk_buff *target_skb = NULL; 1126 struct sk_buff *target_skb = NULL;
981 struct ieee80211_tx_info *info; 1127 struct ieee80211_tx_info *info;
982 struct memrecord *range; 1128 struct p54_tx_info *range;
983 u32 last_addr = priv->rx_start; 1129 u32 last_addr = priv->rx_start;
984 u32 largest_hole = 0; 1130 u32 largest_hole = 0;
985 u32 target_addr = priv->rx_start; 1131 u32 target_addr = priv->rx_start;
@@ -1060,25 +1206,29 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
1060 return 0; 1206 return 0;
1061} 1207}
1062 1208
1063static struct sk_buff *p54_alloc_skb(struct ieee80211_hw *dev, 1209static struct sk_buff *p54_alloc_skb(struct ieee80211_hw *dev, u16 hdr_flags,
1064 u16 hdr_flags, u16 len, u16 type, gfp_t memflags) 1210 u16 payload_len, u16 type, gfp_t memflags)
1065{ 1211{
1066 struct p54_common *priv = dev->priv; 1212 struct p54_common *priv = dev->priv;
1067 struct p54_hdr *hdr; 1213 struct p54_hdr *hdr;
1068 struct sk_buff *skb; 1214 struct sk_buff *skb;
1215 size_t frame_len = sizeof(*hdr) + payload_len;
1216
1217 if (frame_len > P54_MAX_CTRL_FRAME_LEN)
1218 return NULL;
1069 1219
1070 skb = __dev_alloc_skb(len + priv->tx_hdr_len, memflags); 1220 skb = __dev_alloc_skb(priv->tx_hdr_len + frame_len, memflags);
1071 if (!skb) 1221 if (!skb)
1072 return NULL; 1222 return NULL;
1073 skb_reserve(skb, priv->tx_hdr_len); 1223 skb_reserve(skb, priv->tx_hdr_len);
1074 1224
1075 hdr = (struct p54_hdr *) skb_put(skb, sizeof(*hdr)); 1225 hdr = (struct p54_hdr *) skb_put(skb, sizeof(*hdr));
1076 hdr->flags = cpu_to_le16(hdr_flags); 1226 hdr->flags = cpu_to_le16(hdr_flags);
1077 hdr->len = cpu_to_le16(len - sizeof(*hdr)); 1227 hdr->len = cpu_to_le16(payload_len);
1078 hdr->type = cpu_to_le16(type); 1228 hdr->type = cpu_to_le16(type);
1079 hdr->tries = hdr->rts_tries = 0; 1229 hdr->tries = hdr->rts_tries = 0;
1080 1230
1081 if (unlikely(p54_assign_address(dev, skb, hdr, len))) { 1231 if (p54_assign_address(dev, skb, hdr, frame_len)) {
1082 kfree_skb(skb); 1232 kfree_skb(skb);
1083 return NULL; 1233 return NULL;
1084 } 1234 }
@@ -1088,7 +1238,6 @@ static struct sk_buff *p54_alloc_skb(struct ieee80211_hw *dev,
1088int p54_read_eeprom(struct ieee80211_hw *dev) 1238int p54_read_eeprom(struct ieee80211_hw *dev)
1089{ 1239{
1090 struct p54_common *priv = dev->priv; 1240 struct p54_common *priv = dev->priv;
1091 struct p54_hdr *hdr = NULL;
1092 struct p54_eeprom_lm86 *eeprom_hdr; 1241 struct p54_eeprom_lm86 *eeprom_hdr;
1093 struct sk_buff *skb; 1242 struct sk_buff *skb;
1094 size_t eeprom_size = 0x2020, offset = 0, blocksize, maxblocksize; 1243 size_t eeprom_size = 0x2020, offset = 0, blocksize, maxblocksize;
@@ -1101,9 +1250,9 @@ int p54_read_eeprom(struct ieee80211_hw *dev)
1101 else 1250 else
1102 maxblocksize -= 0x4; 1251 maxblocksize -= 0x4;
1103 1252
1104 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL, sizeof(*hdr) + 1253 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL, sizeof(*eeprom_hdr) +
1105 sizeof(*eeprom_hdr) + maxblocksize, 1254 maxblocksize, P54_CONTROL_TYPE_EEPROM_READBACK,
1106 P54_CONTROL_TYPE_EEPROM_READBACK, GFP_KERNEL); 1255 GFP_KERNEL);
1107 if (!skb) 1256 if (!skb)
1108 goto free; 1257 goto free;
1109 priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL); 1258 priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL);
@@ -1159,9 +1308,8 @@ static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
1159 struct sk_buff *skb; 1308 struct sk_buff *skb;
1160 struct p54_tim *tim; 1309 struct p54_tim *tim;
1161 1310
1162 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, 1311 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*tim),
1163 sizeof(struct p54_hdr) + sizeof(*tim), 1312 P54_CONTROL_TYPE_TIM, GFP_ATOMIC);
1164 P54_CONTROL_TYPE_TIM, GFP_ATOMIC);
1165 if (!skb) 1313 if (!skb)
1166 return -ENOMEM; 1314 return -ENOMEM;
1167 1315
@@ -1178,9 +1326,8 @@ static int p54_sta_unlock(struct ieee80211_hw *dev, u8 *addr)
1178 struct sk_buff *skb; 1326 struct sk_buff *skb;
1179 struct p54_sta_unlock *sta; 1327 struct p54_sta_unlock *sta;
1180 1328
1181 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, 1329 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*sta),
1182 sizeof(struct p54_hdr) + sizeof(*sta), 1330 P54_CONTROL_TYPE_PSM_STA_UNLOCK, GFP_ATOMIC);
1183 P54_CONTROL_TYPE_PSM_STA_UNLOCK, GFP_ATOMIC);
1184 if (!skb) 1331 if (!skb)
1185 return -ENOMEM; 1332 return -ENOMEM;
1186 1333
@@ -1220,9 +1367,8 @@ static int p54_tx_cancel(struct ieee80211_hw *dev, struct sk_buff *entry)
1220 struct p54_hdr *hdr; 1367 struct p54_hdr *hdr;
1221 struct p54_txcancel *cancel; 1368 struct p54_txcancel *cancel;
1222 1369
1223 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, 1370 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*cancel),
1224 sizeof(struct p54_hdr) + sizeof(*cancel), 1371 P54_CONTROL_TYPE_TXCANCEL, GFP_ATOMIC);
1225 P54_CONTROL_TYPE_TXCANCEL, GFP_ATOMIC);
1226 if (!skb) 1372 if (!skb)
1227 return -ENOMEM; 1373 return -ENOMEM;
1228 1374
@@ -1239,46 +1385,73 @@ static int p54_tx_fill(struct ieee80211_hw *dev, struct sk_buff *skb,
1239{ 1385{
1240 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1386 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1241 struct p54_common *priv = dev->priv; 1387 struct p54_common *priv = dev->priv;
1242 int ret = 0; 1388 int ret = 1;
1243
1244 if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
1245 if (ieee80211_is_beacon(hdr->frame_control)) {
1246 *aid = 0;
1247 *queue = 0;
1248 *extra_len = IEEE80211_MAX_TIM_LEN;
1249 *flags = P54_HDR_FLAG_DATA_OUT_TIMESTAMP;
1250 return 0;
1251 } else if (ieee80211_is_probe_resp(hdr->frame_control)) {
1252 *aid = 0;
1253 *queue = 2;
1254 *flags = P54_HDR_FLAG_DATA_OUT_TIMESTAMP |
1255 P54_HDR_FLAG_DATA_OUT_NOCANCEL;
1256 return 0;
1257 } else {
1258 *queue = 2;
1259 ret = 0;
1260 }
1261 } else {
1262 *queue += 4;
1263 ret = 1;
1264 }
1265 1389
1266 switch (priv->mode) { 1390 switch (priv->mode) {
1391 case NL80211_IFTYPE_MONITOR:
1392 /*
1393 * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for
1394 * every frame in promiscuous/monitor mode.
1395 * see STSW45x0C LMAC API - page 12.
1396 */
1397 *aid = 0;
1398 *flags = P54_HDR_FLAG_DATA_OUT_PROMISC;
1399 *queue += P54_QUEUE_DATA;
1400 break;
1267 case NL80211_IFTYPE_STATION: 1401 case NL80211_IFTYPE_STATION:
1268 *aid = 1; 1402 *aid = 1;
1403 if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
1404 *queue = P54_QUEUE_MGMT;
1405 ret = 0;
1406 } else
1407 *queue += P54_QUEUE_DATA;
1269 break; 1408 break;
1270 case NL80211_IFTYPE_AP: 1409 case NL80211_IFTYPE_AP:
1271 case NL80211_IFTYPE_ADHOC: 1410 case NL80211_IFTYPE_ADHOC:
1272 case NL80211_IFTYPE_MESH_POINT: 1411 case NL80211_IFTYPE_MESH_POINT:
1273 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { 1412 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1274 *aid = 0; 1413 *aid = 0;
1275 *queue = 3; 1414 *queue = P54_QUEUE_CAB;
1276 return 0; 1415 return 0;
1277 } 1416 }
1417
1418 if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
1419 if (ieee80211_is_probe_resp(hdr->frame_control)) {
1420 *aid = 0;
1421 *queue = P54_QUEUE_MGMT;
1422 *flags = P54_HDR_FLAG_DATA_OUT_TIMESTAMP |
1423 P54_HDR_FLAG_DATA_OUT_NOCANCEL;
1424 return 0;
1425 } else if (ieee80211_is_beacon(hdr->frame_control)) {
1426 *aid = 0;
1427
1428 if (info->flags & IEEE80211_TX_CTL_INJECTED) {
1429 /*
1430 * Injecting beacons on top of an AP is
1431 * not a good idea... nevertheless,
1432 * it should be doable.
1433 */
1434
1435 *queue += P54_QUEUE_DATA;
1436 return 1;
1437 }
1438
1439 *flags = P54_HDR_FLAG_DATA_OUT_TIMESTAMP;
1440 *queue = P54_QUEUE_BEACON;
1441 *extra_len = IEEE80211_MAX_TIM_LEN;
1442 return 0;
1443 } else {
1444 *queue = P54_QUEUE_MGMT;
1445 ret = 0;
1446 }
1447 } else
1448 *queue += P54_QUEUE_DATA;
1449
1278 if (info->control.sta) 1450 if (info->control.sta)
1279 *aid = info->control.sta->aid; 1451 *aid = info->control.sta->aid;
1280 else 1452 else
1281 *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; 1453 *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;
1454 break;
1282 } 1455 }
1283 return ret; 1456 return ret;
1284} 1457}
@@ -1300,7 +1473,7 @@ static u8 p54_convert_algo(enum ieee80211_key_alg alg)
1300static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 1473static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1301{ 1474{
1302 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1475 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1303 struct ieee80211_tx_queue_stats *current_queue = NULL; 1476 struct ieee80211_tx_queue_stats *current_queue;
1304 struct p54_common *priv = dev->priv; 1477 struct p54_common *priv = dev->priv;
1305 struct p54_hdr *hdr; 1478 struct p54_hdr *hdr;
1306 struct p54_tx_data *txhdr; 1479 struct p54_tx_data *txhdr;
@@ -1443,15 +1616,17 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1443 } 1616 }
1444 txhdr->crypt_offset = crypt_offset; 1617 txhdr->crypt_offset = crypt_offset;
1445 txhdr->hw_queue = queue; 1618 txhdr->hw_queue = queue;
1446 if (current_queue) 1619 txhdr->backlog = current_queue->len;
1447 txhdr->backlog = current_queue->len;
1448 else
1449 txhdr->backlog = 0;
1450 memset(txhdr->durations, 0, sizeof(txhdr->durations)); 1620 memset(txhdr->durations, 0, sizeof(txhdr->durations));
1451 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ? 1621 txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ?
1452 2 : info->antenna_sel_tx - 1; 1622 2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask;
1453 txhdr->output_power = priv->output_power; 1623 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
1454 txhdr->cts_rate = cts_rate; 1624 txhdr->longbow.cts_rate = cts_rate;
1625 txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
1626 } else {
1627 txhdr->normal.output_power = priv->output_power;
1628 txhdr->normal.cts_rate = cts_rate;
1629 }
1455 if (padding) 1630 if (padding)
1456 txhdr->align[0] = padding; 1631 txhdr->align[0] = padding;
1457 1632
@@ -1464,14 +1639,12 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1464 queue_delayed_work(dev->workqueue, &priv->work, 1639 queue_delayed_work(dev->workqueue, &priv->work,
1465 msecs_to_jiffies(P54_TX_FRAME_LIFETIME)); 1640 msecs_to_jiffies(P54_TX_FRAME_LIFETIME));
1466 1641
1467 return 0; 1642 return NETDEV_TX_OK;
1468 1643
1469 err: 1644 err:
1470 skb_pull(skb, sizeof(*hdr) + sizeof(*txhdr) + padding); 1645 skb_pull(skb, sizeof(*hdr) + sizeof(*txhdr) + padding);
1471 if (current_queue) { 1646 current_queue->len--;
1472 current_queue->len--; 1647 current_queue->count--;
1473 current_queue->count--;
1474 }
1475 return NETDEV_TX_BUSY; 1648 return NETDEV_TX_BUSY;
1476} 1649}
1477 1650
@@ -1482,9 +1655,8 @@ static int p54_setup_mac(struct ieee80211_hw *dev)
1482 struct p54_setup_mac *setup; 1655 struct p54_setup_mac *setup;
1483 u16 mode; 1656 u16 mode;
1484 1657
1485 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*setup) + 1658 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*setup),
1486 sizeof(struct p54_hdr), P54_CONTROL_TYPE_SETUP, 1659 P54_CONTROL_TYPE_SETUP, GFP_ATOMIC);
1487 GFP_ATOMIC);
1488 if (!skb) 1660 if (!skb)
1489 return -ENOMEM; 1661 return -ENOMEM;
1490 1662
@@ -1501,11 +1673,21 @@ static int p54_setup_mac(struct ieee80211_hw *dev)
1501 case NL80211_IFTYPE_MESH_POINT: 1673 case NL80211_IFTYPE_MESH_POINT:
1502 mode = P54_FILTER_TYPE_IBSS; 1674 mode = P54_FILTER_TYPE_IBSS;
1503 break; 1675 break;
1676 case NL80211_IFTYPE_MONITOR:
1677 mode = P54_FILTER_TYPE_PROMISCUOUS;
1678 break;
1504 default: 1679 default:
1505 mode = P54_FILTER_TYPE_NONE; 1680 mode = P54_FILTER_TYPE_NONE;
1506 break; 1681 break;
1507 } 1682 }
1508 if (priv->filter_flags & FIF_PROMISC_IN_BSS) 1683
1684 /*
1685 * "TRANSPARENT and PROMISCUOUS are mutually exclusive"
1686 * STSW45X0C LMAC API - page 12
1687 */
1688 if (((priv->filter_flags & FIF_PROMISC_IN_BSS) ||
1689 (priv->filter_flags & FIF_OTHER_BSS)) &&
1690 (mode != P54_FILTER_TYPE_PROMISCUOUS))
1509 mode |= P54_FILTER_TYPE_TRANSPARENT; 1691 mode |= P54_FILTER_TYPE_TRANSPARENT;
1510 } else 1692 } else
1511 mode = P54_FILTER_TYPE_RX_DISABLED; 1693 mode = P54_FILTER_TYPE_RX_DISABLED;
@@ -1513,7 +1695,7 @@ static int p54_setup_mac(struct ieee80211_hw *dev)
1513 setup->mac_mode = cpu_to_le16(mode); 1695 setup->mac_mode = cpu_to_le16(mode);
1514 memcpy(setup->mac_addr, priv->mac_addr, ETH_ALEN); 1696 memcpy(setup->mac_addr, priv->mac_addr, ETH_ALEN);
1515 memcpy(setup->bssid, priv->bssid, ETH_ALEN); 1697 memcpy(setup->bssid, priv->bssid, ETH_ALEN);
1516 setup->rx_antenna = 2; /* automatic */ 1698 setup->rx_antenna = 2 & priv->rx_diversity_mask; /* automatic */
1517 setup->rx_align = 0; 1699 setup->rx_align = 0;
1518 if (priv->fw_var < 0x500) { 1700 if (priv->fw_var < 0x500) {
1519 setup->v1.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); 1701 setup->v1.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
@@ -1546,79 +1728,137 @@ static int p54_scan(struct ieee80211_hw *dev, u16 mode, u16 dwell)
1546{ 1728{
1547 struct p54_common *priv = dev->priv; 1729 struct p54_common *priv = dev->priv;
1548 struct sk_buff *skb; 1730 struct sk_buff *skb;
1549 struct p54_scan *chan; 1731 struct p54_hdr *hdr;
1732 struct p54_scan_head *head;
1733 struct p54_iq_autocal_entry *iq_autocal;
1734 union p54_scan_body_union *body;
1735 struct p54_scan_tail_rate *rate;
1736 struct pda_rssi_cal_entry *rssi;
1550 unsigned int i; 1737 unsigned int i;
1551 void *entry; 1738 void *entry;
1552 __le16 freq = cpu_to_le16(dev->conf.channel->center_freq);
1553 int band = dev->conf.channel->band; 1739 int band = dev->conf.channel->band;
1740 __le16 freq = cpu_to_le16(dev->conf.channel->center_freq);
1554 1741
1555 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*chan) + 1742 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
1556 sizeof(struct p54_hdr), P54_CONTROL_TYPE_SCAN, 1743 2 + sizeof(*iq_autocal) + sizeof(*body) +
1557 GFP_ATOMIC); 1744 sizeof(*rate) + 2 * sizeof(*rssi),
1745 P54_CONTROL_TYPE_SCAN, GFP_ATOMIC);
1558 if (!skb) 1746 if (!skb)
1559 return -ENOMEM; 1747 return -ENOMEM;
1560 1748
1561 chan = (struct p54_scan *) skb_put(skb, sizeof(*chan)); 1749 head = (struct p54_scan_head *) skb_put(skb, sizeof(*head));
1562 memset(chan->padding1, 0, sizeof(chan->padding1)); 1750 memset(head->scan_params, 0, sizeof(head->scan_params));
1563 chan->mode = cpu_to_le16(mode); 1751 head->mode = cpu_to_le16(mode);
1564 chan->dwell = cpu_to_le16(dwell); 1752 head->dwell = cpu_to_le16(dwell);
1753 head->freq = freq;
1754
1755 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
1756 __le16 *pa_power_points = (__le16 *) skb_put(skb, 2);
1757 *pa_power_points = cpu_to_le16(0x0c);
1758 }
1565 1759
1760 iq_autocal = (void *) skb_put(skb, sizeof(*iq_autocal));
1566 for (i = 0; i < priv->iq_autocal_len; i++) { 1761 for (i = 0; i < priv->iq_autocal_len; i++) {
1567 if (priv->iq_autocal[i].freq != freq) 1762 if (priv->iq_autocal[i].freq != freq)
1568 continue; 1763 continue;
1569 1764
1570 memcpy(&chan->iq_autocal, &priv->iq_autocal[i], 1765 memcpy(iq_autocal, &priv->iq_autocal[i].params,
1571 sizeof(*priv->iq_autocal)); 1766 sizeof(struct p54_iq_autocal_entry));
1572 break; 1767 break;
1573 } 1768 }
1574 if (i == priv->iq_autocal_len) 1769 if (i == priv->iq_autocal_len)
1575 goto err; 1770 goto err;
1576 1771
1577 for (i = 0; i < priv->output_limit_len; i++) { 1772 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW)
1578 if (priv->output_limit[i].freq != freq) 1773 body = (void *) skb_put(skb, sizeof(body->longbow));
1774 else
1775 body = (void *) skb_put(skb, sizeof(body->normal));
1776
1777 for (i = 0; i < priv->output_limit->entries; i++) {
1778 __le16 *entry_freq = (void *) (priv->output_limit->data +
1779 priv->output_limit->entry_size * i);
1780
1781 if (*entry_freq != freq)
1579 continue; 1782 continue;
1580 1783
1581 chan->val_barker = 0x38; 1784 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
1582 chan->val_bpsk = chan->dup_bpsk = 1785 memcpy(&body->longbow.power_limits,
1583 priv->output_limit[i].val_bpsk; 1786 (void *) entry_freq + sizeof(__le16),
1584 chan->val_qpsk = chan->dup_qpsk = 1787 priv->output_limit->entry_size);
1585 priv->output_limit[i].val_qpsk; 1788 } else {
1586 chan->val_16qam = chan->dup_16qam = 1789 struct pda_channel_output_limit *limits =
1587 priv->output_limit[i].val_16qam; 1790 (void *) entry_freq;
1588 chan->val_64qam = chan->dup_64qam = 1791
1589 priv->output_limit[i].val_64qam; 1792 body->normal.val_barker = 0x38;
1793 body->normal.val_bpsk = body->normal.dup_bpsk =
1794 limits->val_bpsk;
1795 body->normal.val_qpsk = body->normal.dup_qpsk =
1796 limits->val_qpsk;
1797 body->normal.val_16qam = body->normal.dup_16qam =
1798 limits->val_16qam;
1799 body->normal.val_64qam = body->normal.dup_64qam =
1800 limits->val_64qam;
1801 }
1590 break; 1802 break;
1591 } 1803 }
1592 if (i == priv->output_limit_len) 1804 if (i == priv->output_limit->entries)
1593 goto err; 1805 goto err;
1594 1806
1595 entry = priv->curve_data->data; 1807 entry = (void *)(priv->curve_data->data + priv->curve_data->offset);
1596 for (i = 0; i < priv->curve_data->channels; i++) { 1808 for (i = 0; i < priv->curve_data->entries; i++) {
1597 if (*((__le16 *)entry) != freq) { 1809 if (*((__le16 *)entry) != freq) {
1598 entry += sizeof(__le16); 1810 entry += priv->curve_data->entry_size;
1599 entry += sizeof(struct p54_pa_curve_data_sample) *
1600 priv->curve_data->points_per_channel;
1601 continue; 1811 continue;
1602 } 1812 }
1603 1813
1604 entry += sizeof(__le16); 1814 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
1605 chan->pa_points_per_curve = 8; 1815 memcpy(&body->longbow.curve_data,
1606 memset(chan->curve_data, 0, sizeof(*chan->curve_data)); 1816 (void *) entry + sizeof(__le16),
1607 memcpy(chan->curve_data, entry, 1817 priv->curve_data->entry_size);
1608 sizeof(struct p54_pa_curve_data_sample) * 1818 } else {
1609 min((u8)8, priv->curve_data->points_per_channel)); 1819 struct p54_scan_body *chan = &body->normal;
1820 struct pda_pa_curve_data *curve_data =
1821 (void *) priv->curve_data->data;
1822
1823 entry += sizeof(__le16);
1824 chan->pa_points_per_curve = 8;
1825 memset(chan->curve_data, 0, sizeof(*chan->curve_data));
1826 memcpy(chan->curve_data, entry,
1827 sizeof(struct p54_pa_curve_data_sample) *
1828 min((u8)8, curve_data->points_per_channel));
1829 }
1610 break; 1830 break;
1611 } 1831 }
1832 if (i == priv->curve_data->entries)
1833 goto err;
1612 1834
1613 if (priv->fw_var < 0x500) { 1835 if ((priv->fw_var >= 0x500) && (priv->fw_var < 0x509)) {
1614 chan->v1_rssi.mul = cpu_to_le16(priv->rssical_db[band].mul); 1836 rate = (void *) skb_put(skb, sizeof(*rate));
1615 chan->v1_rssi.add = cpu_to_le16(priv->rssical_db[band].add); 1837 rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
1616 } else { 1838 for (i = 0; i < sizeof(rate->rts_rates); i++)
1617 chan->v2.rssi.mul = cpu_to_le16(priv->rssical_db[band].mul); 1839 rate->rts_rates[i] = i;
1618 chan->v2.rssi.add = cpu_to_le16(priv->rssical_db[band].add);
1619 chan->v2.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
1620 memset(chan->v2.rts_rates, 0, 8);
1621 } 1840 }
1841
1842 rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
1843 rssi->mul = cpu_to_le16(priv->rssical_db[band].mul);
1844 rssi->add = cpu_to_le16(priv->rssical_db[band].add);
1845 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
1846 /* Longbow frontend needs even more */
1847 rssi = (void *) skb_put(skb, sizeof(*rssi));
1848 rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn);
1849 rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2);
1850 }
1851
1852 if (priv->fw_var >= 0x509) {
1853 rate = (void *) skb_put(skb, sizeof(*rate));
1854 rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
1855 for (i = 0; i < sizeof(rate->rts_rates); i++)
1856 rate->rts_rates[i] = i;
1857 }
1858
1859 hdr = (struct p54_hdr *) skb->data;
1860 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
1861
1622 priv->tx(dev, skb); 1862 priv->tx(dev, skb);
1623 return 0; 1863 return 0;
1624 1864
@@ -1634,9 +1874,8 @@ static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act)
1634 struct sk_buff *skb; 1874 struct sk_buff *skb;
1635 struct p54_led *led; 1875 struct p54_led *led;
1636 1876
1637 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*led) + 1877 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*led),
1638 sizeof(struct p54_hdr), P54_CONTROL_TYPE_LED, 1878 P54_CONTROL_TYPE_LED, GFP_ATOMIC);
1639 GFP_ATOMIC);
1640 if (!skb) 1879 if (!skb)
1641 return -ENOMEM; 1880 return -ENOMEM;
1642 1881
@@ -1663,9 +1902,8 @@ static int p54_set_edcf(struct ieee80211_hw *dev)
1663 struct sk_buff *skb; 1902 struct sk_buff *skb;
1664 struct p54_edcf *edcf; 1903 struct p54_edcf *edcf;
1665 1904
1666 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf) + 1905 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
1667 sizeof(struct p54_hdr), P54_CONTROL_TYPE_DCFINIT, 1906 P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
1668 GFP_ATOMIC);
1669 if (!skb) 1907 if (!skb)
1670 return -ENOMEM; 1908 return -ENOMEM;
1671 1909
@@ -1689,6 +1927,42 @@ static int p54_set_edcf(struct ieee80211_hw *dev)
1689 return 0; 1927 return 0;
1690} 1928}
1691 1929
1930static int p54_set_ps(struct ieee80211_hw *dev)
1931{
1932 struct p54_common *priv = dev->priv;
1933 struct sk_buff *skb;
1934 struct p54_psm *psm;
1935 u16 mode;
1936 int i;
1937
1938 if (dev->conf.flags & IEEE80211_CONF_PS)
1939 mode = P54_PSM | P54_PSM_DTIM | P54_PSM_MCBC;
1940 else
1941 mode = P54_PSM_CAM;
1942
1943 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*psm),
1944 P54_CONTROL_TYPE_PSM, GFP_ATOMIC);
1945 if (!skb)
1946 return -ENOMEM;
1947
1948 psm = (struct p54_psm *)skb_put(skb, sizeof(*psm));
1949 psm->mode = cpu_to_le16(mode);
1950 psm->aid = cpu_to_le16(priv->aid);
1951 for (i = 0; i < ARRAY_SIZE(psm->intervals); i++) {
1952 psm->intervals[i].interval =
1953 cpu_to_le16(dev->conf.listen_interval);
1954 psm->intervals[i].periods = cpu_to_le16(1);
1955 }
1956
1957 psm->beacon_rssi_skip_max = 60;
1958 psm->rssi_delta_threshold = 0;
1959 psm->nr = 0;
1960
1961 priv->tx(dev, skb);
1962
1963 return 0;
1964}
1965
1692static int p54_beacon_tim(struct sk_buff *skb) 1966static int p54_beacon_tim(struct sk_buff *skb)
1693{ 1967{
1694 /* 1968 /*
@@ -1881,6 +2155,11 @@ static int p54_config(struct ieee80211_hw *dev, u32 changed)
1881 if (ret) 2155 if (ret)
1882 goto out; 2156 goto out;
1883 } 2157 }
2158 if (changed & IEEE80211_CONF_CHANGE_PS) {
2159 ret = p54_set_ps(dev);
2160 if (ret)
2161 goto out;
2162 }
1884 2163
1885out: 2164out:
1886 mutex_unlock(&priv->conf_mutex); 2165 mutex_unlock(&priv->conf_mutex);
@@ -1932,12 +2211,13 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
1932 struct p54_common *priv = dev->priv; 2211 struct p54_common *priv = dev->priv;
1933 2212
1934 *total_flags &= FIF_PROMISC_IN_BSS | 2213 *total_flags &= FIF_PROMISC_IN_BSS |
2214 FIF_OTHER_BSS |
1935 (*total_flags & FIF_PROMISC_IN_BSS) ? 2215 (*total_flags & FIF_PROMISC_IN_BSS) ?
1936 FIF_FCSFAIL : 0; 2216 FIF_FCSFAIL : 0;
1937 2217
1938 priv->filter_flags = *total_flags; 2218 priv->filter_flags = *total_flags;
1939 2219
1940 if (changed_flags & FIF_PROMISC_IN_BSS) 2220 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
1941 p54_setup_mac(dev); 2221 p54_setup_mac(dev);
1942} 2222}
1943 2223
@@ -1964,10 +2244,8 @@ static int p54_init_xbow_synth(struct ieee80211_hw *dev)
1964 struct sk_buff *skb; 2244 struct sk_buff *skb;
1965 struct p54_xbow_synth *xbow; 2245 struct p54_xbow_synth *xbow;
1966 2246
1967 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*xbow) + 2247 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*xbow),
1968 sizeof(struct p54_hdr), 2248 P54_CONTROL_TYPE_XBOW_SYNTH_CFG, GFP_KERNEL);
1969 P54_CONTROL_TYPE_XBOW_SYNTH_CFG,
1970 GFP_KERNEL);
1971 if (!skb) 2249 if (!skb)
1972 return -ENOMEM; 2250 return -ENOMEM;
1973 2251
@@ -1996,7 +2274,7 @@ static void p54_work(struct work_struct *work)
1996 * 2. cancel stuck frames / reset the device if necessary. 2274 * 2. cancel stuck frames / reset the device if necessary.
1997 */ 2275 */
1998 2276
1999 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL, sizeof(struct p54_hdr) + 2277 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL,
2000 sizeof(struct p54_statistics), 2278 sizeof(struct p54_statistics),
2001 P54_CONTROL_TYPE_STAT_READBACK, GFP_KERNEL); 2279 P54_CONTROL_TYPE_STAT_READBACK, GFP_KERNEL);
2002 if (!skb) 2280 if (!skb)
@@ -2019,8 +2297,8 @@ static int p54_get_tx_stats(struct ieee80211_hw *dev,
2019{ 2297{
2020 struct p54_common *priv = dev->priv; 2298 struct p54_common *priv = dev->priv;
2021 2299
2022 memcpy(stats, &priv->tx_stats[4], sizeof(stats[0]) * dev->queues); 2300 memcpy(stats, &priv->tx_stats[P54_QUEUE_DATA],
2023 2301 sizeof(stats[0]) * dev->queues);
2024 return 0; 2302 return 0;
2025} 2303}
2026 2304
@@ -2056,7 +2334,7 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
2056} 2334}
2057 2335
2058static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, 2336static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2059 const u8 *local_address, const u8 *address, 2337 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2060 struct ieee80211_key_conf *key) 2338 struct ieee80211_key_conf *key)
2061{ 2339{
2062 struct p54_common *priv = dev->priv; 2340 struct p54_common *priv = dev->priv;
@@ -2107,9 +2385,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2107 } 2385 }
2108 2386
2109 mutex_lock(&priv->conf_mutex); 2387 mutex_lock(&priv->conf_mutex);
2110 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey) + 2388 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey),
2111 sizeof(struct p54_hdr), P54_CONTROL_TYPE_RX_KEYCACHE, 2389 P54_CONTROL_TYPE_RX_KEYCACHE, GFP_ATOMIC);
2112 GFP_ATOMIC);
2113 if (!skb) { 2390 if (!skb) {
2114 mutex_unlock(&priv->conf_mutex); 2391 mutex_unlock(&priv->conf_mutex);
2115 return -ENOMEM; 2392 return -ENOMEM;
@@ -2120,8 +2397,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2120 rxkey->entry = key->keyidx; 2397 rxkey->entry = key->keyidx;
2121 rxkey->key_id = key->keyidx; 2398 rxkey->key_id = key->keyidx;
2122 rxkey->key_type = algo; 2399 rxkey->key_type = algo;
2123 if (address) 2400 if (sta)
2124 memcpy(rxkey->mac, address, ETH_ALEN); 2401 memcpy(rxkey->mac, sta->addr, ETH_ALEN);
2125 else 2402 else
2126 memset(rxkey->mac, ~0, ETH_ALEN); 2403 memset(rxkey->mac, ~0, ETH_ALEN);
2127 if (key->alg != ALG_TKIP) { 2404 if (key->alg != ALG_TKIP) {
@@ -2181,11 +2458,11 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
2181 BIT(NL80211_IFTYPE_MESH_POINT); 2458 BIT(NL80211_IFTYPE_MESH_POINT);
2182 2459
2183 dev->channel_change_time = 1000; /* TODO: find actual value */ 2460 dev->channel_change_time = 1000; /* TODO: find actual value */
2184 priv->tx_stats[0].limit = 1; /* Beacon queue */ 2461 priv->tx_stats[P54_QUEUE_BEACON].limit = 1;
2185 priv->tx_stats[1].limit = 1; /* Probe queue for HW scan */ 2462 priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1;
2186 priv->tx_stats[2].limit = 3; /* queue for MLMEs */ 2463 priv->tx_stats[P54_QUEUE_MGMT].limit = 3;
2187 priv->tx_stats[3].limit = 3; /* Broadcast / MC queue */ 2464 priv->tx_stats[P54_QUEUE_CAB].limit = 3;
2188 priv->tx_stats[4].limit = 5; /* Data */ 2465 priv->tx_stats[P54_QUEUE_DATA].limit = 5;
2189 dev->queues = 1; 2466 dev->queues = 1;
2190 priv->noise = -94; 2467 priv->noise = -94;
2191 /* 2468 /*
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index f5729de83fe..def23b1f49e 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -26,12 +26,17 @@ struct bootrec {
26} __attribute__((packed)); 26} __attribute__((packed));
27 27
28#define PDR_SYNTH_FRONTEND_MASK 0x0007 28#define PDR_SYNTH_FRONTEND_MASK 0x0007
29#define PDR_SYNTH_FRONTEND_DUETTE3 0x0001
30#define PDR_SYNTH_FRONTEND_DUETTE2 0x0002
31#define PDR_SYNTH_FRONTEND_FRISBEE 0x0003
32#define PDR_SYNTH_FRONTEND_XBOW 0x0004
33#define PDR_SYNTH_FRONTEND_LONGBOW 0x0005
29#define PDR_SYNTH_IQ_CAL_MASK 0x0018 34#define PDR_SYNTH_IQ_CAL_MASK 0x0018
30#define PDR_SYNTH_IQ_CAL_PA_DETECTOR 0x0000 35#define PDR_SYNTH_IQ_CAL_PA_DETECTOR 0x0000
31#define PDR_SYNTH_IQ_CAL_DISABLED 0x0008 36#define PDR_SYNTH_IQ_CAL_DISABLED 0x0008
32#define PDR_SYNTH_IQ_CAL_ZIF 0x0010 37#define PDR_SYNTH_IQ_CAL_ZIF 0x0010
33#define PDR_SYNTH_FAA_SWITCH_MASK 0x0020 38#define PDR_SYNTH_FAA_SWITCH_MASK 0x0020
34#define PDR_SYNTH_FAA_SWITCH_ENABLED 0x0001 39#define PDR_SYNTH_FAA_SWITCH_ENABLED 0x0020
35#define PDR_SYNTH_24_GHZ_MASK 0x0040 40#define PDR_SYNTH_24_GHZ_MASK 0x0040
36#define PDR_SYNTH_24_GHZ_DISABLED 0x0040 41#define PDR_SYNTH_24_GHZ_DISABLED 0x0040
37#define PDR_SYNTH_5_GHZ_MASK 0x0080 42#define PDR_SYNTH_5_GHZ_MASK 0x0080
@@ -125,9 +130,13 @@ struct eeprom_pda_wrap {
125 u8 data[0]; 130 u8 data[0];
126} __attribute__ ((packed)); 131} __attribute__ ((packed));
127 132
133struct p54_iq_autocal_entry {
134 __le16 iq_param[4];
135} __attribute__ ((packed));
136
128struct pda_iq_autocal_entry { 137struct pda_iq_autocal_entry {
129 __le16 freq; 138 __le16 freq;
130 __le16 iq_param[4]; 139 struct p54_iq_autocal_entry params;
131} __attribute__ ((packed)); 140} __attribute__ ((packed));
132 141
133struct pda_channel_output_limit { 142struct pda_channel_output_limit {
@@ -180,6 +189,35 @@ struct pda_rssi_cal_entry {
180 __le16 add; 189 __le16 add;
181} __attribute__ ((packed)); 190} __attribute__ ((packed));
182 191
192struct pda_country {
193 u8 regdomain;
194 u8 alpha2[2];
195 u8 flags;
196} __attribute__ ((packed));
197
198/*
199 * Warning: Longbow's structures are bogus.
200 */
201struct p54_channel_output_limit_longbow {
202 __le16 rf_power_points[12];
203} __attribute__ ((packed));
204
205struct p54_pa_curve_data_sample_longbow {
206 __le16 rf_power;
207 __le16 pa_detector;
208 struct {
209 __le16 data[4];
210 } points[3] __attribute__ ((packed));
211} __attribute__ ((packed));
212
213struct pda_custom_wrapper {
214 __le16 entries;
215 __le16 entry_size;
216 __le16 offset;
217 __le16 len;
218 u8 data[0];
219} __attribute__ ((packed));
220
183/* 221/*
184 * this defines the PDR codes used to build PDAs as defined in document 222 * this defines the PDR codes used to build PDAs as defined in document
185 * number 553155. The current implementation mirrors version 1.1 of the 223 * number 553155. The current implementation mirrors version 1.1 of the
@@ -225,8 +263,13 @@ struct pda_rssi_cal_entry {
225/* reserved range (0x2000 - 0x7fff) */ 263/* reserved range (0x2000 - 0x7fff) */
226 264
227/* customer range (0x8000 - 0xffff) */ 265/* customer range (0x8000 - 0xffff) */
228#define PDR_BASEBAND_REGISTERS 0x8000 266#define PDR_BASEBAND_REGISTERS 0x8000
229#define PDR_PER_CHANNEL_BASEBAND_REGISTERS 0x8001 267#define PDR_PER_CHANNEL_BASEBAND_REGISTERS 0x8001
268
269/* used by our modified eeprom image */
270#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD
271#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF
272#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D
230 273
231/* PDR definitions for default country & country list */ 274/* PDR definitions for default country & country list */
232#define PDR_COUNTRY_CERT_CODE 0x80 275#define PDR_COUNTRY_CERT_CODE 0x80
@@ -241,12 +284,6 @@ struct pda_rssi_cal_entry {
241#define PDR_COUNTRY_CERT_IODOOR_OUTDOOR 0x30 284#define PDR_COUNTRY_CERT_IODOOR_OUTDOOR 0x30
242#define PDR_COUNTRY_CERT_INDEX 0x0F 285#define PDR_COUNTRY_CERT_INDEX 0x0F
243 286
244/* stored in skb->cb */
245struct memrecord {
246 u32 start_addr;
247 u32 end_addr;
248};
249
250struct p54_eeprom_lm86 { 287struct p54_eeprom_lm86 {
251 union { 288 union {
252 struct { 289 struct {
@@ -329,7 +366,7 @@ struct p54_frame_sent {
329 u8 padding; 366 u8 padding;
330} __attribute__ ((packed)); 367} __attribute__ ((packed));
331 368
332enum p54_tx_data_crypt { 369enum p54_tx_data_crypt {
333 P54_CRYPTO_NONE = 0, 370 P54_CRYPTO_NONE = 0,
334 P54_CRYPTO_WEP, 371 P54_CRYPTO_WEP,
335 P54_CRYPTO_TKIP, 372 P54_CRYPTO_TKIP,
@@ -340,6 +377,23 @@ enum p54_tx_data_crypt {
340 P54_CRYPTO_AESCCMP 377 P54_CRYPTO_AESCCMP
341}; 378};
342 379
380enum p54_tx_data_queue {
381 P54_QUEUE_BEACON = 0,
382 P54_QUEUE_FWSCAN = 1,
383 P54_QUEUE_MGMT = 2,
384 P54_QUEUE_CAB = 3,
385 P54_QUEUE_DATA = 4,
386
387 P54_QUEUE_AC_NUM = 4,
388 P54_QUEUE_AC_VO = 4,
389 P54_QUEUE_AC_VI = 5,
390 P54_QUEUE_AC_BE = 6,
391 P54_QUEUE_AC_BK = 7,
392
393 /* keep last */
394 P54_QUEUE_NUM = 8,
395};
396
343struct p54_tx_data { 397struct p54_tx_data {
344 u8 rateset[8]; 398 u8 rateset[8];
345 u8 rts_rate_idx; 399 u8 rts_rate_idx;
@@ -351,9 +405,18 @@ struct p54_tx_data {
351 u8 backlog; 405 u8 backlog;
352 __le16 durations[4]; 406 __le16 durations[4];
353 u8 tx_antenna; 407 u8 tx_antenna;
354 u8 output_power; 408 union {
355 u8 cts_rate; 409 struct {
356 u8 unalloc2[3]; 410 u8 cts_rate;
411 __le16 output_power;
412 } __attribute__((packed)) longbow;
413 struct {
414 u8 output_power;
415 u8 cts_rate;
416 u8 unalloc;
417 } __attribute__ ((packed)) normal;
418 } __attribute__ ((packed));
419 u8 unalloc2[2];
357 u8 align[0]; 420 u8 align[0];
358} __attribute__ ((packed)); 421} __attribute__ ((packed));
359 422
@@ -414,11 +477,14 @@ struct p54_setup_mac {
414#define P54_SCAN_ACTIVE BIT(2) 477#define P54_SCAN_ACTIVE BIT(2)
415#define P54_SCAN_FILTER BIT(3) 478#define P54_SCAN_FILTER BIT(3)
416 479
417struct p54_scan { 480struct p54_scan_head {
418 __le16 mode; 481 __le16 mode;
419 __le16 dwell; 482 __le16 dwell;
420 u8 padding1[20]; 483 u8 scan_params[20];
421 struct pda_iq_autocal_entry iq_autocal; 484 __le16 freq;
485} __attribute__ ((packed));
486
487struct p54_scan_body {
422 u8 pa_points_per_curve; 488 u8 pa_points_per_curve;
423 u8 val_barker; 489 u8 val_barker;
424 u8 val_bpsk; 490 u8 val_bpsk;
@@ -430,19 +496,23 @@ struct p54_scan {
430 u8 dup_qpsk; 496 u8 dup_qpsk;
431 u8 dup_16qam; 497 u8 dup_16qam;
432 u8 dup_64qam; 498 u8 dup_64qam;
433 union { 499} __attribute__ ((packed));
434 struct pda_rssi_cal_entry v1_rssi;
435 500
436 struct { 501struct p54_scan_body_longbow {
437 __le32 basic_rate_mask; 502 struct p54_channel_output_limit_longbow power_limits;
438 u8 rts_rates[8]; 503 struct p54_pa_curve_data_sample_longbow curve_data[8];
439 struct pda_rssi_cal_entry rssi; 504 __le16 unkn[6]; /* maybe more power_limits or rate_mask */
440 } v2 __attribute__ ((packed)); 505} __attribute__ ((packed));
441 } __attribute__ ((packed)); 506
507union p54_scan_body_union {
508 struct p54_scan_body normal;
509 struct p54_scan_body_longbow longbow;
442} __attribute__ ((packed)); 510} __attribute__ ((packed));
443 511
444#define P54_SCAN_V1_LEN 0x70 512struct p54_scan_tail_rate {
445#define P54_SCAN_V2_LEN 0x7c 513 __le32 basic_rate_mask;
514 u8 rts_rates[8];
515} __attribute__ ((packed));
446 516
447struct p54_led { 517struct p54_led {
448 __le16 mode; 518 __le16 mode;
@@ -511,6 +581,7 @@ struct p54_psm_interval {
511 __le16 periods; 581 __le16 periods;
512} __attribute__ ((packed)); 582} __attribute__ ((packed));
513 583
584#define P54_PSM_CAM 0
514#define P54_PSM BIT(0) 585#define P54_PSM BIT(0)
515#define P54_PSM_DTIM BIT(1) 586#define P54_PSM_DTIM BIT(1)
516#define P54_PSM_MCBC BIT(2) 587#define P54_PSM_MCBC BIT(2)
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index aa367a0ddc4..3f9a6b04ea9 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -79,6 +79,12 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
79 if (err) 79 if (err)
80 return err; 80 return err;
81 81
82 if (priv->common.fw_interface != FW_LM86) {
83 dev_err(&priv->pdev->dev, "wrong firmware, "
84 "please get a LM86(PCI) firmware a try again.\n");
85 return -EINVAL;
86 }
87
82 data = (__le32 *) priv->firmware->data; 88 data = (__le32 *) priv->firmware->data;
83 remains = priv->firmware->size; 89 remains = priv->firmware->size;
84 device_addr = ISL38XX_DEV_FIRMWARE_ADDR; 90 device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
new file mode 100644
index 00000000000..7fde243b3d5
--- /dev/null
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -0,0 +1,770 @@
1/*
2 * Copyright (C) 2008 Christian Lamparter <chunkeey@web.de>
3 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
4 *
5 * This driver is a port from stlc45xx:
6 * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/interrupt.h>
26#include <linux/firmware.h>
27#include <linux/delay.h>
28#include <linux/irq.h>
29#include <linux/spi/spi.h>
30#include <linux/etherdevice.h>
31#include <linux/gpio.h>
32
33#include "p54spi.h"
34#include "p54spi_eeprom.h"
35#include "p54.h"
36
37#include "p54common.h"
38
39MODULE_FIRMWARE("3826.arm");
40MODULE_ALIAS("stlc45xx");
41
42/*
43 * gpios should be handled in board files and provided via platform data,
44 * but because it's currently impossible for p54spi to have a header file
 45 * in include/linux, let's use module parameters for now
46 */
47
48static int p54spi_gpio_power = 97;
49module_param(p54spi_gpio_power, int, 0444);
50MODULE_PARM_DESC(p54spi_gpio_power, "gpio number for power line");
51
52static int p54spi_gpio_irq = 87;
53module_param(p54spi_gpio_irq, int, 0444);
54MODULE_PARM_DESC(p54spi_gpio_irq, "gpio number for irq line");
55
56static void p54spi_spi_read(struct p54s_priv *priv, u8 address,
57 void *buf, size_t len)
58{
59 struct spi_transfer t[2];
60 struct spi_message m;
61 __le16 addr;
62
63 /* We first push the address */
64 addr = cpu_to_le16(address << 8 | SPI_ADRS_READ_BIT_15);
65
66 spi_message_init(&m);
67 memset(t, 0, sizeof(t));
68
69 t[0].tx_buf = &addr;
70 t[0].len = sizeof(addr);
71 spi_message_add_tail(&t[0], &m);
72
73 t[1].rx_buf = buf;
74 t[1].len = len;
75 spi_message_add_tail(&t[1], &m);
76
77 spi_sync(priv->spi, &m);
78}
79
80
81static void p54spi_spi_write(struct p54s_priv *priv, u8 address,
82 const void *buf, size_t len)
83{
84 struct spi_transfer t[3];
85 struct spi_message m;
86 __le16 addr;
87
88 /* We first push the address */
89 addr = cpu_to_le16(address << 8);
90
91 spi_message_init(&m);
92 memset(t, 0, sizeof(t));
93
94 t[0].tx_buf = &addr;
95 t[0].len = sizeof(addr);
96 spi_message_add_tail(&t[0], &m);
97
98 t[1].tx_buf = buf;
99 t[1].len = len;
100 spi_message_add_tail(&t[1], &m);
101
102 if (len % 2) {
103 __le16 last_word;
104 last_word = cpu_to_le16(((u8 *)buf)[len - 1]);
105
106 t[2].tx_buf = &last_word;
107 t[2].len = sizeof(last_word);
108 spi_message_add_tail(&t[2], &m);
109 }
110
111 spi_sync(priv->spi, &m);
112}
113
114static u16 p54spi_read16(struct p54s_priv *priv, u8 addr)
115{
116 __le16 val;
117
118 p54spi_spi_read(priv, addr, &val, sizeof(val));
119
120 return le16_to_cpu(val);
121}
122
123static u32 p54spi_read32(struct p54s_priv *priv, u8 addr)
124{
125 __le32 val;
126
127 p54spi_spi_read(priv, addr, &val, sizeof(val));
128
129 return le32_to_cpu(val);
130}
131
132static inline void p54spi_write16(struct p54s_priv *priv, u8 addr, __le16 val)
133{
134 p54spi_spi_write(priv, addr, &val, sizeof(val));
135}
136
137static inline void p54spi_write32(struct p54s_priv *priv, u8 addr, __le32 val)
138{
139 p54spi_spi_write(priv, addr, &val, sizeof(val));
140}
141
142struct p54spi_spi_reg {
143 u16 address; /* __le16 ? */
144 u16 length;
145 char *name;
146};
147
148static const struct p54spi_spi_reg p54spi_registers_array[] =
149{
150 { SPI_ADRS_ARM_INTERRUPTS, 32, "ARM_INT " },
151 { SPI_ADRS_ARM_INT_EN, 32, "ARM_INT_ENA " },
152 { SPI_ADRS_HOST_INTERRUPTS, 32, "HOST_INT " },
153 { SPI_ADRS_HOST_INT_EN, 32, "HOST_INT_ENA" },
154 { SPI_ADRS_HOST_INT_ACK, 32, "HOST_INT_ACK" },
155 { SPI_ADRS_GEN_PURP_1, 32, "GP1_COMM " },
156 { SPI_ADRS_GEN_PURP_2, 32, "GP2_COMM " },
157 { SPI_ADRS_DEV_CTRL_STAT, 32, "DEV_CTRL_STA" },
158 { SPI_ADRS_DMA_DATA, 16, "DMA_DATA " },
159 { SPI_ADRS_DMA_WRITE_CTRL, 16, "DMA_WR_CTRL " },
160 { SPI_ADRS_DMA_WRITE_LEN, 16, "DMA_WR_LEN " },
161 { SPI_ADRS_DMA_WRITE_BASE, 32, "DMA_WR_BASE " },
162 { SPI_ADRS_DMA_READ_CTRL, 16, "DMA_RD_CTRL " },
163 { SPI_ADRS_DMA_READ_LEN, 16, "DMA_RD_LEN " },
164 { SPI_ADRS_DMA_WRITE_BASE, 32, "DMA_RD_BASE " }
165};
166
167static int p54spi_wait_bit(struct p54s_priv *priv, u16 reg, __le32 bits)
168{
169 int i;
170 __le32 buffer;
171
172 for (i = 0; i < 2000; i++) {
173 p54spi_spi_read(priv, reg, &buffer, sizeof(buffer));
174 if (buffer == bits)
175 return 1;
176
177 msleep(1);
178 }
179 return 0;
180}
181
182static int p54spi_request_firmware(struct ieee80211_hw *dev)
183{
184 struct p54s_priv *priv = dev->priv;
185 int ret;
186
 187 /* FIXME: should the driver use its own struct device? */
188 ret = request_firmware(&priv->firmware, "3826.arm", &priv->spi->dev);
189
190 if (ret < 0) {
191 dev_err(&priv->spi->dev, "request_firmware() failed: %d", ret);
192 return ret;
193 }
194
195 ret = p54_parse_firmware(dev, priv->firmware);
196 if (ret) {
197 release_firmware(priv->firmware);
198 return ret;
199 }
200
201 return 0;
202}
203
204static int p54spi_request_eeprom(struct ieee80211_hw *dev)
205{
206 struct p54s_priv *priv = dev->priv;
207 const struct firmware *eeprom;
208 int ret;
209
210 /*
211 * allow users to customize their eeprom.
212 */
213
214 ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
215 if (ret < 0) {
216 dev_info(&priv->spi->dev, "loading default eeprom...\n");
217 ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom,
218 sizeof(p54spi_eeprom));
219 } else {
220 dev_info(&priv->spi->dev, "loading user eeprom...\n");
221 ret = p54_parse_eeprom(dev, (void *) eeprom->data,
222 (int)eeprom->size);
223 release_firmware(eeprom);
224 }
225 return ret;
226}
227
228static int p54spi_upload_firmware(struct ieee80211_hw *dev)
229{
230 struct p54s_priv *priv = dev->priv;
231 unsigned long fw_len, fw_addr;
232 long _fw_len;
233
234 /* stop the device */
235 p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
236 SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET |
237 SPI_CTRL_STAT_START_HALTED));
238
239 msleep(TARGET_BOOT_SLEEP);
240
241 p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
242 SPI_CTRL_STAT_HOST_OVERRIDE |
243 SPI_CTRL_STAT_START_HALTED));
244
245 msleep(TARGET_BOOT_SLEEP);
246
247 fw_addr = ISL38XX_DEV_FIRMWARE_ADDR;
248 fw_len = priv->firmware->size;
249
250 while (fw_len > 0) {
251 _fw_len = min_t(long, fw_len, SPI_MAX_PACKET_SIZE);
252
253 p54spi_write16(priv, SPI_ADRS_DMA_WRITE_CTRL,
254 cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE));
255
256 if (p54spi_wait_bit(priv, SPI_ADRS_DMA_WRITE_CTRL,
257 cpu_to_le32(HOST_ALLOWED)) == 0) {
258 dev_err(&priv->spi->dev, "fw_upload not allowed "
259 "to DMA write.");
260 return -EAGAIN;
261 }
262
263 p54spi_write16(priv, SPI_ADRS_DMA_WRITE_LEN,
264 cpu_to_le16(_fw_len));
265 p54spi_write32(priv, SPI_ADRS_DMA_WRITE_BASE,
266 cpu_to_le32(fw_addr));
267
268 p54spi_spi_write(priv, SPI_ADRS_DMA_DATA,
269 &priv->firmware->data, _fw_len);
270
271 fw_len -= _fw_len;
272 fw_addr += _fw_len;
273
 274 /* FIXME: I think this doesn't work if the firmware is large;
 275 * when this loop goes into a second round, fw->data is not
 276 * advanced at all! */
277 }
278
279 BUG_ON(fw_len != 0);
280
281 /* enable host interrupts */
282 p54spi_write32(priv, SPI_ADRS_HOST_INT_EN,
283 cpu_to_le32(SPI_HOST_INTS_DEFAULT));
284
285 /* boot the device */
286 p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
287 SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET |
288 SPI_CTRL_STAT_RAM_BOOT));
289
290 msleep(TARGET_BOOT_SLEEP);
291
292 p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
293 SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT));
294 msleep(TARGET_BOOT_SLEEP);
295 return 0;
296}
297
298static void p54spi_power_off(struct p54s_priv *priv)
299{
300 disable_irq(gpio_to_irq(p54spi_gpio_irq));
301 gpio_set_value(p54spi_gpio_power, 0);
302}
303
304static void p54spi_power_on(struct p54s_priv *priv)
305{
306 gpio_set_value(p54spi_gpio_power, 1);
307 enable_irq(gpio_to_irq(p54spi_gpio_irq));
308
309 /*
 310 * need to wait a while before the device can be accessed; the
 311 * length of this delay is just a guess
312 */
313 msleep(10);
314}
315
316static inline void p54spi_int_ack(struct p54s_priv *priv, u32 val)
317{
318 p54spi_write32(priv, SPI_ADRS_HOST_INT_ACK, cpu_to_le32(val));
319}
320
321static void p54spi_wakeup(struct p54s_priv *priv)
322{
323 unsigned long timeout;
324 u32 ints;
325
326 /* wake the chip */
327 p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS,
328 cpu_to_le32(SPI_TARGET_INT_WAKEUP));
329
330 /* And wait for the READY interrupt */
331 timeout = jiffies + HZ;
332
333 ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
334 while (!(ints & SPI_HOST_INT_READY)) {
335 if (time_after(jiffies, timeout))
336 goto out;
337 ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
338 }
339
340 p54spi_int_ack(priv, SPI_HOST_INT_READY);
341
342out:
343 return;
344}
345
346static inline void p54spi_sleep(struct p54s_priv *priv)
347{
348 p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS,
349 cpu_to_le32(SPI_TARGET_INT_SLEEP));
350}
351
352static void p54spi_int_ready(struct p54s_priv *priv)
353{
354 p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, cpu_to_le32(
355 SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE));
356
357 switch (priv->fw_state) {
358 case FW_STATE_BOOTING:
359 priv->fw_state = FW_STATE_READY;
360 complete(&priv->fw_comp);
361 break;
362 case FW_STATE_RESETTING:
363 priv->fw_state = FW_STATE_READY;
364 /* TODO: reinitialize state */
365 break;
366 default:
367 break;
368 }
369}
370
371static int p54spi_rx(struct p54s_priv *priv)
372{
373 struct sk_buff *skb;
374 u16 len;
375
376 p54spi_wakeup(priv);
377
 378 /* dummy read to work around an SPI DMA controller bug */
379 p54spi_read16(priv, SPI_ADRS_GEN_PURP_1);
380
381 len = p54spi_read16(priv, SPI_ADRS_DMA_DATA);
382
383 if (len == 0) {
384 dev_err(&priv->spi->dev, "rx request of zero bytes");
385 return 0;
386 }
387
388 skb = dev_alloc_skb(len);
389 if (!skb) {
390 dev_err(&priv->spi->dev, "could not alloc skb");
391 return 0;
392 }
393
394 p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, skb_put(skb, len), len);
395 p54spi_sleep(priv);
396
397 if (p54_rx(priv->hw, skb) == 0)
398 dev_kfree_skb(skb);
399
400 return 0;
401}
402
403
404static irqreturn_t p54spi_interrupt(int irq, void *config)
405{
406 struct spi_device *spi = config;
407 struct p54s_priv *priv = dev_get_drvdata(&spi->dev);
408
409 queue_work(priv->hw->workqueue, &priv->work);
410
411 return IRQ_HANDLED;
412}
413
414static int p54spi_tx_frame(struct p54s_priv *priv, struct sk_buff *skb)
415{
416 struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
417 struct p54s_dma_regs dma_regs;
418 unsigned long timeout;
419 int ret = 0;
420 u32 ints;
421
422 p54spi_wakeup(priv);
423
424 dma_regs.cmd = cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE);
425 dma_regs.len = cpu_to_le16(skb->len);
426 dma_regs.addr = hdr->req_id;
427
428 p54spi_spi_write(priv, SPI_ADRS_DMA_WRITE_CTRL, &dma_regs,
429 sizeof(dma_regs));
430
431 p54spi_spi_write(priv, SPI_ADRS_DMA_DATA, skb->data, skb->len);
432
433 timeout = jiffies + 2 * HZ;
434 ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
435 while (!(ints & SPI_HOST_INT_WR_READY)) {
436 if (time_after(jiffies, timeout)) {
437 dev_err(&priv->spi->dev, "WR_READY timeout");
438 ret = -1;
439 goto out;
440 }
441 ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
442 }
443
444 p54spi_int_ack(priv, SPI_HOST_INT_WR_READY);
445 p54spi_sleep(priv);
446
447out:
448 if (FREE_AFTER_TX(skb))
449 p54_free_skb(priv->hw, skb);
450 return ret;
451}
452
453static int p54spi_wq_tx(struct p54s_priv *priv)
454{
455 struct p54s_tx_info *entry;
456 struct sk_buff *skb;
457 struct ieee80211_tx_info *info;
458 struct p54_tx_info *minfo;
459 struct p54s_tx_info *dinfo;
460 int ret = 0;
461
462 spin_lock_bh(&priv->tx_lock);
463
464 while (!list_empty(&priv->tx_pending)) {
465 entry = list_entry(priv->tx_pending.next,
466 struct p54s_tx_info, tx_list);
467
468 list_del_init(&entry->tx_list);
469
470 spin_unlock_bh(&priv->tx_lock);
471
472 dinfo = container_of((void *) entry, struct p54s_tx_info,
473 tx_list);
474 minfo = container_of((void *) dinfo, struct p54_tx_info,
475 data);
476 info = container_of((void *) minfo, struct ieee80211_tx_info,
477 rate_driver_data);
478 skb = container_of((void *) info, struct sk_buff, cb);
479
480 ret = p54spi_tx_frame(priv, skb);
481
482 spin_lock_bh(&priv->tx_lock);
483
484 if (ret < 0) {
485 p54_free_skb(priv->hw, skb);
486 goto out;
487 }
488 }
489
490out:
491 spin_unlock_bh(&priv->tx_lock);
492 return ret;
493}
494
495static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
496{
497 struct p54s_priv *priv = dev->priv;
498 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
499 struct p54_tx_info *mi = (struct p54_tx_info *) info->rate_driver_data;
500 struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data;
501
502 BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data)));
503
504 spin_lock_bh(&priv->tx_lock);
505 list_add_tail(&di->tx_list, &priv->tx_pending);
506 spin_unlock_bh(&priv->tx_lock);
507
508 queue_work(priv->hw->workqueue, &priv->work);
509}
510
511static void p54spi_work(struct work_struct *work)
512{
513 struct p54s_priv *priv = container_of(work, struct p54s_priv, work);
514 u32 ints;
515 int ret;
516
517 mutex_lock(&priv->mutex);
518
 519 if (priv->fw_state == FW_STATE_OFF ||
520 priv->fw_state == FW_STATE_RESET)
521 goto out;
522
523 ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
524
525 if (ints & SPI_HOST_INT_READY) {
526 p54spi_int_ready(priv);
527 p54spi_int_ack(priv, SPI_HOST_INT_READY);
528 }
529
530 if (priv->fw_state != FW_STATE_READY)
531 goto out;
532
533 if (ints & SPI_HOST_INT_UPDATE) {
534 p54spi_int_ack(priv, SPI_HOST_INT_UPDATE);
535 ret = p54spi_rx(priv);
536 if (ret < 0)
537 goto out;
538 }
539 if (ints & SPI_HOST_INT_SW_UPDATE) {
540 p54spi_int_ack(priv, SPI_HOST_INT_SW_UPDATE);
541 ret = p54spi_rx(priv);
542 if (ret < 0)
543 goto out;
544 }
545
546 ret = p54spi_wq_tx(priv);
547 if (ret < 0)
548 goto out;
549
550 ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
551
552out:
553 mutex_unlock(&priv->mutex);
554}
555
556static int p54spi_op_start(struct ieee80211_hw *dev)
557{
558 struct p54s_priv *priv = dev->priv;
559 unsigned long timeout;
560 int ret = 0;
561
562 if (mutex_lock_interruptible(&priv->mutex)) {
563 ret = -EINTR;
564 goto out;
565 }
566
567 priv->fw_state = FW_STATE_BOOTING;
568
569 p54spi_power_on(priv);
570
571 ret = p54spi_upload_firmware(dev);
572 if (ret < 0) {
573 p54spi_power_off(priv);
574 goto out_unlock;
575 }
576
577 mutex_unlock(&priv->mutex);
578
579 timeout = msecs_to_jiffies(2000);
580 timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp,
581 timeout);
582 if (!timeout) {
583 dev_err(&priv->spi->dev, "firmware boot failed");
584 p54spi_power_off(priv);
585 ret = -1;
586 goto out;
587 }
588
589 if (mutex_lock_interruptible(&priv->mutex)) {
590 ret = -EINTR;
591 p54spi_power_off(priv);
592 goto out;
593 }
594
595 WARN_ON(priv->fw_state != FW_STATE_READY);
596
597out_unlock:
598 mutex_unlock(&priv->mutex);
599
600out:
601 return ret;
602}
603
604static void p54spi_op_stop(struct ieee80211_hw *dev)
605{
606 struct p54s_priv *priv = dev->priv;
607
608 if (mutex_lock_interruptible(&priv->mutex)) {
609 /* FIXME: how to handle this error? */
610 return;
611 }
612
613 WARN_ON(priv->fw_state != FW_STATE_READY);
614
615 cancel_work_sync(&priv->work);
616
617 p54spi_power_off(priv);
618 spin_lock_bh(&priv->tx_lock);
619 INIT_LIST_HEAD(&priv->tx_pending);
620 spin_unlock_bh(&priv->tx_lock);
621
622 priv->fw_state = FW_STATE_OFF;
623 mutex_unlock(&priv->mutex);
624}
625
626static int __devinit p54spi_probe(struct spi_device *spi)
627{
628 struct p54s_priv *priv = NULL;
629 struct ieee80211_hw *hw;
630 int ret = -EINVAL;
631
632 hw = p54_init_common(sizeof(*priv));
633 if (!hw) {
634 dev_err(&priv->spi->dev, "could not alloc ieee80211_hw");
635 return -ENOMEM;
636 }
637
638 priv = hw->priv;
639 priv->hw = hw;
640 dev_set_drvdata(&spi->dev, priv);
641 priv->spi = spi;
642
643 spi->bits_per_word = 16;
644 spi->max_speed_hz = 24000000;
645
646 ret = spi_setup(spi);
647 if (ret < 0) {
648 dev_err(&priv->spi->dev, "spi_setup failed");
649 goto err_free_common;
650 }
651
652 ret = gpio_request(p54spi_gpio_power, "p54spi power");
653 if (ret < 0) {
654 dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret);
655 goto err_free_common;
656 }
657
658 ret = gpio_request(p54spi_gpio_irq, "p54spi irq");
659 if (ret < 0) {
660 dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret);
661 goto err_free_common;
662 }
663
664 gpio_direction_output(p54spi_gpio_power, 0);
665 gpio_direction_input(p54spi_gpio_irq);
666
667 ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
668 p54spi_interrupt, IRQF_DISABLED, "p54spi",
669 priv->spi);
670 if (ret < 0) {
671 dev_err(&priv->spi->dev, "request_irq() failed");
672 goto err_free_common;
673 }
674
675 set_irq_type(gpio_to_irq(p54spi_gpio_irq),
676 IRQ_TYPE_EDGE_RISING);
677
678 disable_irq(gpio_to_irq(p54spi_gpio_irq));
679
680 INIT_WORK(&priv->work, p54spi_work);
681 init_completion(&priv->fw_comp);
682 INIT_LIST_HEAD(&priv->tx_pending);
683 mutex_init(&priv->mutex);
684 SET_IEEE80211_DEV(hw, &spi->dev);
685 priv->common.open = p54spi_op_start;
686 priv->common.stop = p54spi_op_stop;
687 priv->common.tx = p54spi_op_tx;
688
689 ret = p54spi_request_firmware(hw);
690 if (ret < 0)
691 goto err_free_common;
692
693 ret = p54spi_request_eeprom(hw);
694 if (ret)
695 goto err_free_common;
696
697 ret = ieee80211_register_hw(hw);
698 if (ret) {
699 dev_err(&priv->spi->dev, "unable to register "
700 "mac80211 hw: %d", ret);
701 goto err_free_common;
702 }
703
704 dev_info(&priv->spi->dev, "device is bound to %s\n",
705 wiphy_name(hw->wiphy));
706 return 0;
707
708err_free_common:
709 p54_free_common(priv->hw);
710 return ret;
711}
712
713static int __devexit p54spi_remove(struct spi_device *spi)
714{
715 struct p54s_priv *priv = dev_get_drvdata(&spi->dev);
716
717 ieee80211_unregister_hw(priv->hw);
718
719 free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
720
721 gpio_free(p54spi_gpio_power);
722 gpio_free(p54spi_gpio_irq);
723 release_firmware(priv->firmware);
724
725 mutex_destroy(&priv->mutex);
726
727 p54_free_common(priv->hw);
728 ieee80211_free_hw(priv->hw);
729
730 return 0;
731}
732
733
734static struct spi_driver p54spi_driver = {
735 .driver = {
736 /* use cx3110x name because board-n800.c uses that for the
737 * SPI port */
738 .name = "cx3110x",
739 .bus = &spi_bus_type,
740 .owner = THIS_MODULE,
741 },
742
743 .probe = p54spi_probe,
744 .remove = __devexit_p(p54spi_remove),
745};
746
747static int __init p54spi_init(void)
748{
749 int ret;
750
751 ret = spi_register_driver(&p54spi_driver);
752 if (ret < 0) {
753 printk(KERN_ERR "failed to register SPI driver: %d", ret);
754 goto out;
755 }
756
757out:
758 return ret;
759}
760
761static void __exit p54spi_exit(void)
762{
763 spi_unregister_driver(&p54spi_driver);
764}
765
766module_init(p54spi_init);
767module_exit(p54spi_exit);
768
769MODULE_LICENSE("GPL");
770MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h
new file mode 100644
index 00000000000..7fbe8d8fc67
--- /dev/null
+++ b/drivers/net/wireless/p54/p54spi.h
@@ -0,0 +1,125 @@
1/*
2 * Copyright (C) 2008 Christian Lamparter <chunkeey@web.de>
3 *
4 * This driver is a port from stlc45xx:
5 * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 */
21
22#ifndef P54SPI_H
23#define P54SPI_H
24
25#include <linux/mutex.h>
26#include <linux/list.h>
27#include <net/mac80211.h>
28
29#include "p54.h"
30
31/* Bit 15 is read/write bit; ON = READ, OFF = WRITE */
32#define SPI_ADRS_READ_BIT_15 0x8000
33
34#define SPI_ADRS_ARM_INTERRUPTS 0x00
35#define SPI_ADRS_ARM_INT_EN 0x04
36
37#define SPI_ADRS_HOST_INTERRUPTS 0x08
38#define SPI_ADRS_HOST_INT_EN 0x0c
39#define SPI_ADRS_HOST_INT_ACK 0x10
40
41#define SPI_ADRS_GEN_PURP_1 0x14
42#define SPI_ADRS_GEN_PURP_2 0x18
43
44#define SPI_ADRS_DEV_CTRL_STAT 0x26 /* high word */
45
46#define SPI_ADRS_DMA_DATA 0x28
47
48#define SPI_ADRS_DMA_WRITE_CTRL 0x2c
49#define SPI_ADRS_DMA_WRITE_LEN 0x2e
50#define SPI_ADRS_DMA_WRITE_BASE 0x30
51
52#define SPI_ADRS_DMA_READ_CTRL 0x34
53#define SPI_ADRS_DMA_READ_LEN 0x36
54#define SPI_ADRS_DMA_READ_BASE 0x38
55
56#define SPI_CTRL_STAT_HOST_OVERRIDE 0x8000
57#define SPI_CTRL_STAT_START_HALTED 0x4000
58#define SPI_CTRL_STAT_RAM_BOOT 0x2000
59#define SPI_CTRL_STAT_HOST_RESET 0x1000
60#define SPI_CTRL_STAT_HOST_CPU_EN 0x0800
61
62#define SPI_DMA_WRITE_CTRL_ENABLE 0x0001
63#define SPI_DMA_READ_CTRL_ENABLE 0x0001
64#define HOST_ALLOWED (1 << 7)
65
66#define SPI_TIMEOUT 100 /* msec */
67
68#define SPI_MAX_TX_PACKETS 32
69
70#define SPI_MAX_PACKET_SIZE 32767
71
72#define SPI_TARGET_INT_WAKEUP 0x00000001
73#define SPI_TARGET_INT_SLEEP 0x00000002
74#define SPI_TARGET_INT_RDDONE 0x00000004
75
76#define SPI_TARGET_INT_CTS 0x00004000
77#define SPI_TARGET_INT_DR 0x00008000
78
79#define SPI_HOST_INT_READY 0x00000001
80#define SPI_HOST_INT_WR_READY 0x00000002
81#define SPI_HOST_INT_SW_UPDATE 0x00000004
82#define SPI_HOST_INT_UPDATE 0x10000000
83
84/* clear to send */
85#define SPI_HOST_INT_CR 0x00004000
86
87/* data ready */
88#define SPI_HOST_INT_DR 0x00008000
89
90#define SPI_HOST_INTS_DEFAULT \
91 (SPI_HOST_INT_READY | SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE)
92
93#define TARGET_BOOT_SLEEP 50
94
95struct p54s_dma_regs {
96 __le16 cmd;
97 __le16 len;
98 __le32 addr;
99} __attribute__ ((packed));
100
101struct p54s_tx_info {
102 struct list_head tx_list;
103};
104
105struct p54s_priv {
106 /* p54_common has to be the first entry */
107 struct p54_common common;
108 struct ieee80211_hw *hw;
109 struct spi_device *spi;
110
111 struct work_struct work;
112
113 struct mutex mutex;
114 struct completion fw_comp;
115
116 spinlock_t tx_lock;
117
118 /* protected by tx_lock */
119 struct list_head tx_pending;
120
121 enum fw_state fw_state;
122 const struct firmware *firmware;
123};
124
125#endif /* P54SPI_H */
diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h
new file mode 100644
index 00000000000..1ea1050911d
--- /dev/null
+++ b/drivers/net/wireless/p54/p54spi_eeprom.h
@@ -0,0 +1,678 @@
1/*
2 * Copyright (C) 2003 Conexant Americas Inc. All Rights Reserved.
3 * Copyright (C) 2004, 2005, 2006 Nokia Corporation
4 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
5 * Copyright 2008 Christian Lamparter <chunkeey@web.de>
6 *
7 * based on:
8 * - cx3110x's pda.h from Nokia
9 * - cx3110-transfer.log by Johannes Berg
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#ifndef P54SPI_EEPROM_H
27#define P54SPI_EEPROM_H
28
29static unsigned char p54spi_eeprom[] = {
30
31/* struct eeprom_pda_wrap */
320x47, 0x4d, 0x55, 0xaa, /* magic */
330x00, 0x00, /* pad */
340x00, 0x00, /* eeprom_pda_data_wrap length */
350x00, 0x00, 0x00, 0x00, /* arm opcode */
36
37/* bogus MAC address */
380x04, 0x00, 0x01, 0x01, /* PDR_MAC_ADDRESS */
39 0x00, 0x02, 0xee, 0xc0, 0xff, 0xee,
40
41/* struct bootrec_exp_if */
420x06, 0x00, 0x01, 0x10, /* PDR_INTERFACE_LIST */
43 0x00, 0x00, /* role */
44 0x0f, 0x00, /* if_id */
45 0x85, 0x00, /* variant = Longbow RF, 2GHz */
46 0x01, 0x00, /* btm_compat */
47 0x1f, 0x00, /* top_compat */
48
490x03, 0x00, 0x02, 0x10, /* PDR_HARDWARE_PLATFORM_COMPONENT_ID */
50 0x03, 0x20, 0x00, 0x43,
51
52/* struct pda_country[6] */
530x0d, 0x00, 0x07, 0x10, /* PDR_COUNTRY_LIST */
54 0x10, 0x00, 0x00, 0x00,
55 0x20, 0x00, 0x00, 0x00,
56 0x30, 0x00, 0x00, 0x00,
57 0x31, 0x00, 0x00, 0x00,
58 0x32, 0x00, 0x00, 0x00,
59 0x40, 0x00, 0x00, 0x00,
60
61/* struct pda_country */
620x03, 0x00, 0x08, 0x10, /* PDR_DEFAULT_COUNTRY */
63 0x30, 0x00, 0x00, 0x00, /* ETSI */
64
650x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */
66 0x08, 0x08, 0x08, 0x08,
67
680x09, 0x00, 0xad, 0xde, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */
69 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
71
72/* struct pda_custom_wrapper */
730x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */
74 0x0d, 0x00, 0xee, 0x00, /* 13 entries, 238 bytes per entry */
75 0x00, 0x00, 0x16, 0x0c, /* no offset, 3094 total len */
76 /* 2412 MHz */
77 0x6c, 0x09,
78 0x10, 0x01, 0x9a, 0x84,
79 0xaa, 0x8a, 0xaa, 0x8a, 0xaa, 0x8a, 0xaa, 0x8a,
80 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6,
81 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6,
82 0xf0, 0x00, 0x94, 0x6c,
83 0x99, 0x82, 0x99, 0x82, 0x99, 0x82, 0x99, 0x82,
84 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae,
85 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae,
86 0xd0, 0x00, 0xaa, 0x5a,
87 0x88, 0x7a, 0x88, 0x7a, 0x88, 0x7a, 0x88, 0x7a,
88 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6,
89 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6,
90 0xa0, 0x00, 0xf3, 0x47,
91 0x6e, 0x6e, 0x6e, 0x6e, 0x6e, 0x6e, 0x6e, 0x6e,
92 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a,
93 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a,
94 0x50, 0x00, 0x59, 0x36,
95 0x43, 0x5a, 0x43, 0x5a, 0x43, 0x5a, 0x43, 0x5a,
96 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85,
97 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85,
98 0x00, 0x00, 0xe4, 0x2d,
99 0x18, 0x46, 0x18, 0x46, 0x18, 0x46, 0x18, 0x46,
100 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71,
101 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71,
102 0x00, 0x80, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
104 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
105 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x80, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
111 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
112
113 /* 2417 MHz */
114 0x71, 0x09,
115 0x10, 0x01, 0xb9, 0x83,
116 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a,
117 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6,
118 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6,
119 0xf0, 0x00, 0x2e, 0x6c,
120 0x68, 0x82, 0x68, 0x82, 0x68, 0x82, 0x68, 0x82,
121 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad,
122 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad,
123 0xd0, 0x00, 0x8d, 0x5a,
124 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a,
125 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5,
126 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5,
127 0xa0, 0x00, 0x0a, 0x48,
128 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e,
129 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99,
130 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99,
131 0x50, 0x00, 0x7c, 0x36,
132 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59,
133 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85,
134 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85,
135 0x00, 0x00, 0xf5, 0x2d,
136 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45,
137 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71,
138 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71,
139 0x00, 0x80, 0x00, 0x00,
140 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
142 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x80, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
149
150 /* 2422 MHz */
151 0x76, 0x09,
152 0x10, 0x01, 0xb9, 0x83,
153 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a,
154 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6,
155 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6,
156 0xf0, 0x00, 0x2e, 0x6c,
157 0x68, 0x82, 0x68, 0x82, 0x68, 0x82, 0x68, 0x82,
158 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad,
159 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad,
160 0xd0, 0x00, 0x8d, 0x5a,
161 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a,
162 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5,
163 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5,
164 0xa0, 0x00, 0x0a, 0x48,
165 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e,
166 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99,
167 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99,
168 0x50, 0x00, 0x7c, 0x36,
169 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59,
170 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85,
171 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85,
172 0x00, 0x00, 0xf5, 0x2d,
173 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45,
174 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71,
175 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71,
176 0x00, 0x80, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x80, 0x00, 0x00,
181 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
184 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
186
187 /* 2427 MHz */
188 0x7b, 0x09,
189 0x10, 0x01, 0x48, 0x83,
190 0x67, 0x8a, 0x67, 0x8a, 0x67, 0x8a, 0x67, 0x8a,
191 0xf9, 0xb5, 0xf9, 0xb5, 0xf9, 0xb5, 0xf9, 0xb5,
192 0xf9, 0xb5, 0xf9, 0xb5, 0xf9, 0xb5, 0xf9, 0xb5,
193 0xf0, 0x00, 0xfb, 0x6b,
194 0x50, 0x82, 0x50, 0x82, 0x50, 0x82, 0x50, 0x82,
195 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad,
196 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad,
197 0xd0, 0x00, 0x7e, 0x5a,
198 0x38, 0x7a, 0x38, 0x7a, 0x38, 0x7a, 0x38, 0x7a,
199 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5,
200 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5,
201 0xa0, 0x00, 0x15, 0x48,
202 0x14, 0x6e, 0x14, 0x6e, 0x14, 0x6e, 0x14, 0x6e,
203 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99,
204 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99,
205 0x50, 0x00, 0x8e, 0x36,
206 0xd9, 0x59, 0xd9, 0x59, 0xd9, 0x59, 0xd9, 0x59,
207 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85,
208 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85,
209 0x00, 0x00, 0xfe, 0x2d,
210 0x9d, 0x45, 0x9d, 0x45, 0x9d, 0x45, 0x9d, 0x45,
211 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71,
212 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71,
213 0x00, 0x80, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
217 0x00, 0x80, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
223
224 /* 2432 MHz */
225 0x80, 0x09,
226 0x10, 0x01, 0xd7, 0x82,
227 0x51, 0x8a, 0x51, 0x8a, 0x51, 0x8a, 0x51, 0x8a,
228 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5,
229 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5,
230 0xf0, 0x00, 0xc8, 0x6b,
231 0x37, 0x82, 0x37, 0x82, 0x37, 0x82, 0x37, 0x82,
232 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad,
233 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad,
234 0xd0, 0x00, 0x6f, 0x5a,
235 0x1d, 0x7a, 0x1d, 0x7a, 0x1d, 0x7a, 0x1d, 0x7a,
236 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5,
237 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5,
238 0xa0, 0x00, 0x20, 0x48,
239 0xf6, 0x6d, 0xf6, 0x6d, 0xf6, 0x6d, 0xf6, 0x6d,
240 0x88, 0x99, 0x88, 0x99, 0x88, 0x99, 0x88, 0x99,
241 0x88, 0x99, 0x88, 0x99, 0x88, 0x99, 0x88, 0x99,
242 0x50, 0x00, 0x9f, 0x36,
243 0xb5, 0x59, 0xb5, 0x59, 0xb5, 0x59, 0xb5, 0x59,
244 0x47, 0x85, 0x47, 0x85, 0x47, 0x85, 0x47, 0x85,
245 0x47, 0x85, 0x47, 0x85, 0x47, 0x85, 0x47, 0x85,
246 0x00, 0x00, 0x06, 0x2e,
247 0x74, 0x45, 0x74, 0x45, 0x74, 0x45, 0x74, 0x45,
248 0x06, 0x71, 0x06, 0x71, 0x06, 0x71, 0x06, 0x71,
249 0x06, 0x71, 0x06, 0x71, 0x06, 0x71, 0x06, 0x71,
250 0x00, 0x80, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
254 0x00, 0x80, 0x00, 0x00,
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
258 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
260
261 /* 2437 MHz */
262 0x85, 0x09,
263 0x10, 0x01, 0x67, 0x82,
264 0x3a, 0x8a, 0x3a, 0x8a, 0x3a, 0x8a, 0x3a, 0x8a,
265 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5,
266 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5,
267 0xf0, 0x00, 0x95, 0x6b,
268 0x1f, 0x82, 0x1f, 0x82, 0x1f, 0x82, 0x1f, 0x82,
269 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad,
270 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad,
271 0xd0, 0x00, 0x61, 0x5a,
272 0x02, 0x7a, 0x02, 0x7a, 0x02, 0x7a, 0x02, 0x7a,
273 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5,
274 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5,
275 0xa0, 0x00, 0x2c, 0x48,
276 0xd8, 0x6d, 0xd8, 0x6d, 0xd8, 0x6d, 0xd8, 0x6d,
277 0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99,
278 0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99,
279 0x50, 0x00, 0xb1, 0x36,
280 0x92, 0x59, 0x92, 0x59, 0x92, 0x59, 0x92, 0x59,
281 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85,
282 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85,
283 0x00, 0x00, 0x0f, 0x2e,
284 0x4b, 0x45, 0x4b, 0x45, 0x4b, 0x45, 0x4b, 0x45,
285 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70,
286 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70,
287 0x00, 0x80, 0x00, 0x00,
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
291 0x00, 0x80, 0x00, 0x00,
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
297
298 /* 2442 MHz */
299 0x8a, 0x09,
300 0x10, 0x01, 0xf6, 0x81,
301 0x24, 0x8a, 0x24, 0x8a, 0x24, 0x8a, 0x24, 0x8a,
302 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5,
303 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5,
304 0xf0, 0x00, 0x62, 0x6b,
305 0x06, 0x82, 0x06, 0x82, 0x06, 0x82, 0x06, 0x82,
306 0x98, 0xad, 0x98, 0xad, 0x98, 0xad, 0x98, 0xad,
307 0x98, 0xad, 0x98, 0xad, 0x98, 0xad, 0x98, 0xad,
308 0xd0, 0x00, 0x52, 0x5a,
309 0xe7, 0x79, 0xe7, 0x79, 0xe7, 0x79, 0xe7, 0x79,
310 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5,
311 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5,
312 0xa0, 0x00, 0x37, 0x48,
313 0xba, 0x6d, 0xba, 0x6d, 0xba, 0x6d, 0xba, 0x6d,
314 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99,
315 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99,
316 0x50, 0x00, 0xc2, 0x36,
317 0x6e, 0x59, 0x6e, 0x59, 0x6e, 0x59, 0x6e, 0x59,
318 0x00, 0x85, 0x00, 0x85, 0x00, 0x85, 0x00, 0x85,
319 0x00, 0x85, 0x00, 0x85, 0x00, 0x85, 0x00, 0x85,
320 0x00, 0x00, 0x17, 0x2e,
321 0x22, 0x45, 0x22, 0x45, 0x22, 0x45, 0x22, 0x45,
322 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70,
323 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70,
324 0x00, 0x80, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x80, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
334
335 /* 2447 MHz */
336 0x8f, 0x09,
337 0x10, 0x01, 0x75, 0x83,
338 0x61, 0x8a, 0x61, 0x8a, 0x61, 0x8a, 0x61, 0x8a,
339 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5,
340 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5,
341 0xf0, 0x00, 0x4b, 0x6c,
342 0x3f, 0x82, 0x3f, 0x82, 0x3f, 0x82, 0x3f, 0x82,
343 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad,
344 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad,
345 0xd0, 0x00, 0xda, 0x5a,
346 0x1c, 0x7a, 0x1c, 0x7a, 0x1c, 0x7a, 0x1c, 0x7a,
347 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5,
348 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5,
349 0xa0, 0x00, 0x6d, 0x48,
350 0xe9, 0x6d, 0xe9, 0x6d, 0xe9, 0x6d, 0xe9, 0x6d,
351 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99,
352 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99,
353 0x50, 0x00, 0xc6, 0x36,
354 0x92, 0x59, 0x92, 0x59, 0x92, 0x59, 0x92, 0x59,
355 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85,
356 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85,
357 0x00, 0x00, 0x15, 0x2e,
358 0x3c, 0x45, 0x3c, 0x45, 0x3c, 0x45, 0x3c, 0x45,
359 0xce, 0x70, 0xce, 0x70, 0xce, 0x70, 0xce, 0x70,
360 0xce, 0x70, 0xce, 0x70, 0xce, 0x70, 0xce, 0x70,
361 0x00, 0x80, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x80, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
371
372 /* 2452 MHz */
373 0x94, 0x09,
374 0x10, 0x01, 0xf4, 0x84,
375 0x9e, 0x8a, 0x9e, 0x8a, 0x9e, 0x8a, 0x9e, 0x8a,
376 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6,
377 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6,
378 0xf0, 0x00, 0x34, 0x6d,
379 0x77, 0x82, 0x77, 0x82, 0x77, 0x82, 0x77, 0x82,
380 0x09, 0xae, 0x09, 0xae, 0x09, 0xae, 0x09, 0xae,
381 0x09, 0xae, 0x09, 0xae, 0x09, 0xae, 0x09, 0xae,
382 0xd0, 0x00, 0x62, 0x5b,
383 0x50, 0x7a, 0x50, 0x7a, 0x50, 0x7a, 0x50, 0x7a,
384 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5,
385 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5,
386 0xa0, 0x00, 0xa2, 0x48,
387 0x17, 0x6e, 0x17, 0x6e, 0x17, 0x6e, 0x17, 0x6e,
388 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99,
389 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99,
390 0x50, 0x00, 0xc9, 0x36,
391 0xb7, 0x59, 0xb7, 0x59, 0xb7, 0x59, 0xb7, 0x59,
392 0x49, 0x85, 0x49, 0x85, 0x49, 0x85, 0x49, 0x85,
393 0x49, 0x85, 0x49, 0x85, 0x49, 0x85, 0x49, 0x85,
394 0x00, 0x00, 0x12, 0x2e,
395 0x57, 0x45, 0x57, 0x45, 0x57, 0x45, 0x57, 0x45,
396 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70,
397 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70,
398 0x00, 0x80, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x80, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
408
 409 /* 2457 MHz */
410 0x99, 0x09,
411 0x10, 0x01, 0x74, 0x86,
412 0xdb, 0x8a, 0xdb, 0x8a, 0xdb, 0x8a, 0xdb, 0x8a,
413 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6,
414 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6,
415 0xf0, 0x00, 0x1e, 0x6e,
416 0xb0, 0x82, 0xb0, 0x82, 0xb0, 0x82, 0xb0, 0x82,
417 0x42, 0xae, 0x42, 0xae, 0x42, 0xae, 0x42, 0xae,
418 0x42, 0xae, 0x42, 0xae, 0x42, 0xae, 0x42, 0xae,
419 0xd0, 0x00, 0xeb, 0x5b,
420 0x85, 0x7a, 0x85, 0x7a, 0x85, 0x7a, 0x85, 0x7a,
421 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6,
422 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6,
423 0xa0, 0x00, 0xd8, 0x48,
424 0x46, 0x6e, 0x46, 0x6e, 0x46, 0x6e, 0x46, 0x6e,
425 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99,
426 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99,
427 0x50, 0x00, 0xcd, 0x36,
428 0xdb, 0x59, 0xdb, 0x59, 0xdb, 0x59, 0xdb, 0x59,
429 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85,
430 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85,
431 0x00, 0x00, 0x10, 0x2e,
432 0x71, 0x45, 0x71, 0x45, 0x71, 0x45, 0x71, 0x45,
433 0x03, 0x71, 0x03, 0x71, 0x03, 0x71, 0x03, 0x71,
434 0x03, 0x71, 0x03, 0x71, 0x03, 0x71, 0x03, 0x71,
435 0x00, 0x80, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x80, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
445
 446 /* 2462 MHz */
447 0x9e, 0x09,
448 0x10, 0x01, 0xf3, 0x87,
449 0x17, 0x8b, 0x17, 0x8b, 0x17, 0x8b, 0x17, 0x8b,
450 0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6,
451 0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6,
452 0xf0, 0x00, 0x07, 0x6f,
453 0xe9, 0x82, 0xe9, 0x82, 0xe9, 0x82, 0xe9, 0x82,
454 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae,
455 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae,
456 0xd0, 0x00, 0x73, 0x5c,
457 0xba, 0x7a, 0xba, 0x7a, 0xba, 0x7a, 0xba, 0x7a,
458 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6,
459 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6,
460 0xa0, 0x00, 0x0d, 0x49,
461 0x74, 0x6e, 0x74, 0x6e, 0x74, 0x6e, 0x74, 0x6e,
462 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a,
463 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a,
464 0x50, 0x00, 0xd1, 0x36,
465 0xff, 0x59, 0xff, 0x59, 0xff, 0x59, 0xff, 0x59,
466 0x91, 0x85, 0x91, 0x85, 0x91, 0x85, 0x91, 0x85,
467 0x91, 0x85, 0x91, 0x85, 0x91, 0x85, 0x91, 0x85,
468 0x00, 0x00, 0x0e, 0x2e,
469 0x8b, 0x45, 0x8b, 0x45, 0x8b, 0x45, 0x8b, 0x45,
470 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71,
471 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71,
472 0x00, 0x80, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x80, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
479 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
481 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
482
 483 /* 2467 MHz */
484 0xa3, 0x09,
485 0x10, 0x01, 0x72, 0x89,
486 0x54, 0x8b, 0x54, 0x8b, 0x54, 0x8b, 0x54, 0x8b,
487 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6,
488 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6,
489 0xf0, 0x00, 0xf0, 0x6f,
490 0x21, 0x83, 0x21, 0x83, 0x21, 0x83, 0x21, 0x83,
491 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae,
492 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae,
493 0xd0, 0x00, 0xfb, 0x5c,
494 0xee, 0x7a, 0xee, 0x7a, 0xee, 0x7a, 0xee, 0x7a,
495 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6,
496 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6,
497 0xa0, 0x00, 0x43, 0x49,
498 0xa3, 0x6e, 0xa3, 0x6e, 0xa3, 0x6e, 0xa3, 0x6e,
499 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a,
500 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a,
501 0x50, 0x00, 0xd4, 0x36,
502 0x24, 0x5a, 0x24, 0x5a, 0x24, 0x5a, 0x24, 0x5a,
503 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85,
504 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85,
505 0x00, 0x00, 0x0b, 0x2e,
506 0xa6, 0x45, 0xa6, 0x45, 0xa6, 0x45, 0xa6, 0x45,
507 0x38, 0x71, 0x38, 0x71, 0x38, 0x71, 0x38, 0x71,
508 0x38, 0x71, 0x38, 0x71, 0x38, 0x71, 0x38, 0x71,
509 0x00, 0x80, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x80, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
519
 520 /* 2472 MHz */
521 0xa8, 0x09,
522 0x10, 0x01, 0xf1, 0x8a,
523 0x91, 0x8b, 0x91, 0x8b, 0x91, 0x8b, 0x91, 0x8b,
524 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7,
525 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7,
526 0xf0, 0x00, 0xd9, 0x70,
527 0x5a, 0x83, 0x5a, 0x83, 0x5a, 0x83, 0x5a, 0x83,
528 0xec, 0xae, 0xec, 0xae, 0xec, 0xae, 0xec, 0xae,
529 0xec, 0xae, 0xec, 0xae, 0xec, 0xae, 0xec, 0xae,
530 0xd0, 0x00, 0x83, 0x5d,
531 0x23, 0x7b, 0x23, 0x7b, 0x23, 0x7b, 0x23, 0x7b,
532 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6,
533 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6,
534 0xa0, 0x00, 0x78, 0x49,
535 0xd1, 0x6e, 0xd1, 0x6e, 0xd1, 0x6e, 0xd1, 0x6e,
536 0x63, 0x9a, 0x63, 0x9a, 0x63, 0x9a, 0x63, 0x9a,
537 0x63, 0x9a, 0x63, 0x9a, 0x63, 0x9a, 0x63, 0x9a,
538 0x50, 0x00, 0xd8, 0x36,
539 0x48, 0x5a, 0x48, 0x5a, 0x48, 0x5a, 0x48, 0x5a,
540 0xda, 0x85, 0xda, 0x85, 0xda, 0x85, 0xda, 0x85,
541 0xda, 0x85, 0xda, 0x85, 0xda, 0x85, 0xda, 0x85,
542 0x00, 0x00, 0x09, 0x2e,
543 0xc0, 0x45, 0xc0, 0x45, 0xc0, 0x45, 0xc0, 0x45,
544 0x52, 0x71, 0x52, 0x71, 0x52, 0x71, 0x52, 0x71,
545 0x52, 0x71, 0x52, 0x71, 0x52, 0x71, 0x52, 0x71,
546 0x00, 0x80, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
550 0x00, 0x80, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
554 0x00, 0x00, 0x00, 0x00,
555 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
556
557/*
558 * Not really sure if this is actually the power_limit database,
559 * it looks a bit "related" to PDR_PRISM_ZIF_TX_IQ_CALIBRATION
560 */
561/* struct pda_custom_wrapper */
5620xae, 0x00, 0xef, 0xbe, /* PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM */
563 0x0d, 0x00, 0x1a, 0x00, /* 13 entries, 26 bytes per entry */
564 0x00, 0x00, 0x52, 0x01, /* no offset, 338 bytes total */
565
566 /* 2412 MHz */
567 0x6c, 0x09,
568 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
569 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00,
570 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
571
572 /* 2417 MHz */
573 0x71, 0x09,
574 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
575 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
576 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
577
578 /* 2422 MHz */
579 0x76, 0x09,
580 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
581 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
582 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
583
584 /* 2427 MHz */
585 0x7b, 0x09,
586 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
587 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
588 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
589
590 /* 2432 MHz */
591 0x80, 0x09,
592 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
593 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
594 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
595
596 /* 2437 MHz */
597 0x85, 0x09,
598 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
599 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
600 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
601
602 /* 2442 MHz */
603 0x8a, 0x09,
604 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
605 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
606 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
607
608 /* 2447 MHz */
609 0x8f, 0x09,
610 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
611 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
612 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
613
614 /* 2452 MHz */
615 0x94, 0x09,
616 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
617 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
618 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
619
620 /* 2457 MHz */
621 0x99, 0x09,
622 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
623 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
624 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
625
626 /* 2462 MHz */
627 0x9e, 0x09,
628 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
629 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
630 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
631
632 /* 2467 MHz */
633 0xa3, 0x09,
634 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
635 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
636 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
637
638 /* 2472 MHz */
639 0xa8, 0x09,
640 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
641 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
642 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
643
644/* struct pda_iq_autocal_entry[13] */
6450x42, 0x00, 0x06, 0x19, /* PDR_PRISM_ZIF_TX_IQ_CALIBRATION */
646 /* 2412 MHz */
647 0x6c, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00,
648 /* 2417 MHz */
649 0x71, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00,
650 /* 2422 MHz */
651 0x76, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00,
652 /* 2427 MHz */
653 0x7b, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00,
654 /* 2432 MHz */
655 0x80, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00,
656 /* 2437 MHz */
657 0x85, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00,
658 /* 2442 MHz */
659 0x8a, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00,
660 /* 2447 MHz */
661 0x8f, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00,
662 /* 2452 MHz */
663 0x94, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00,
664 /* 2457 MHz */
665 0x99, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
666 /* 2462 MHz */
667 0x9e, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
668 /* 2467 MHz */
669 0xa3, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
670 /* 2472 MHz */
671 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
672
6730x02, 0x00, 0x00, 0x00, /* PDR_END */
674 0xa8, 0xf5 /* bogus data */
675};
676
677#endif /* P54SPI_EEPROM_H */
678
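Editor's note: the custom-wrapper PDR near the end of the blob above declares its own geometry: 13 entries of 26 bytes each, no offset, 338 bytes of payload in total. A small standalone sketch of how those header bytes could be decoded is given below as a sanity check; the struct and field names are illustrative assumptions, not the driver's actual pda_custom_wrapper definition.

#include <stdint.h>
#include <stdio.h>

/* Illustrative decoder for the four little-endian 16-bit fields that follow
 * the <len, code> pair of the custom power-limit PDR above. */
struct custom_wrapper_hdr {
	uint16_t entries;     /* 0x000d -> 13 entries          */
	uint16_t entry_size;  /* 0x001a -> 26 bytes per entry  */
	uint16_t offset;      /* 0x0000 -> no offset           */
	uint16_t len;         /* 0x0152 -> 338 bytes in total  */
};

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* header bytes copied verbatim from the blob above */
	const uint8_t raw[8] = { 0x0d, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x52, 0x01 };
	struct custom_wrapper_hdr hdr = {
		.entries    = get_le16(raw + 0),
		.entry_size = get_le16(raw + 2),
		.offset     = get_le16(raw + 4),
		.len        = get_le16(raw + 6),
	};

	/* 13 entries * 26 bytes = 338 bytes, matching the declared length */
	printf("%u entries * %u bytes = %u (declared: %u)\n",
	       hdr.entries, hdr.entry_size,
	       hdr.entries * hdr.entry_size, hdr.len);
	return 0;
}

Each of the 13 entries then begins with the channel frequency in MHz as a little-endian 16-bit value (0x6c, 0x09 = 0x096c = 2412, and so on), which is how the per-channel comments in the tables above can be checked directly against the data.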
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 5de2ebfb28c..9539ddcf379 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -424,9 +424,46 @@ static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep,
424 data, len, &alen, 2000); 424 data, len, &alen, 2000);
425} 425}
426 426
427static const char p54u_romboot_3887[] = "~~~~";
428static const char p54u_firmware_upload_3887[] = "<\r";
429
430static int p54u_device_reset_3887(struct ieee80211_hw *dev)
431{
432 struct p54u_priv *priv = dev->priv;
433 int ret, lock = (priv->intf->condition != USB_INTERFACE_BINDING);
434 u8 buf[4];
435
436 if (lock) {
437 ret = usb_lock_device_for_reset(priv->udev, priv->intf);
438 if (ret < 0) {
439 dev_err(&priv->udev->dev, "(p54usb) unable to lock "
 440 "device for reset: %d\n", ret);
441 return ret;
442 }
443 }
444
445 ret = usb_reset_device(priv->udev);
446 if (lock)
447 usb_unlock_device(priv->udev);
448
449 if (ret) {
450 dev_err(&priv->udev->dev, "(p54usb) unable to reset "
451 "device: %d\n", ret);
452 return ret;
453 }
454
455 memcpy(&buf, p54u_romboot_3887, sizeof(buf));
456 ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
457 buf, sizeof(buf));
458 if (ret)
459 dev_err(&priv->udev->dev, "(p54usb) unable to jump to "
460 "boot ROM: %d\n", ret);
461
462 return ret;
463}
464
427static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) 465static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
428{ 466{
429 static char start_string[] = "~~~~<\r";
430 struct p54u_priv *priv = dev->priv; 467 struct p54u_priv *priv = dev->priv;
431 const struct firmware *fw_entry = NULL; 468 const struct firmware *fw_entry = NULL;
432 int err, alen; 469 int err, alen;
@@ -445,12 +482,9 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
445 goto err_bufalloc; 482 goto err_bufalloc;
446 } 483 }
447 484
448 memcpy(buf, start_string, 4); 485 err = p54u_device_reset_3887(dev);
449 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 4); 486 if (err)
450 if (err) {
451 dev_err(&priv->udev->dev, "(p54usb) reset failed! (%d)\n", err);
452 goto err_reset; 487 goto err_reset;
453 }
454 488
455 err = request_firmware(&fw_entry, "isl3887usb", &priv->udev->dev); 489 err = request_firmware(&fw_entry, "isl3887usb", &priv->udev->dev);
456 if (err) { 490 if (err) {
@@ -466,15 +500,22 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
466 if (err) 500 if (err)
467 goto err_upload_failed; 501 goto err_upload_failed;
468 502
503 if (priv->common.fw_interface != FW_LM87) {
504 dev_err(&priv->udev->dev, "wrong firmware, "
 505 "please get an LM87 firmware and try again.\n");
506 err = -EINVAL;
507 goto err_upload_failed;
508 }
509
469 left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size); 510 left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size);
470 strcpy(buf, start_string); 511 strcpy(buf, p54u_firmware_upload_3887);
471 left -= strlen(start_string); 512 left -= strlen(p54u_firmware_upload_3887);
472 tmp += strlen(start_string); 513 tmp += strlen(p54u_firmware_upload_3887);
473 514
474 data = fw_entry->data; 515 data = fw_entry->data;
475 remains = fw_entry->size; 516 remains = fw_entry->size;
476 517
477 hdr = (struct x2_header *)(buf + strlen(start_string)); 518 hdr = (struct x2_header *)(buf + strlen(p54u_firmware_upload_3887));
478 memcpy(hdr->signature, X2_SIGNATURE, X2_SIGNATURE_SIZE); 519 memcpy(hdr->signature, X2_SIGNATURE, X2_SIGNATURE_SIZE);
479 hdr->fw_load_addr = cpu_to_le32(ISL38XX_DEV_FIRMWARE_ADDR); 520 hdr->fw_load_addr = cpu_to_le32(ISL38XX_DEV_FIRMWARE_ADDR);
480 hdr->fw_length = cpu_to_le32(fw_entry->size); 521 hdr->fw_length = cpu_to_le32(fw_entry->size);
@@ -616,6 +657,14 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
616 return err; 657 return err;
617 } 658 }
618 659
660 if (priv->common.fw_interface != FW_LM86) {
661 dev_err(&priv->udev->dev, "wrong firmware, "
 662 "please get an LM86(USB) firmware and try again.\n");
663 kfree(buf);
664 release_firmware(fw_entry);
665 return -EINVAL;
666 }
667
619#define P54U_WRITE(type, addr, data) \ 668#define P54U_WRITE(type, addr, data) \
620 do {\ 669 do {\
621 err = p54u_write(priv, buf, type,\ 670 err = p54u_write(priv, buf, type,\
@@ -876,6 +925,9 @@ static int __devinit p54u_probe(struct usb_interface *intf,
876 SET_IEEE80211_DEV(dev, &intf->dev); 925 SET_IEEE80211_DEV(dev, &intf->dev);
877 usb_set_intfdata(intf, dev); 926 usb_set_intfdata(intf, dev);
878 priv->udev = udev; 927 priv->udev = udev;
928 priv->intf = intf;
929 skb_queue_head_init(&priv->rx_queue);
930 init_usb_anchor(&priv->submitted);
879 931
880 usb_get_dev(udev); 932 usb_get_dev(udev);
881 933
@@ -918,9 +970,6 @@ static int __devinit p54u_probe(struct usb_interface *intf,
918 if (err) 970 if (err)
919 goto err_free_dev; 971 goto err_free_dev;
920 972
921 skb_queue_head_init(&priv->rx_queue);
922 init_usb_anchor(&priv->submitted);
923
924 p54u_open(dev); 973 p54u_open(dev);
925 err = p54_read_eeprom(dev); 974 err = p54_read_eeprom(dev);
926 p54u_stop(dev); 975 p54u_stop(dev);
@@ -958,11 +1007,23 @@ static void __devexit p54u_disconnect(struct usb_interface *intf)
958 ieee80211_free_hw(dev); 1007 ieee80211_free_hw(dev);
959} 1008}
960 1009
1010static int p54u_pre_reset(struct usb_interface *intf)
1011{
1012 return 0;
1013}
1014
1015static int p54u_post_reset(struct usb_interface *intf)
1016{
1017 return 0;
1018}
1019
961static struct usb_driver p54u_driver = { 1020static struct usb_driver p54u_driver = {
962 .name = "p54usb", 1021 .name = "p54usb",
963 .id_table = p54u_table, 1022 .id_table = p54u_table,
964 .probe = p54u_probe, 1023 .probe = p54u_probe,
965 .disconnect = p54u_disconnect, 1024 .disconnect = p54u_disconnect,
1025 .pre_reset = p54u_pre_reset,
1026 .post_reset = p54u_post_reset,
966}; 1027};
967 1028
968static int __init p54u_init(void) 1029static int __init p54u_init(void)
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index 54ee738bf2a..8bc58982d8d 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -126,6 +126,7 @@ struct p54u_rx_info {
126struct p54u_priv { 126struct p54u_priv {
127 struct p54_common common; 127 struct p54_common common;
128 struct usb_device *udev; 128 struct usb_device *udev;
129 struct usb_interface *intf;
129 enum { 130 enum {
130 P54U_NET2280 = 0, 131 P54U_NET2280 = 0,
131 P54U_3887 132 P54U_3887
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index e43bae97ed8..88895bd9e49 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -23,6 +23,7 @@
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/etherdevice.h> 24#include <linux/etherdevice.h>
25#include <linux/if_arp.h> 25#include <linux/if_arp.h>
26#include <asm/byteorder.h>
26 27
27#include "prismcompat.h" 28#include "prismcompat.h"
28#include "isl_38xx.h" 29#include "isl_38xx.h"
@@ -471,8 +472,8 @@ islpci_eth_receive(islpci_private *priv)
471 wmb(); 472 wmb();
472 473
473 /* increment the driver read pointer */ 474 /* increment the driver read pointer */
474 add_le32p(&control_block-> 475 le32_add_cpu(&control_block->
475 driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1); 476 driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
476 } 477 }
477 478
478 /* trigger the device */ 479 /* trigger the device */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index f91a88fc1e3..87a1734663d 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -85,12 +85,6 @@ extern int pc_debug;
85#define PIMFOR_FLAG_APPLIC_ORIGIN 0x01 85#define PIMFOR_FLAG_APPLIC_ORIGIN 0x01
86#define PIMFOR_FLAG_LITTLE_ENDIAN 0x02 86#define PIMFOR_FLAG_LITTLE_ENDIAN 0x02
87 87
88static inline void
89add_le32p(__le32 * le_number, u32 add)
90{
91 *le_number = cpu_to_le32(le32_to_cpup(le_number) + add);
92}
93
94void display_buffer(char *, int); 88void display_buffer(char *, int);
95 89
96/* 90/*
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index ed93ac41297..82af21eeb59 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -90,44 +90,44 @@ MODULE_PARM_DESC(workaround_interval,
90 90
91 91
92/* various RNDIS OID defs */ 92/* various RNDIS OID defs */
93#define OID_GEN_LINK_SPEED ccpu2(0x00010107) 93#define OID_GEN_LINK_SPEED cpu_to_le32(0x00010107)
94#define OID_GEN_RNDIS_CONFIG_PARAMETER ccpu2(0x0001021b) 94#define OID_GEN_RNDIS_CONFIG_PARAMETER cpu_to_le32(0x0001021b)
95 95
96#define OID_GEN_XMIT_OK ccpu2(0x00020101) 96#define OID_GEN_XMIT_OK cpu_to_le32(0x00020101)
97#define OID_GEN_RCV_OK ccpu2(0x00020102) 97#define OID_GEN_RCV_OK cpu_to_le32(0x00020102)
98#define OID_GEN_XMIT_ERROR ccpu2(0x00020103) 98#define OID_GEN_XMIT_ERROR cpu_to_le32(0x00020103)
99#define OID_GEN_RCV_ERROR ccpu2(0x00020104) 99#define OID_GEN_RCV_ERROR cpu_to_le32(0x00020104)
100#define OID_GEN_RCV_NO_BUFFER ccpu2(0x00020105) 100#define OID_GEN_RCV_NO_BUFFER cpu_to_le32(0x00020105)
101 101
102#define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101) 102#define OID_802_3_PERMANENT_ADDRESS cpu_to_le32(0x01010101)
103#define OID_802_3_CURRENT_ADDRESS ccpu2(0x01010102) 103#define OID_802_3_CURRENT_ADDRESS cpu_to_le32(0x01010102)
104#define OID_802_3_MULTICAST_LIST ccpu2(0x01010103) 104#define OID_802_3_MULTICAST_LIST cpu_to_le32(0x01010103)
105#define OID_802_3_MAXIMUM_LIST_SIZE ccpu2(0x01010104) 105#define OID_802_3_MAXIMUM_LIST_SIZE cpu_to_le32(0x01010104)
106 106
107#define OID_802_11_BSSID ccpu2(0x0d010101) 107#define OID_802_11_BSSID cpu_to_le32(0x0d010101)
108#define OID_802_11_SSID ccpu2(0x0d010102) 108#define OID_802_11_SSID cpu_to_le32(0x0d010102)
109#define OID_802_11_INFRASTRUCTURE_MODE ccpu2(0x0d010108) 109#define OID_802_11_INFRASTRUCTURE_MODE cpu_to_le32(0x0d010108)
110#define OID_802_11_ADD_WEP ccpu2(0x0d010113) 110#define OID_802_11_ADD_WEP cpu_to_le32(0x0d010113)
111#define OID_802_11_REMOVE_WEP ccpu2(0x0d010114) 111#define OID_802_11_REMOVE_WEP cpu_to_le32(0x0d010114)
112#define OID_802_11_DISASSOCIATE ccpu2(0x0d010115) 112#define OID_802_11_DISASSOCIATE cpu_to_le32(0x0d010115)
113#define OID_802_11_AUTHENTICATION_MODE ccpu2(0x0d010118) 113#define OID_802_11_AUTHENTICATION_MODE cpu_to_le32(0x0d010118)
114#define OID_802_11_PRIVACY_FILTER ccpu2(0x0d010119) 114#define OID_802_11_PRIVACY_FILTER cpu_to_le32(0x0d010119)
115#define OID_802_11_BSSID_LIST_SCAN ccpu2(0x0d01011a) 115#define OID_802_11_BSSID_LIST_SCAN cpu_to_le32(0x0d01011a)
116#define OID_802_11_ENCRYPTION_STATUS ccpu2(0x0d01011b) 116#define OID_802_11_ENCRYPTION_STATUS cpu_to_le32(0x0d01011b)
117#define OID_802_11_ADD_KEY ccpu2(0x0d01011d) 117#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d)
118#define OID_802_11_REMOVE_KEY ccpu2(0x0d01011e) 118#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e)
119#define OID_802_11_ASSOCIATION_INFORMATION ccpu2(0x0d01011f) 119#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f)
120#define OID_802_11_PMKID ccpu2(0x0d010123) 120#define OID_802_11_PMKID cpu_to_le32(0x0d010123)
121#define OID_802_11_NETWORK_TYPES_SUPPORTED ccpu2(0x0d010203) 121#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203)
122#define OID_802_11_NETWORK_TYPE_IN_USE ccpu2(0x0d010204) 122#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204)
123#define OID_802_11_TX_POWER_LEVEL ccpu2(0x0d010205) 123#define OID_802_11_TX_POWER_LEVEL cpu_to_le32(0x0d010205)
124#define OID_802_11_RSSI ccpu2(0x0d010206) 124#define OID_802_11_RSSI cpu_to_le32(0x0d010206)
125#define OID_802_11_RSSI_TRIGGER ccpu2(0x0d010207) 125#define OID_802_11_RSSI_TRIGGER cpu_to_le32(0x0d010207)
126#define OID_802_11_FRAGMENTATION_THRESHOLD ccpu2(0x0d010209) 126#define OID_802_11_FRAGMENTATION_THRESHOLD cpu_to_le32(0x0d010209)
127#define OID_802_11_RTS_THRESHOLD ccpu2(0x0d01020a) 127#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a)
128#define OID_802_11_SUPPORTED_RATES ccpu2(0x0d01020e) 128#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e)
129#define OID_802_11_CONFIGURATION ccpu2(0x0d010211) 129#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211)
130#define OID_802_11_BSSID_LIST ccpu2(0x0d010217) 130#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217)
131 131
132 132
133/* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */ 133/* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */
@@ -144,8 +144,8 @@ MODULE_PARM_DESC(workaround_interval,
144 144
145 145
146/* codes for "status" field of completion messages */ 146/* codes for "status" field of completion messages */
147#define RNDIS_STATUS_ADAPTER_NOT_READY ccpu2(0xc0010011) 147#define RNDIS_STATUS_ADAPTER_NOT_READY cpu_to_le32(0xc0010011)
148#define RNDIS_STATUS_ADAPTER_NOT_OPEN ccpu2(0xc0010012) 148#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012)
149 149
150 150
151/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c 151/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c
@@ -369,9 +369,6 @@ struct rndis_wext_private {
369}; 369};
370 370
371 371
372static const int freq_chan[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
373 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
374
375static const int rates_80211g[8] = { 6, 9, 12, 18, 24, 36, 48, 54 }; 372static const int rates_80211g[8] = { 6, 9, 12, 18, 24, 36, 48, 54 };
376 373
377static const int bcm4320_power_output[4] = { 25, 50, 75, 100 }; 374static const int bcm4320_power_output[4] = { 25, 50, 75, 100 };
@@ -445,7 +442,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
445 442
446 memset(u.get, 0, sizeof *u.get); 443 memset(u.get, 0, sizeof *u.get);
447 u.get->msg_type = RNDIS_MSG_QUERY; 444 u.get->msg_type = RNDIS_MSG_QUERY;
448 u.get->msg_len = ccpu2(sizeof *u.get); 445 u.get->msg_len = cpu_to_le32(sizeof *u.get);
449 u.get->oid = oid; 446 u.get->oid = oid;
450 447
451 ret = rndis_command(dev, u.header, buflen); 448 ret = rndis_command(dev, u.header, buflen);
@@ -494,8 +491,8 @@ static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
494 u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len); 491 u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len);
495 u.set->oid = oid; 492 u.set->oid = oid;
496 u.set->len = cpu_to_le32(len); 493 u.set->len = cpu_to_le32(len);
497 u.set->offset = ccpu2(sizeof(*u.set) - 8); 494 u.set->offset = cpu_to_le32(sizeof(*u.set) - 8);
498 u.set->handle = ccpu2(0); 495 u.set->handle = cpu_to_le32(0);
499 memcpy(u.buf + sizeof(*u.set), data, len); 496 memcpy(u.buf + sizeof(*u.set), data, len);
500 497
501 ret = rndis_command(dev, u.header, buflen); 498 ret = rndis_command(dev, u.header, buflen);
@@ -640,8 +637,8 @@ static void dsconfig_to_freq(unsigned int dsconfig, struct iw_freq *freq)
640static int freq_to_dsconfig(struct iw_freq *freq, unsigned int *dsconfig) 637static int freq_to_dsconfig(struct iw_freq *freq, unsigned int *dsconfig)
641{ 638{
642 if (freq->m < 1000 && freq->e == 0) { 639 if (freq->m < 1000 && freq->e == 0) {
643 if (freq->m >= 1 && freq->m <= ARRAY_SIZE(freq_chan)) 640 if (freq->m >= 1 && freq->m <= 14)
644 *dsconfig = freq_chan[freq->m - 1] * 1000; 641 *dsconfig = ieee80211_dsss_chan_to_freq(freq->m) * 1000;
645 else 642 else
646 return -1; 643 return -1;
647 } else { 644 } else {
@@ -1178,11 +1175,11 @@ static int rndis_iw_get_range(struct net_device *dev,
1178 range->throughput = 11 * 1000 * 1000 / 2; 1175 range->throughput = 11 * 1000 * 1000 / 2;
1179 } 1176 }
1180 1177
1181 range->num_channels = ARRAY_SIZE(freq_chan); 1178 range->num_channels = 14;
1182 1179
1183 for (i = 0; i < ARRAY_SIZE(freq_chan) && i < IW_MAX_FREQUENCIES; i++) { 1180 for (i = 0; (i < 14) && (i < IW_MAX_FREQUENCIES); i++) {
1184 range->freq[i].i = i + 1; 1181 range->freq[i].i = i + 1;
1185 range->freq[i].m = freq_chan[i] * 100000; 1182 range->freq[i].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000;
1186 range->freq[i].e = 1; 1183 range->freq[i].e = 1;
1187 } 1184 }
1188 range->num_frequency = i; 1185 range->num_frequency = i;
@@ -1633,7 +1630,7 @@ static int rndis_iw_set_scan(struct net_device *dev,
1633 devdbg(usbdev, "SIOCSIWSCAN"); 1630 devdbg(usbdev, "SIOCSIWSCAN");
1634 1631
1635 if (wrqu->data.flags == 0) { 1632 if (wrqu->data.flags == 0) {
1636 tmp = ccpu2(1); 1633 tmp = cpu_to_le32(1);
1637 ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, 1634 ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
1638 sizeof(tmp)); 1635 sizeof(tmp));
1639 evt.data.flags = 0; 1636 evt.data.flags = 0;
@@ -2431,7 +2428,7 @@ static void rndis_update_wireless_stats(struct work_struct *work)
2431 /* Send scan OID. Use of both OIDs is required to get device 2428 /* Send scan OID. Use of both OIDs is required to get device
2432 * working. 2429 * working.
2433 */ 2430 */
2434 tmp = ccpu2(1); 2431 tmp = cpu_to_le32(1);
2435 rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, 2432 rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
2436 sizeof(tmp)); 2433 sizeof(tmp));
2437 2434
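Editor's note: the rndis_wlan changes above drop the static freq_chan[] table in favour of ieee80211_dsss_chan_to_freq(). The mapping the removed table encoded is simple enough to restate standalone; the sketch below mirrors that table only (2.4 GHz DSSS channels), not the in-kernel helper itself.

#include <stdio.h>

/* 2.4 GHz DSSS channels: 1-13 are spaced 5 MHz apart starting at 2412 MHz,
 * channel 14 is the outlier at 2484 MHz -- the same values the removed
 * freq_chan[] table carried. */
static int dsss_chan_to_freq(int chan)
{
	if (chan >= 1 && chan <= 13)
		return 2407 + chan * 5;
	if (chan == 14)
		return 2484;
	return -1;
}

int main(void)
{
	for (int chan = 1; chan <= 14; chan++)
		printf("channel %2d -> %d MHz\n", chan, dsss_chan_to_freq(chan));
	return 0;
}

This also matches the new range check in freq_to_dsconfig(), which accepts channel numbers 1 through 14 directly.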
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 178b313293b..bfc5d9cf716 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -97,10 +97,11 @@ config RT2X00_LIB_CRYPTO
97 97
98config RT2X00_LIB_RFKILL 98config RT2X00_LIB_RFKILL
99 boolean 99 boolean
100 default y if (RT2X00_LIB=y && RFKILL=y) || (RT2X00_LIB=m && RFKILL!=n) 100 default y if (RT2X00_LIB=y && INPUT=y) || (RT2X00_LIB=m && INPUT!=n)
101 select INPUT_POLLDEV
101 102
102comment "rt2x00 rfkill support disabled due to modularized RFKILL and built-in rt2x00" 103comment "rt2x00 rfkill support disabled due to modularized INPUT and built-in rt2x00"
103 depends on RT2X00_LIB=y && RFKILL=m 104 depends on RT2X00_LIB=y && INPUT=m
104 105
105config RT2X00_LIB_LEDS 106config RT2X00_LIB_LEDS
106 boolean 107 boolean
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 917cb4f3b03..f22d808d8c5 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -2,6 +2,7 @@ rt2x00lib-y += rt2x00dev.o
2rt2x00lib-y += rt2x00mac.o 2rt2x00lib-y += rt2x00mac.o
3rt2x00lib-y += rt2x00config.o 3rt2x00lib-y += rt2x00config.o
4rt2x00lib-y += rt2x00queue.o 4rt2x00lib-y += rt2x00queue.o
5rt2x00lib-y += rt2x00link.o
5rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o 6rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o
6rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o 7rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o
7rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o 8rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 6a977679124..b0848259b45 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -524,6 +524,32 @@ static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev,
524 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 524 rt2x00pci_register_write(rt2x00dev, CSR12, reg);
525} 525}
526 526
527static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
528 struct rt2x00lib_conf *libconf)
529{
530 enum dev_state state =
531 (libconf->conf->flags & IEEE80211_CONF_PS) ?
532 STATE_SLEEP : STATE_AWAKE;
533 u32 reg;
534
535 if (state == STATE_SLEEP) {
536 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
537 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
538 (libconf->conf->beacon_int - 20) * 16);
539 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
540 libconf->conf->listen_interval - 1);
541
542 /* We must first disable autowake before it can be enabled */
543 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
544 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
545
546 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
547 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
548 }
549
550 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
551}
552
527static void rt2400pci_config(struct rt2x00_dev *rt2x00dev, 553static void rt2400pci_config(struct rt2x00_dev *rt2x00dev,
528 struct rt2x00lib_conf *libconf, 554 struct rt2x00lib_conf *libconf,
529 const unsigned int flags) 555 const unsigned int flags)
@@ -537,6 +563,8 @@ static void rt2400pci_config(struct rt2x00_dev *rt2x00dev,
537 rt2400pci_config_retry_limit(rt2x00dev, libconf); 563 rt2400pci_config_retry_limit(rt2x00dev, libconf);
538 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) 564 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
539 rt2400pci_config_duration(rt2x00dev, libconf); 565 rt2400pci_config_duration(rt2x00dev, libconf);
566 if (flags & IEEE80211_CONF_CHANGE_PS)
567 rt2400pci_config_ps(rt2x00dev, libconf);
540} 568}
541 569
542static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev, 570static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev,
@@ -572,35 +600,37 @@ static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev,
572 qual->false_cca = bbp; 600 qual->false_cca = bbp;
573} 601}
574 602
575static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev) 603static inline void rt2400pci_set_vgc(struct rt2x00_dev *rt2x00dev,
604 struct link_qual *qual, u8 vgc_level)
576{ 605{
577 rt2400pci_bbp_write(rt2x00dev, 13, 0x08); 606 rt2400pci_bbp_write(rt2x00dev, 13, vgc_level);
578 rt2x00dev->link.vgc_level = 0x08; 607 qual->vgc_level = vgc_level;
608 qual->vgc_level_reg = vgc_level;
579} 609}
580 610
581static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev) 611static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev,
612 struct link_qual *qual)
582{ 613{
583 u8 reg; 614 rt2400pci_set_vgc(rt2x00dev, qual, 0x08);
615}
584 616
617static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev,
618 struct link_qual *qual, const u32 count)
619{
585 /* 620 /*
 586 * The link tuner should not run longer than 60 seconds, 621 * The link tuner should not run longer than 60 seconds,
587 * and should run once every 2 seconds. 622 * and should run once every 2 seconds.
588 */ 623 */
589 if (rt2x00dev->link.count > 60 || !(rt2x00dev->link.count & 1)) 624 if (count > 60 || !(count & 1))
590 return; 625 return;
591 626
592 /* 627 /*
593 * Base r13 link tuning on the false cca count. 628 * Base r13 link tuning on the false cca count.
594 */ 629 */
595 rt2400pci_bbp_read(rt2x00dev, 13, &reg); 630 if ((qual->false_cca > 512) && (qual->vgc_level < 0x20))
596 631 rt2400pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level);
597 if (rt2x00dev->link.qual.false_cca > 512 && reg < 0x20) { 632 else if ((qual->false_cca < 100) && (qual->vgc_level > 0x08))
598 rt2400pci_bbp_write(rt2x00dev, 13, ++reg); 633 rt2400pci_set_vgc(rt2x00dev, qual, --qual->vgc_level);
599 rt2x00dev->link.vgc_level = reg;
600 } else if (rt2x00dev->link.qual.false_cca < 100 && reg > 0x08) {
601 rt2400pci_bbp_write(rt2x00dev, 13, --reg);
602 rt2x00dev->link.vgc_level = reg;
603 }
604} 634}
605 635
606/* 636/*
@@ -904,21 +934,10 @@ static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
904 934
905static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev) 935static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev)
906{ 936{
907 u32 reg;
908
909 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0);
910
911 /*
912 * Disable synchronisation.
913 */
914 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
915
916 /* 937 /*
917 * Cancel RX and TX. 938 * Disable power
918 */ 939 */
919 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 940 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0);
920 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
921 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
922} 941}
923 942
924static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, 943static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1115,6 +1134,20 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1115 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1134 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1116} 1135}
1117 1136
1137static void rt2400pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
1138 const enum data_queue_qid qid)
1139{
1140 u32 reg;
1141
1142 if (qid == QID_BEACON) {
1143 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1144 } else {
1145 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1146 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
1147 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1148 }
1149}
1150
1118/* 1151/*
1119 * RX control handlers 1152 * RX control handlers
1120 */ 1153 */
@@ -1365,7 +1398,9 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1365 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1398 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1366 1399
1367 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1400 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1368 if (value == LED_MODE_TXRX_ACTIVITY) 1401 if (value == LED_MODE_TXRX_ACTIVITY ||
1402 value == LED_MODE_DEFAULT ||
1403 value == LED_MODE_ASUS)
1369 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual, 1404 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
1370 LED_TYPE_ACTIVITY); 1405 LED_TYPE_ACTIVITY);
1371#endif /* CONFIG_RT2X00_LIB_LEDS */ 1406#endif /* CONFIG_RT2X00_LIB_LEDS */
@@ -1419,7 +1454,9 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1419 * Initialize all hw fields. 1454 * Initialize all hw fields.
1420 */ 1455 */
1421 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1456 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1422 IEEE80211_HW_SIGNAL_DBM; 1457 IEEE80211_HW_SIGNAL_DBM |
1458 IEEE80211_HW_SUPPORTS_PS |
1459 IEEE80211_HW_PS_NULLFUNC_STACK;
1423 rt2x00dev->hw->extra_tx_headroom = 0; 1460 rt2x00dev->hw->extra_tx_headroom = 0;
1424 1461
1425 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 1462 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
@@ -1572,6 +1609,7 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1572 .write_tx_data = rt2x00pci_write_tx_data, 1609 .write_tx_data = rt2x00pci_write_tx_data,
1573 .write_beacon = rt2400pci_write_beacon, 1610 .write_beacon = rt2400pci_write_beacon,
1574 .kick_tx_queue = rt2400pci_kick_tx_queue, 1611 .kick_tx_queue = rt2400pci_kick_tx_queue,
1612 .kill_tx_queue = rt2400pci_kill_tx_queue,
1575 .fill_rxdone = rt2400pci_fill_rxdone, 1613 .fill_rxdone = rt2400pci_fill_rxdone,
1576 .config_filter = rt2400pci_config_filter, 1614 .config_filter = rt2400pci_config_filter,
1577 .config_intf = rt2400pci_config_intf, 1615 .config_intf = rt2400pci_config_intf,
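Editor's note: the new rt2400pci_config_ps() above programs the autowake timer from the mac80211 configuration before toggling CSR20_AUTOWAKE off and back on. A worked example of the two field values, for an assumed beacon interval of 100 TU and a listen interval of 3 beacons, is sketched below; the inputs are illustrative, only the arithmetic mirrors the driver code.

#include <stdio.h>

int main(void)
{
	unsigned int beacon_int = 100;     /* assumed example: 100 TU beacons      */
	unsigned int listen_interval = 3;  /* assumed example: wake every 3rd TBTT */

	/* same arithmetic as rt2400pci_config_ps() above */
	unsigned int delay_after_tbcn = (beacon_int - 20) * 16;
	unsigned int tbcn_before_wakeup = listen_interval - 1;

	printf("CSR20_DELAY_AFTER_TBCN   = %u\n", delay_after_tbcn);   /* 1280 */
	printf("CSR20_TBCN_BEFORE_WAKEUP = %u\n", tbcn_before_wakeup); /* 2    */
	return 0;
}

The same pattern, writing MAC_CSR18 and without the *16 scaling, appears in the rt2500usb variant further down.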
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index 9aefda4ab3c..72ac31c3cb7 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index d3bc218ec85..eb82860c54f 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -573,6 +573,32 @@ static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev,
573 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 573 rt2x00pci_register_write(rt2x00dev, CSR12, reg);
574} 574}
575 575
576static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
577 struct rt2x00lib_conf *libconf)
578{
579 enum dev_state state =
580 (libconf->conf->flags & IEEE80211_CONF_PS) ?
581 STATE_SLEEP : STATE_AWAKE;
582 u32 reg;
583
584 if (state == STATE_SLEEP) {
585 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
586 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
587 (libconf->conf->beacon_int - 20) * 16);
588 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
589 libconf->conf->listen_interval - 1);
590
591 /* We must first disable autowake before it can be enabled */
592 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
593 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
594
595 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
596 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
597 }
598
599 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
600}
601
576static void rt2500pci_config(struct rt2x00_dev *rt2x00dev, 602static void rt2500pci_config(struct rt2x00_dev *rt2x00dev,
577 struct rt2x00lib_conf *libconf, 603 struct rt2x00lib_conf *libconf,
578 const unsigned int flags) 604 const unsigned int flags)
@@ -588,6 +614,8 @@ static void rt2500pci_config(struct rt2x00_dev *rt2x00dev,
588 rt2500pci_config_retry_limit(rt2x00dev, libconf); 614 rt2500pci_config_retry_limit(rt2x00dev, libconf);
589 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) 615 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
590 rt2500pci_config_duration(rt2x00dev, libconf); 616 rt2500pci_config_duration(rt2x00dev, libconf);
617 if (flags & IEEE80211_CONF_CHANGE_PS)
618 rt2500pci_config_ps(rt2x00dev, libconf);
591} 619}
592 620
593/* 621/*
@@ -611,29 +639,33 @@ static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev,
611 qual->false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA); 639 qual->false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA);
612} 640}
613 641
614static void rt2500pci_reset_tuner(struct rt2x00_dev *rt2x00dev) 642static inline void rt2500pci_set_vgc(struct rt2x00_dev *rt2x00dev,
643 struct link_qual *qual, u8 vgc_level)
615{ 644{
616 rt2500pci_bbp_write(rt2x00dev, 17, 0x48); 645 if (qual->vgc_level_reg != vgc_level) {
617 rt2x00dev->link.vgc_level = 0x48; 646 rt2500pci_bbp_write(rt2x00dev, 17, vgc_level);
647 qual->vgc_level_reg = vgc_level;
648 }
618} 649}
619 650
620static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev) 651static void rt2500pci_reset_tuner(struct rt2x00_dev *rt2x00dev,
652 struct link_qual *qual)
621{ 653{
622 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); 654 rt2500pci_set_vgc(rt2x00dev, qual, 0x48);
623 u8 r17; 655}
624 656
657static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
658 struct link_qual *qual, const u32 count)
659{
625 /* 660 /*
626 * To prevent collisions with MAC ASIC on chipsets 661 * To prevent collisions with MAC ASIC on chipsets
627 * up to version C the link tuning should halt after 20 662 * up to version C the link tuning should halt after 20
628 * seconds while being associated. 663 * seconds while being associated.
629 */ 664 */
630 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D && 665 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
631 rt2x00dev->intf_associated && 666 rt2x00dev->intf_associated && count > 20)
632 rt2x00dev->link.count > 20)
633 return; 667 return;
634 668
635 rt2500pci_bbp_read(rt2x00dev, 17, &r17);
636
637 /* 669 /*
638 * Chipset versions C and lower should directly continue 670 * Chipset versions C and lower should directly continue
639 * to the dynamic CCA tuning. Chipset version D and higher 671 * to the dynamic CCA tuning. Chipset version D and higher
@@ -649,29 +681,25 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev)
 649 * then corrupt the R17 tuning. To remedy this the tuning should 681 * then corrupt the R17 tuning. To remedy this the tuning should
 650 * be stopped (while making sure the R17 value will not exceed limits) 682 * be stopped (while making sure the R17 value will not exceed limits)
651 */ 683 */
652 if (rssi < -80 && rt2x00dev->link.count > 20) { 684 if (qual->rssi < -80 && count > 20) {
653 if (r17 >= 0x41) { 685 if (qual->vgc_level_reg >= 0x41)
654 r17 = rt2x00dev->link.vgc_level; 686 rt2500pci_set_vgc(rt2x00dev, qual, qual->vgc_level);
655 rt2500pci_bbp_write(rt2x00dev, 17, r17);
656 }
657 return; 687 return;
658 } 688 }
659 689
660 /* 690 /*
661 * Special big-R17 for short distance 691 * Special big-R17 for short distance
662 */ 692 */
663 if (rssi >= -58) { 693 if (qual->rssi >= -58) {
664 if (r17 != 0x50) 694 rt2500pci_set_vgc(rt2x00dev, qual, 0x50);
665 rt2500pci_bbp_write(rt2x00dev, 17, 0x50);
666 return; 695 return;
667 } 696 }
668 697
669 /* 698 /*
670 * Special mid-R17 for middle distance 699 * Special mid-R17 for middle distance
671 */ 700 */
672 if (rssi >= -74) { 701 if (qual->rssi >= -74) {
673 if (r17 != 0x41) 702 rt2500pci_set_vgc(rt2x00dev, qual, 0x41);
674 rt2500pci_bbp_write(rt2x00dev, 17, 0x41);
675 return; 703 return;
676 } 704 }
677 705
@@ -679,8 +707,8 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev)
679 * Leave short or middle distance condition, restore r17 707 * Leave short or middle distance condition, restore r17
680 * to the dynamic tuning range. 708 * to the dynamic tuning range.
681 */ 709 */
682 if (r17 >= 0x41) { 710 if (qual->vgc_level_reg >= 0x41) {
683 rt2500pci_bbp_write(rt2x00dev, 17, rt2x00dev->link.vgc_level); 711 rt2500pci_set_vgc(rt2x00dev, qual, qual->vgc_level);
684 return; 712 return;
685 } 713 }
686 714
@@ -690,12 +718,12 @@ dynamic_cca_tune:
690 * R17 is inside the dynamic tuning range, 718 * R17 is inside the dynamic tuning range,
691 * start tuning the link based on the false cca counter. 719 * start tuning the link based on the false cca counter.
692 */ 720 */
693 if (rt2x00dev->link.qual.false_cca > 512 && r17 < 0x40) { 721 if (qual->false_cca > 512 && qual->vgc_level_reg < 0x40) {
694 rt2500pci_bbp_write(rt2x00dev, 17, ++r17); 722 rt2500pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level_reg);
695 rt2x00dev->link.vgc_level = r17; 723 qual->vgc_level = qual->vgc_level_reg;
696 } else if (rt2x00dev->link.qual.false_cca < 100 && r17 > 0x32) { 724 } else if (qual->false_cca < 100 && qual->vgc_level_reg > 0x32) {
697 rt2500pci_bbp_write(rt2x00dev, 17, --r17); 725 rt2500pci_set_vgc(rt2x00dev, qual, --qual->vgc_level_reg);
698 rt2x00dev->link.vgc_level = r17; 726 qual->vgc_level = qual->vgc_level_reg;
699 } 727 }
700} 728}
701 729
@@ -1065,21 +1093,10 @@ static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
1065 1093
1066static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev) 1094static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1067{ 1095{
1068 u32 reg;
1069
1070 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0);
1071
1072 /*
1073 * Disable synchronisation.
1074 */
1075 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1076
1077 /* 1096 /*
1078 * Cancel RX and TX. 1097 * Disable power
1079 */ 1098 */
1080 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1099 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0);
1081 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
1082 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1083} 1100}
1084 1101
1085static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, 1102static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1205,7 +1222,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1205 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1222 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1206 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1223 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1207 rt2x00_set_field32(&word, TXD_W0_OFDM, 1224 rt2x00_set_field32(&word, TXD_W0_OFDM,
1208 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1225 (txdesc->rate_mode == RATE_MODE_OFDM));
1209 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); 1226 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
1210 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1227 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1211 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1228 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
@@ -1275,6 +1292,20 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1275 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1292 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1276} 1293}
1277 1294
1295static void rt2500pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
1296 const enum data_queue_qid qid)
1297{
1298 u32 reg;
1299
1300 if (qid == QID_BEACON) {
1301 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1302 } else {
1303 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1304 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
1305 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1306 }
1307}
1308
1278/* 1309/*
1279 * RX control handlers 1310 * RX control handlers
1280 */ 1311 */
@@ -1524,7 +1555,9 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1524 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1555 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1525 1556
1526 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1557 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1527 if (value == LED_MODE_TXRX_ACTIVITY) 1558 if (value == LED_MODE_TXRX_ACTIVITY ||
1559 value == LED_MODE_DEFAULT ||
1560 value == LED_MODE_ASUS)
1528 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual, 1561 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
1529 LED_TYPE_ACTIVITY); 1562 LED_TYPE_ACTIVITY);
1530#endif /* CONFIG_RT2X00_LIB_LEDS */ 1563#endif /* CONFIG_RT2X00_LIB_LEDS */
@@ -1721,7 +1754,9 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1721 * Initialize all hw fields. 1754 * Initialize all hw fields.
1722 */ 1755 */
1723 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1756 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1724 IEEE80211_HW_SIGNAL_DBM; 1757 IEEE80211_HW_SIGNAL_DBM |
1758 IEEE80211_HW_SUPPORTS_PS |
1759 IEEE80211_HW_PS_NULLFUNC_STACK;
1725 1760
1726 rt2x00dev->hw->extra_tx_headroom = 0; 1761 rt2x00dev->hw->extra_tx_headroom = 0;
1727 1762
@@ -1873,6 +1908,7 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1873 .write_tx_data = rt2x00pci_write_tx_data, 1908 .write_tx_data = rt2x00pci_write_tx_data,
1874 .write_beacon = rt2500pci_write_beacon, 1909 .write_beacon = rt2500pci_write_beacon,
1875 .kick_tx_queue = rt2500pci_kick_tx_queue, 1910 .kick_tx_queue = rt2500pci_kick_tx_queue,
1911 .kill_tx_queue = rt2500pci_kill_tx_queue,
1876 .fill_rxdone = rt2500pci_fill_rxdone, 1912 .fill_rxdone = rt2500pci_fill_rxdone,
1877 .config_filter = rt2500pci_config_filter, 1913 .config_filter = rt2500pci_config_filter,
1878 .config_intf = rt2500pci_config_intf, 1914 .config_intf = rt2500pci_config_intf,
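Editor's note: the reworked rt2500pci link tuner above keeps R17 (the VGC level) inside a dynamic range and nudges it based on the false-CCA counter once the RSSI special cases do not apply. A minimal standalone illustration of that hysteresis step, using the same thresholds but none of the register plumbing, is sketched below; the function name and the sample counts are assumptions for illustration.

#include <stdio.h>

/* One dynamic-CCA tuning step: raise the VGC level when the channel looks
 * noisy (many false CCA events), lower it again when it looks quiet, and
 * stay inside the 0x32..0x40 dynamic range -- the same bounds used above. */
static unsigned int tune_vgc(unsigned int vgc, unsigned int false_cca)
{
	if (false_cca > 512 && vgc < 0x40)
		vgc++;
	else if (false_cca < 100 && vgc > 0x32)
		vgc--;
	return vgc;
}

int main(void)
{
	unsigned int vgc = 0x32;
	const unsigned int samples[] = { 700, 650, 90, 600, 80, 50 }; /* made-up counts */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		vgc = tune_vgc(vgc, samples[i]);
		printf("false_cca=%3u -> vgc=0x%02x\n", samples[i], vgc);
	}
	return 0;
}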
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index e135247f7f8..17a0c9c8c18 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index af6b5847be5..270691ac236 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -280,6 +280,18 @@ static const struct rt2x00debug rt2500usb_rt2x00debug = {
280}; 280};
281#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 281#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
282 282
283#ifdef CONFIG_RT2X00_LIB_RFKILL
284static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
285{
286 u16 reg;
287
288 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
289 return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
290}
291#else
292#define rt2500usb_rfkill_poll NULL
293#endif /* CONFIG_RT2X00_LIB_RFKILL */
294
283#ifdef CONFIG_RT2X00_LIB_LEDS 295#ifdef CONFIG_RT2X00_LIB_LEDS
284static void rt2500usb_brightness_set(struct led_classdev *led_cdev, 296static void rt2500usb_brightness_set(struct led_classdev *led_cdev,
285 enum led_brightness brightness) 297 enum led_brightness brightness)
@@ -634,6 +646,32 @@ static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev,
634 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); 646 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
635} 647}
636 648
649static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
650 struct rt2x00lib_conf *libconf)
651{
652 enum dev_state state =
653 (libconf->conf->flags & IEEE80211_CONF_PS) ?
654 STATE_SLEEP : STATE_AWAKE;
655 u16 reg;
656
657 if (state == STATE_SLEEP) {
658 rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
659 rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON,
660 libconf->conf->beacon_int - 20);
661 rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP,
662 libconf->conf->listen_interval - 1);
663
664 /* We must first disable autowake before it can be enabled */
665 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
666 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
667
668 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
669 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
670 }
671
672 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
673}
674
637static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, 675static void rt2500usb_config(struct rt2x00_dev *rt2x00dev,
638 struct rt2x00lib_conf *libconf, 676 struct rt2x00lib_conf *libconf,
639 const unsigned int flags) 677 const unsigned int flags)
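The power-save path added above works as follows: when mac80211 sets IEEE80211_CONF_PS, rt2500usb_config_ps() programs MAC_CSR18 with a wake-up delay derived from the beacon interval and with the number of beacons that may be skipped, toggles MAC_CSR18_AUTO_WAKE off and back on (per the in-code comment, the hardware only latches new autowake parameters on that transition), and then puts the device to sleep. A small worked example of the values that end up in the register, using the same struct ieee80211_conf fields the function reads; the numbers are illustrative only:

/* Illustrative arithmetic only; beacon_int and listen_interval come from
 * struct ieee80211_conf, exactly as used by rt2500usb_config_ps(). */
static void ps_config_example(void)
{
	int beacon_int = 100;      /* conf->beacon_int: beacon interval (TU)   */
	int listen_interval = 3;   /* conf->listen_interval: wake every 3rd beacon */

	int delay_after_beacon = beacon_int - 20;        /* -> 80, written to
	                                                    MAC_CSR18_DELAY_AFTER_BEACON */
	int beacons_before_wakeup = listen_interval - 1; /* -> 2, written to
	                                                    MAC_CSR18_BEACONS_BEFORE_WAKEUP */

	(void)delay_after_beacon;
	(void)beacons_before_wakeup;
}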
@@ -647,6 +685,8 @@ static void rt2500usb_config(struct rt2x00_dev *rt2x00dev,
647 libconf->conf->power_level); 685 libconf->conf->power_level);
648 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) 686 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
649 rt2500usb_config_duration(rt2x00dev, libconf); 687 rt2500usb_config_duration(rt2x00dev, libconf);
688 if (flags & IEEE80211_CONF_CHANGE_PS)
689 rt2500usb_config_ps(rt2x00dev, libconf);
650} 690}
651 691
652/* 692/*
@@ -670,7 +710,8 @@ static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev,
670 qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR); 710 qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR);
671} 711}
672 712
673static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev) 713static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
714 struct link_qual *qual)
674{ 715{
675 u16 eeprom; 716 u16 eeprom;
676 u16 value; 717 u16 value;
@@ -691,7 +732,7 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev)
691 value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER); 732 value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER);
692 rt2500usb_bbp_write(rt2x00dev, 17, value); 733 rt2500usb_bbp_write(rt2x00dev, 17, value);
693 734
694 rt2x00dev->link.vgc_level = value; 735 qual->vgc_level = value;
695} 736}
696 737
697/* 738/*
@@ -1176,7 +1217,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1176 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1217 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1177 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1218 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1178 rt2x00_set_field32(&word, TXD_W0_OFDM, 1219 rt2x00_set_field32(&word, TXD_W0_OFDM,
1179 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1220 (txdesc->rate_mode == RATE_MODE_OFDM));
1180 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1221 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1181 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1222 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1182 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1223 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
@@ -1562,12 +1603,22 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1562 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1603 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1563 1604
1564 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1605 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1565 if (value == LED_MODE_TXRX_ACTIVITY) 1606 if (value == LED_MODE_TXRX_ACTIVITY ||
1607 value == LED_MODE_DEFAULT ||
1608 value == LED_MODE_ASUS)
1566 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual, 1609 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual,
1567 LED_TYPE_ACTIVITY); 1610 LED_TYPE_ACTIVITY);
1568#endif /* CONFIG_RT2X00_LIB_LEDS */ 1611#endif /* CONFIG_RT2X00_LIB_LEDS */
1569 1612
1570 /* 1613 /*
1614 * Detect if this device has a hardware controlled radio.
1615 */
1616#ifdef CONFIG_RT2X00_LIB_RFKILL
1617 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1618 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
1619#endif /* CONFIG_RT2X00_LIB_RFKILL */
1620
1621 /*
1571 * Check if the BBP tuning should be disabled. 1622 * Check if the BBP tuning should be disabled.
1572 */ 1623 */
1573 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 1624 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
@@ -1752,7 +1803,9 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1752 rt2x00dev->hw->flags = 1803 rt2x00dev->hw->flags =
1753 IEEE80211_HW_RX_INCLUDES_FCS | 1804 IEEE80211_HW_RX_INCLUDES_FCS |
1754 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1805 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1755 IEEE80211_HW_SIGNAL_DBM; 1806 IEEE80211_HW_SIGNAL_DBM |
1807 IEEE80211_HW_SUPPORTS_PS |
1808 IEEE80211_HW_PS_NULLFUNC_STACK;
1756 1809
1757 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1810 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
1758 1811
@@ -1839,7 +1892,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1839 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags); 1892 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
1840 if (!modparam_nohwcrypt) { 1893 if (!modparam_nohwcrypt) {
1841 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 1894 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
1842 __set_bit(CONFIG_CRYPTO_COPY_IV, &rt2x00dev->flags); 1895 __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags);
1843 } 1896 }
1844 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); 1897 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags);
1845 1898
@@ -1873,6 +1926,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1873 .uninitialize = rt2x00usb_uninitialize, 1926 .uninitialize = rt2x00usb_uninitialize,
1874 .clear_entry = rt2x00usb_clear_entry, 1927 .clear_entry = rt2x00usb_clear_entry,
1875 .set_device_state = rt2500usb_set_device_state, 1928 .set_device_state = rt2500usb_set_device_state,
1929 .rfkill_poll = rt2500usb_rfkill_poll,
1876 .link_stats = rt2500usb_link_stats, 1930 .link_stats = rt2500usb_link_stats,
1877 .reset_tuner = rt2500usb_reset_tuner, 1931 .reset_tuner = rt2500usb_reset_tuner,
1878 .link_tuner = rt2500usb_link_tuner, 1932 .link_tuner = rt2500usb_link_tuner,
@@ -1881,6 +1935,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1881 .write_beacon = rt2500usb_write_beacon, 1935 .write_beacon = rt2500usb_write_beacon,
1882 .get_tx_data_len = rt2500usb_get_tx_data_len, 1936 .get_tx_data_len = rt2500usb_get_tx_data_len,
1883 .kick_tx_queue = rt2500usb_kick_tx_queue, 1937 .kick_tx_queue = rt2500usb_kick_tx_queue,
1938 .kill_tx_queue = rt2x00usb_kill_tx_queue,
1884 .fill_rxdone = rt2500usb_fill_rxdone, 1939 .fill_rxdone = rt2500usb_fill_rxdone,
1885 .config_shared_key = rt2500usb_config_key, 1940 .config_shared_key = rt2500usb_config_key,
1886 .config_pairwise_key = rt2500usb_config_key, 1941 .config_pairwise_key = rt2500usb_config_key,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 4347dfdabcd..afce0e0322c 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -189,6 +189,14 @@
189 * MAC_CSR19: GPIO control register. 189 * MAC_CSR19: GPIO control register.
190 */ 190 */
191#define MAC_CSR19 0x0426 191#define MAC_CSR19 0x0426
192#define MAC_CSR19_BIT0 FIELD32(0x0001)
193#define MAC_CSR19_BIT1 FIELD32(0x0002)
194#define MAC_CSR19_BIT2 FIELD32(0x0004)
195#define MAC_CSR19_BIT3 FIELD32(0x0008)
196#define MAC_CSR19_BIT4 FIELD32(0x0010)
197#define MAC_CSR19_BIT5 FIELD32(0x0020)
198#define MAC_CSR19_BIT6 FIELD32(0x0040)
199#define MAC_CSR19_BIT7 FIELD32(0x0080)
192 200
193/* 201/*
194 * MAC_CSR20: LED control register. 202 * MAC_CSR20: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 39ecf3b82ca..84bd6f19acb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,7 @@
33#include <linux/leds.h> 33#include <linux/leds.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36#include <linux/input-polldev.h>
36 37
37#include <net/mac80211.h> 38#include <net/mac80211.h>
38 39
@@ -44,7 +45,7 @@
44/* 45/*
45 * Module information. 46 * Module information.
46 */ 47 */
47#define DRV_VERSION "2.2.3" 48#define DRV_VERSION "2.3.0"
48#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 49#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
49 50
50/* 51/*
@@ -177,52 +178,41 @@ struct antenna_setup {
177 */ 178 */
178struct link_qual { 179struct link_qual {
179 /* 180 /*
180 * Statistics required for Link tuning. 181 * Statistics required for Link tuning by driver
181 * For the average RSSI value we use the "Walking average" approach. 182 * The rssi value is provided by rt2x00lib during the
182 * When adding RSSI to the average value the following calculation 183 * link_tuner() callback function.
183 * is needed: 184 * The false_cca field is filled during the link_stats()
184 * 185 * callback function and could be used during the
185 * avg_rssi = ((avg_rssi * 7) + rssi) / 8; 186 * link_tuner() callback function.
186 *
187 * The advantage of this approach is that we only need 1 variable
188 * to store the average in (No need for a count and a total).
189 * But more importantly, normal average values will over time
190 * move less and less towards newly added values this results
191 * that with link tuning, the device can have a very good RSSI
192 * for a few minutes but when the device is moved away from the AP
193 * the average will not decrease fast enough to compensate.
194 * The walking average compensates this and will move towards
195 * the new values correctly allowing a effective link tuning.
196 */ 187 */
197 int avg_rssi; 188 int rssi;
198 int false_cca; 189 int false_cca;
199 190
200 /* 191 /*
201 * Statistics required for Signal quality calculation. 192 * VGC levels
202 * For calculating the Signal quality we have to determine 193 * Hardware driver will tune the VGC level during each call
203 * the total number of success and failed RX and TX frames. 194 * to the link_tuner() callback function. This vgc_level is
204 * After that we also use the average RSSI value to help 195 * determined based on the link quality statistics like
205 * determining the signal quality. 196 * average RSSI and the false CCA count.
206 * For the calculation we will use the following algorithm:
207 *
208 * rssi_percentage = (avg_rssi * 100) / rssi_offset
209 * rx_percentage = (rx_success * 100) / rx_total
210 * tx_percentage = (tx_success * 100) / tx_total
211 * avg_signal = ((WEIGHT_RSSI * avg_rssi) +
212 * (WEIGHT_TX * tx_percentage) +
213 * (WEIGHT_RX * rx_percentage)) / 100
214 * 197 *
215 * This value should then be checked to not be greated then 100. 198 * In some cases the drivers need to differentiate between
199 * the currently "desired" VGC level and the level configured
200 * in the hardware. The latter is important to reduce the
201 * number of BBP register reads to reduce register access
202 * overhead. For this reason we store both values here.
203 */
204 u8 vgc_level;
205 u8 vgc_level_reg;
206
207 /*
208 * Statistics required for Signal quality calculation.
209 * These fields might be changed during the link_stats()
210 * callback function.
216 */ 211 */
217 int rx_percentage;
218 int rx_success; 212 int rx_success;
219 int rx_failed; 213 int rx_failed;
220 int tx_percentage;
221 int tx_success; 214 int tx_success;
222 int tx_failed; 215 int tx_failed;
223#define WEIGHT_RSSI 20
224#define WEIGHT_RX 40
225#define WEIGHT_TX 40
226}; 216};
227 217
228/* 218/*
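The long comment removed in this hunk documented the "walking average" used for the RSSI, avg_rssi = ((avg_rssi * 7) + rssi) / 8, which is exactly what the MOVING_AVERAGE() helper removed a few hunks below implemented (the logic moves into the new link-tuning code). A minimal sketch of its behaviour, reusing the macro as it was defined in this header:

/* Sketch only: reproduces the removed MOVING_AVERAGE() helper to show why
 * a single outlier barely moves the average while a sustained change does. */
#define MOVING_AVERAGE(__avg, __val, __samples) \
	((((__avg) * ((__samples) - 1)) + (__val)) / (__samples))

static int update_avg_rssi(int avg_rssi, int rssi)
{
	/* Each new sample contributes 1/8th of the resulting average. */
	return MOVING_AVERAGE(avg_rssi, rssi, 8);
}

/*
 * update_avg_rssi(-50, -80) == -53   (one weak sample: small step)
 * update_avg_rssi(-53, -80) == -56   (repeated weak samples: average drifts)
 */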
@@ -286,9 +276,16 @@ struct link {
286 struct link_ant ant; 276 struct link_ant ant;
287 277
288 /* 278 /*
289 * Active VGC level 279 * Currently active average RSSI value
290 */ 280 */
291 int vgc_level; 281 int avg_rssi;
282
283 /*
284 * Currently precalculated percentages of successful
285 * TX and RX frames.
286 */
287 int rx_percentage;
288 int tx_percentage;
292 289
293 /* 290 /*
294 * Work structure for scheduling periodic link tuning. 291 * Work structure for scheduling periodic link tuning.
@@ -297,55 +294,6 @@ struct link {
297}; 294};
298 295
299/* 296/*
300 * Small helper macro to work with moving/walking averages.
301 */
302#define MOVING_AVERAGE(__avg, __val, __samples) \
303 ( (((__avg) * ((__samples) - 1)) + (__val)) / (__samples) )
304
305/*
306 * When we lack RSSI information return something less then -80 to
307 * tell the driver to tune the device to maximum sensitivity.
308 */
309#define DEFAULT_RSSI ( -128 )
310
311/*
312 * Link quality access functions.
313 */
314static inline int rt2x00_get_link_rssi(struct link *link)
315{
316 if (link->qual.avg_rssi && link->qual.rx_success)
317 return link->qual.avg_rssi;
318 return DEFAULT_RSSI;
319}
320
321static inline int rt2x00_get_link_ant_rssi(struct link *link)
322{
323 if (link->ant.rssi_ant && link->qual.rx_success)
324 return link->ant.rssi_ant;
325 return DEFAULT_RSSI;
326}
327
328static inline void rt2x00_reset_link_ant_rssi(struct link *link)
329{
330 link->ant.rssi_ant = 0;
331}
332
333static inline int rt2x00_get_link_ant_rssi_history(struct link *link,
334 enum antenna ant)
335{
336 if (link->ant.rssi_history[ant - ANTENNA_A])
337 return link->ant.rssi_history[ant - ANTENNA_A];
338 return DEFAULT_RSSI;
339}
340
341static inline int rt2x00_update_ant_rssi(struct link *link, int rssi)
342{
343 int old_rssi = link->ant.rssi_history[link->ant.active.rx - ANTENNA_A];
344 link->ant.rssi_history[link->ant.active.rx - ANTENNA_A] = rssi;
345 return old_rssi;
346}
347
348/*
349 * Interface structure 297 * Interface structure
350 * Per interface configuration details, this structure 298 * Per interface configuration details, this structure
351 * is allocated as the private data for ieee80211_vif. 299 * is allocated as the private data for ieee80211_vif.
@@ -448,7 +396,7 @@ struct rt2x00lib_erp {
448 int ack_timeout; 396 int ack_timeout;
449 int ack_consume_time; 397 int ack_consume_time;
450 398
451 u64 basic_rates; 399 u32 basic_rates;
452 400
453 int slot_time; 401 int slot_time;
454 402
@@ -520,9 +468,10 @@ struct rt2x00lib_ops {
520 */ 468 */
521 int (*probe_hw) (struct rt2x00_dev *rt2x00dev); 469 int (*probe_hw) (struct rt2x00_dev *rt2x00dev);
522 char *(*get_firmware_name) (struct rt2x00_dev *rt2x00dev); 470 char *(*get_firmware_name) (struct rt2x00_dev *rt2x00dev);
523 u16 (*get_firmware_crc) (const void *data, const size_t len); 471 int (*check_firmware) (struct rt2x00_dev *rt2x00dev,
524 int (*load_firmware) (struct rt2x00_dev *rt2x00dev, const void *data, 472 const u8 *data, const size_t len);
525 const size_t len); 473 int (*load_firmware) (struct rt2x00_dev *rt2x00dev,
474 const u8 *data, const size_t len);
526 475
527 /* 476 /*
528 * Device initialization/deinitialization handlers. 477 * Device initialization/deinitialization handlers.
@@ -544,8 +493,10 @@ struct rt2x00lib_ops {
544 int (*rfkill_poll) (struct rt2x00_dev *rt2x00dev); 493 int (*rfkill_poll) (struct rt2x00_dev *rt2x00dev);
545 void (*link_stats) (struct rt2x00_dev *rt2x00dev, 494 void (*link_stats) (struct rt2x00_dev *rt2x00dev,
546 struct link_qual *qual); 495 struct link_qual *qual);
547 void (*reset_tuner) (struct rt2x00_dev *rt2x00dev); 496 void (*reset_tuner) (struct rt2x00_dev *rt2x00dev,
548 void (*link_tuner) (struct rt2x00_dev *rt2x00dev); 497 struct link_qual *qual);
498 void (*link_tuner) (struct rt2x00_dev *rt2x00dev,
499 struct link_qual *qual, const u32 count);
549 500
550 /* 501 /*
551 * TX control handlers 502 * TX control handlers
@@ -558,6 +509,8 @@ struct rt2x00lib_ops {
558 int (*get_tx_data_len) (struct queue_entry *entry); 509 int (*get_tx_data_len) (struct queue_entry *entry);
559 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 510 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
560 const enum data_queue_qid queue); 511 const enum data_queue_qid queue);
512 void (*kill_tx_queue) (struct rt2x00_dev *rt2x00dev,
513 const enum data_queue_qid queue);
561 514
562 /* 515 /*
563 * RX control handlers 516 * RX control handlers
@@ -625,7 +578,6 @@ enum rt2x00_flags {
625 DEVICE_STATE_REGISTERED_HW, 578 DEVICE_STATE_REGISTERED_HW,
626 DEVICE_STATE_INITIALIZED, 579 DEVICE_STATE_INITIALIZED,
627 DEVICE_STATE_STARTED, 580 DEVICE_STATE_STARTED,
628 DEVICE_STATE_STARTED_SUSPEND,
629 DEVICE_STATE_ENABLED_RADIO, 581 DEVICE_STATE_ENABLED_RADIO,
630 DEVICE_STATE_DISABLED_RADIO_HW, 582 DEVICE_STATE_DISABLED_RADIO_HW,
631 583
@@ -637,6 +589,7 @@ enum rt2x00_flags {
637 DRIVER_REQUIRE_ATIM_QUEUE, 589 DRIVER_REQUIRE_ATIM_QUEUE,
638 DRIVER_REQUIRE_SCHEDULED, 590 DRIVER_REQUIRE_SCHEDULED,
639 DRIVER_REQUIRE_DMA, 591 DRIVER_REQUIRE_DMA,
592 DRIVER_REQUIRE_COPY_IV,
640 593
641 /* 594 /*
642 * Driver features 595 * Driver features
@@ -653,7 +606,6 @@ enum rt2x00_flags {
653 CONFIG_EXTERNAL_LNA_BG, 606 CONFIG_EXTERNAL_LNA_BG,
654 CONFIG_DOUBLE_ANTENNA, 607 CONFIG_DOUBLE_ANTENNA,
655 CONFIG_DISABLE_LINK_TUNING, 608 CONFIG_DISABLE_LINK_TUNING,
656 CONFIG_CRYPTO_COPY_IV,
657}; 609};
658 610
659/* 611/*
@@ -689,8 +641,8 @@ struct rt2x00_dev {
689 unsigned long rfkill_state; 641 unsigned long rfkill_state;
690#define RFKILL_STATE_ALLOCATED 1 642#define RFKILL_STATE_ALLOCATED 1
691#define RFKILL_STATE_REGISTERED 2 643#define RFKILL_STATE_REGISTERED 2
692 struct rfkill *rfkill; 644#define RFKILL_STATE_BLOCKED 3
693 struct delayed_work rfkill_work; 645 struct input_polled_dev *rfkill_poll_dev;
694#endif /* CONFIG_RT2X00_LIB_RFKILL */ 646#endif /* CONFIG_RT2X00_LIB_RFKILL */
695 647
696 /* 648 /*
@@ -918,7 +870,7 @@ static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip)
918 return (chipset->rf == chip); 870 return (chipset->rf == chip);
919} 871}
920 872
921static inline u16 rt2x00_rev(const struct rt2x00_chip *chipset) 873static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset)
922{ 874{
923 return chipset->rev; 875 return chipset->rev;
924} 876}
@@ -982,7 +934,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
982 int mc_count, struct dev_addr_list *mc_list); 934 int mc_count, struct dev_addr_list *mc_list);
983#ifdef CONFIG_RT2X00_LIB_CRYPTO 935#ifdef CONFIG_RT2X00_LIB_CRYPTO
984int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 936int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
985 const u8 *local_address, const u8 *address, 937 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
986 struct ieee80211_key_conf *key); 938 struct ieee80211_key_conf *key);
987#else 939#else
988#define rt2x00mac_set_key NULL 940#define rt2x00mac_set_key NULL
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index e66fb316cd6..9c2f5517af2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -32,7 +32,7 @@
32void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, 32void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
33 struct rt2x00_intf *intf, 33 struct rt2x00_intf *intf,
34 enum nl80211_iftype type, 34 enum nl80211_iftype type,
35 u8 *mac, u8 *bssid) 35 const u8 *mac, const u8 *bssid)
36{ 36{
37 struct rt2x00intf_conf conf; 37 struct rt2x00intf_conf conf;
38 unsigned int flags = 0; 38 unsigned int flags = 0;
@@ -42,6 +42,8 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
42 switch (type) { 42 switch (type) {
43 case NL80211_IFTYPE_ADHOC: 43 case NL80211_IFTYPE_ADHOC:
44 case NL80211_IFTYPE_AP: 44 case NL80211_IFTYPE_AP:
45 case NL80211_IFTYPE_MESH_POINT:
46 case NL80211_IFTYPE_WDS:
45 conf.sync = TSF_SYNC_BEACON; 47 conf.sync = TSF_SYNC_BEACON;
46 break; 48 break;
47 case NL80211_IFTYPE_STATION: 49 case NL80211_IFTYPE_STATION:
@@ -152,8 +154,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
152 */ 154 */
153 rt2x00dev->ops->lib->config_ant(rt2x00dev, ant); 155 rt2x00dev->ops->lib->config_ant(rt2x00dev, ant);
154 156
155 rt2x00lib_reset_link_tuner(rt2x00dev); 157 rt2x00link_reset_tuner(rt2x00dev, true);
156 rt2x00_reset_link_ant_rssi(&rt2x00dev->link);
157 158
158 memcpy(active, ant, sizeof(*ant)); 159 memcpy(active, ant, sizeof(*ant));
159 160
@@ -191,7 +192,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
191 * which means we need to reset the link tuner. 192 * which means we need to reset the link tuner.
192 */ 193 */
193 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) 194 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
194 rt2x00lib_reset_link_tuner(rt2x00dev); 195 rt2x00link_reset_tuner(rt2x00dev, false);
195 196
196 rt2x00dev->curr_band = conf->channel->band; 197 rt2x00dev->curr_band = conf->channel->band;
197 rt2x00dev->tx_power = conf->power_level; 198 rt2x00dev->tx_power = conf->power_level;
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index aee9cba13eb..0b41845d954 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -49,9 +49,14 @@ enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
49void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, 49void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
50 struct txentry_desc *txdesc) 50 struct txentry_desc *txdesc)
51{ 51{
52 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
52 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 53 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
53 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 54 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
54 55
56 if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) ||
57 !hw_key || entry->skb->do_not_encrypt)
58 return;
59
55 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags); 60 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
56 61
57 txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key); 62 txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
@@ -69,11 +74,17 @@ void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
69 __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags); 74 __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
70} 75}
71 76
72unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info) 77unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
78 struct sk_buff *skb)
73{ 79{
80 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
74 struct ieee80211_key_conf *key = tx_info->control.hw_key; 81 struct ieee80211_key_conf *key = tx_info->control.hw_key;
75 unsigned int overhead = 0; 82 unsigned int overhead = 0;
76 83
84 if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) ||
85 !key || skb->do_not_encrypt)
86 return overhead;
87
77 /* 88 /*
78 * Extend frame length to include IV/EIV/ICV/MMIC, 89 * Extend frame length to include IV/EIV/ICV/MMIC,
79 * note that these lengths should only be added when 90 * note that these lengths should only be added when
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 54dd10060bf..dcdce7f746b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -130,9 +130,11 @@ struct rt2x00debug_intf {
130}; 130};
131 131
132void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, 132void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
133 enum cipher cipher, enum rx_crypto status) 133 struct rxdone_entry_desc *rxdesc)
134{ 134{
135 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; 135 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
136 enum cipher cipher = rxdesc->cipher;
137 enum rx_crypto status = rxdesc->cipher_status;
136 138
137 if (cipher == CIPHER_TKIP_NO_MIC) 139 if (cipher == CIPHER_TKIP_NO_MIC)
138 cipher = CIPHER_TKIP; 140 cipher = CIPHER_TKIP;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index a92104dfee9..035cbc98c59 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 87c0f2c8307..05f94e21b42 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -30,60 +30,6 @@
30#include "rt2x00lib.h" 30#include "rt2x00lib.h"
31 31
32/* 32/*
33 * Link tuning handlers
34 */
35void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
36{
37 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
38 return;
39
40 /*
41 * Reset link information.
42 * Both the currently active vgc level as well as
43 * the link tuner counter should be reset. Resetting
44 * the counter is important for devices where the
45 * device should only perform link tuning during the
46 * first minute after being enabled.
47 */
48 rt2x00dev->link.count = 0;
49 rt2x00dev->link.vgc_level = 0;
50
51 /*
52 * Reset the link tuner.
53 */
54 rt2x00dev->ops->lib->reset_tuner(rt2x00dev);
55}
56
57static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
58{
59 /*
60 * Clear all (possibly) pre-existing quality statistics.
61 */
62 memset(&rt2x00dev->link.qual, 0, sizeof(rt2x00dev->link.qual));
63
64 /*
65 * The RX and TX percentage should start at 50%
66 * this will assure we will get at least get some
67 * decent value when the link tuner starts.
68 * The value will be dropped and overwritten with
69 * the correct (measured )value anyway during the
70 * first run of the link tuner.
71 */
72 rt2x00dev->link.qual.rx_percentage = 50;
73 rt2x00dev->link.qual.tx_percentage = 50;
74
75 rt2x00lib_reset_link_tuner(rt2x00dev);
76
77 queue_delayed_work(rt2x00dev->hw->workqueue,
78 &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
79}
80
81static void rt2x00lib_stop_link_tuner(struct rt2x00_dev *rt2x00dev)
82{
83 cancel_delayed_work_sync(&rt2x00dev->link.work);
84}
85
86/*
87 * Radio control handlers. 33 * Radio control handlers.
88 */ 34 */
89int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) 35int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -137,9 +83,10 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
137 return; 83 return;
138 84
139 /* 85 /*
140 * Stop the TX queues. 86 * Stop the TX queues in mac80211.
141 */ 87 */
142 ieee80211_stop_queues(rt2x00dev->hw); 88 ieee80211_stop_queues(rt2x00dev->hw);
89 rt2x00queue_stop_queues(rt2x00dev);
143 90
144 /* 91 /*
145 * Disable RX. 92 * Disable RX.
@@ -161,238 +108,15 @@ void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
161 * When we are disabling the RX, we should also stop the link tuner. 108 * When we are disabling the RX, we should also stop the link tuner.
162 */ 109 */
163 if (state == STATE_RADIO_RX_OFF) 110 if (state == STATE_RADIO_RX_OFF)
164 rt2x00lib_stop_link_tuner(rt2x00dev); 111 rt2x00link_stop_tuner(rt2x00dev);
165 112
166 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 113 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
167 114
168 /* 115 /*
169 * When we are enabling the RX, we should also start the link tuner. 116 * When we are enabling the RX, we should also start the link tuner.
170 */ 117 */
171 if (state == STATE_RADIO_RX_ON && 118 if (state == STATE_RADIO_RX_ON)
172 (rt2x00dev->intf_ap_count || rt2x00dev->intf_sta_count)) 119 rt2x00link_start_tuner(rt2x00dev);
173 rt2x00lib_start_link_tuner(rt2x00dev);
174}
175
176static void rt2x00lib_evaluate_antenna_sample(struct rt2x00_dev *rt2x00dev)
177{
178 struct antenna_setup ant;
179 int sample_a =
180 rt2x00_get_link_ant_rssi_history(&rt2x00dev->link, ANTENNA_A);
181 int sample_b =
182 rt2x00_get_link_ant_rssi_history(&rt2x00dev->link, ANTENNA_B);
183
184 memcpy(&ant, &rt2x00dev->link.ant.active, sizeof(ant));
185
186 /*
187 * We are done sampling. Now we should evaluate the results.
188 */
189 rt2x00dev->link.ant.flags &= ~ANTENNA_MODE_SAMPLE;
190
191 /*
192 * During the last period we have sampled the RSSI
193 * from both antenna's. It now is time to determine
194 * which antenna demonstrated the best performance.
195 * When we are already on the antenna with the best
196 * performance, then there really is nothing for us
197 * left to do.
198 */
199 if (sample_a == sample_b)
200 return;
201
202 if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY)
203 ant.rx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
204
205 if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)
206 ant.tx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
207
208 rt2x00lib_config_antenna(rt2x00dev, &ant);
209}
210
211static void rt2x00lib_evaluate_antenna_eval(struct rt2x00_dev *rt2x00dev)
212{
213 struct antenna_setup ant;
214 int rssi_curr = rt2x00_get_link_ant_rssi(&rt2x00dev->link);
215 int rssi_old = rt2x00_update_ant_rssi(&rt2x00dev->link, rssi_curr);
216
217 memcpy(&ant, &rt2x00dev->link.ant.active, sizeof(ant));
218
219 /*
220 * Legacy driver indicates that we should swap antenna's
221 * when the difference in RSSI is greater that 5. This
222 * also should be done when the RSSI was actually better
223 * then the previous sample.
224 * When the difference exceeds the threshold we should
225 * sample the rssi from the other antenna to make a valid
226 * comparison between the 2 antennas.
227 */
228 if (abs(rssi_curr - rssi_old) < 5)
229 return;
230
231 rt2x00dev->link.ant.flags |= ANTENNA_MODE_SAMPLE;
232
233 if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY)
234 ant.rx = (ant.rx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A;
235
236 if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)
237 ant.tx = (ant.tx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A;
238
239 rt2x00lib_config_antenna(rt2x00dev, &ant);
240}
241
242static void rt2x00lib_evaluate_antenna(struct rt2x00_dev *rt2x00dev)
243{
244 /*
245 * Determine if software diversity is enabled for
246 * either the TX or RX antenna (or both).
247 * Always perform this check since within the link
248 * tuner interval the configuration might have changed.
249 */
250 rt2x00dev->link.ant.flags &= ~ANTENNA_RX_DIVERSITY;
251 rt2x00dev->link.ant.flags &= ~ANTENNA_TX_DIVERSITY;
252
253 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
254 rt2x00dev->link.ant.flags |= ANTENNA_RX_DIVERSITY;
255 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
256 rt2x00dev->link.ant.flags |= ANTENNA_TX_DIVERSITY;
257
258 if (!(rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) &&
259 !(rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)) {
260 rt2x00dev->link.ant.flags = 0;
261 return;
262 }
263
264 /*
265 * If we have only sampled the data over the last period
266 * we should now harvest the data. Otherwise just evaluate
267 * the data. The latter should only be performed once
268 * every 2 seconds.
269 */
270 if (rt2x00dev->link.ant.flags & ANTENNA_MODE_SAMPLE)
271 rt2x00lib_evaluate_antenna_sample(rt2x00dev);
272 else if (rt2x00dev->link.count & 1)
273 rt2x00lib_evaluate_antenna_eval(rt2x00dev);
274}
275
276static void rt2x00lib_update_link_stats(struct link *link, int rssi)
277{
278 int avg_rssi = rssi;
279
280 /*
281 * Update global RSSI
282 */
283 if (link->qual.avg_rssi)
284 avg_rssi = MOVING_AVERAGE(link->qual.avg_rssi, rssi, 8);
285 link->qual.avg_rssi = avg_rssi;
286
287 /*
288 * Update antenna RSSI
289 */
290 if (link->ant.rssi_ant)
291 rssi = MOVING_AVERAGE(link->ant.rssi_ant, rssi, 8);
292 link->ant.rssi_ant = rssi;
293}
294
295static void rt2x00lib_precalculate_link_signal(struct link_qual *qual)
296{
297 if (qual->rx_failed || qual->rx_success)
298 qual->rx_percentage =
299 (qual->rx_success * 100) /
300 (qual->rx_failed + qual->rx_success);
301 else
302 qual->rx_percentage = 50;
303
304 if (qual->tx_failed || qual->tx_success)
305 qual->tx_percentage =
306 (qual->tx_success * 100) /
307 (qual->tx_failed + qual->tx_success);
308 else
309 qual->tx_percentage = 50;
310
311 qual->rx_success = 0;
312 qual->rx_failed = 0;
313 qual->tx_success = 0;
314 qual->tx_failed = 0;
315}
316
317static int rt2x00lib_calculate_link_signal(struct rt2x00_dev *rt2x00dev,
318 int rssi)
319{
320 int rssi_percentage = 0;
321 int signal;
322
323 /*
324 * We need a positive value for the RSSI.
325 */
326 if (rssi < 0)
327 rssi += rt2x00dev->rssi_offset;
328
329 /*
330 * Calculate the different percentages,
331 * which will be used for the signal.
332 */
333 if (rt2x00dev->rssi_offset)
334 rssi_percentage = (rssi * 100) / rt2x00dev->rssi_offset;
335
336 /*
337 * Add the individual percentages and use the WEIGHT
338 * defines to calculate the current link signal.
339 */
340 signal = ((WEIGHT_RSSI * rssi_percentage) +
341 (WEIGHT_TX * rt2x00dev->link.qual.tx_percentage) +
342 (WEIGHT_RX * rt2x00dev->link.qual.rx_percentage)) / 100;
343
344 return (signal > 100) ? 100 : signal;
345}
346
347static void rt2x00lib_link_tuner(struct work_struct *work)
348{
349 struct rt2x00_dev *rt2x00dev =
350 container_of(work, struct rt2x00_dev, link.work.work);
351
352 /*
353 * When the radio is shutting down we should
354 * immediately cease all link tuning.
355 */
356 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
357 return;
358
359 /*
360 * Update statistics.
361 */
362 rt2x00dev->ops->lib->link_stats(rt2x00dev, &rt2x00dev->link.qual);
363 rt2x00dev->low_level_stats.dot11FCSErrorCount +=
364 rt2x00dev->link.qual.rx_failed;
365
366 /*
367 * Only perform the link tuning when Link tuning
368 * has been enabled (This could have been disabled from the EEPROM).
369 */
370 if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags))
371 rt2x00dev->ops->lib->link_tuner(rt2x00dev);
372
373 /*
374 * Precalculate a portion of the link signal which is
375 * in based on the tx/rx success/failure counters.
376 */
377 rt2x00lib_precalculate_link_signal(&rt2x00dev->link.qual);
378
379 /*
380 * Send a signal to the led to update the led signal strength.
381 */
382 rt2x00leds_led_quality(rt2x00dev, rt2x00dev->link.qual.avg_rssi);
383
384 /*
385 * Evaluate antenna setup, make this the last step since this could
386 * possibly reset some statistics.
387 */
388 rt2x00lib_evaluate_antenna(rt2x00dev);
389
390 /*
391 * Increase tuner counter, and reschedule the next link tuner run.
392 */
393 rt2x00dev->link.count++;
394 queue_delayed_work(rt2x00dev->hw->workqueue,
395 &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
396} 120}
397 121
398static void rt2x00lib_packetfilter_scheduled(struct work_struct *work) 122static void rt2x00lib_packetfilter_scheduled(struct work_struct *work)
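The helpers removed above contained the signal-quality math the driver reports to mac80211. That calculation is not dropped outright: the rt2x00lib.h hunk later in this patch declares rt2x00link_calculate_signal() as its replacement, although the new implementation itself is not shown in these hunks. For reference, the formula the removed code used is reproduced below; whether the new link code keeps exactly this weighting cannot be confirmed from this section alone.

/* Reference sketch of the removed rt2x00lib_calculate_link_signal(). */
static int calculate_link_signal_sketch(int rssi, int rssi_offset,
					int tx_percentage, int rx_percentage)
{
	int rssi_percentage = 0;
	int signal;

	/* The RSSI must be made positive before it becomes a percentage. */
	if (rssi < 0)
		rssi += rssi_offset;

	if (rssi_offset)
		rssi_percentage = (rssi * 100) / rssi_offset;

	/*
	 * Weighted sum using the removed WEIGHT_RSSI (20), WEIGHT_TX (40)
	 * and WEIGHT_RX (40) defines; tx/rx percentages were precalculated
	 * from the success/failure counters and default to 50%.
	 */
	signal = ((20 * rssi_percentage) +
		  (40 * tx_percentage) +
		  (40 * rx_percentage)) / 100;

	return (signal > 100) ? 100 : signal;
}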
@@ -434,7 +158,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
434 return; 158 return;
435 159
436 if (delayed_flags & DELAYED_UPDATE_BEACON) 160 if (delayed_flags & DELAYED_UPDATE_BEACON)
437 rt2x00queue_update_beacon(rt2x00dev, vif); 161 rt2x00queue_update_beacon(rt2x00dev, vif, true);
438 162
439 if (delayed_flags & DELAYED_CONFIG_ERP) 163 if (delayed_flags & DELAYED_CONFIG_ERP)
440 rt2x00lib_config_erp(rt2x00dev, intf, &conf); 164 rt2x00lib_config_erp(rt2x00dev, intf, &conf);
@@ -467,7 +191,9 @@ static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
467 struct rt2x00_intf *intf = vif_to_intf(vif); 191 struct rt2x00_intf *intf = vif_to_intf(vif);
468 192
469 if (vif->type != NL80211_IFTYPE_AP && 193 if (vif->type != NL80211_IFTYPE_AP &&
470 vif->type != NL80211_IFTYPE_ADHOC) 194 vif->type != NL80211_IFTYPE_ADHOC &&
195 vif->type != NL80211_IFTYPE_MESH_POINT &&
196 vif->type != NL80211_IFTYPE_WDS)
471 return; 197 return;
472 198
473 /* 199 /*
@@ -490,7 +216,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
490 rt2x00lib_beacondone_iter, 216 rt2x00lib_beacondone_iter,
491 rt2x00dev); 217 rt2x00dev);
492 218
493 schedule_work(&rt2x00dev->intf_work); 219 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
494} 220}
495EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); 221EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
496 222
@@ -597,7 +323,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
597 struct sk_buff *skb; 323 struct sk_buff *skb;
598 struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status; 324 struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status;
599 struct ieee80211_supported_band *sband; 325 struct ieee80211_supported_band *sband;
600 struct ieee80211_hdr *hdr;
601 const struct rt2x00_rate *rate; 326 const struct rt2x00_rate *rate;
602 unsigned int header_length; 327 unsigned int header_length;
603 unsigned int align; 328 unsigned int align;
@@ -668,30 +393,22 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
668 393
669 if (idx < 0) { 394 if (idx < 0) {
670 WARNING(rt2x00dev, "Frame received with unrecognized signal," 395 WARNING(rt2x00dev, "Frame received with unrecognized signal,"
671 "signal=0x%.2x, plcp=%d.\n", rxdesc.signal, 396 "signal=0x%.2x, type=%d.\n", rxdesc.signal,
672 !!(rxdesc.dev_flags & RXDONE_SIGNAL_PLCP)); 397 (rxdesc.dev_flags & RXDONE_SIGNAL_MASK));
673 idx = 0; 398 idx = 0;
674 } 399 }
675 400
676 /* 401 /*
677 * Only update link status if this is a beacon frame carrying our bssid. 402 * Update extra components
678 */ 403 */
679 hdr = (struct ieee80211_hdr *)entry->skb->data; 404 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
680 if (ieee80211_is_beacon(hdr->frame_control) && 405 rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
681 (rxdesc.dev_flags & RXDONE_MY_BSS))
682 rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc.rssi);
683
684 rt2x00debug_update_crypto(rt2x00dev,
685 rxdesc.cipher,
686 rxdesc.cipher_status);
687
688 rt2x00dev->link.qual.rx_success++;
689 406
690 rx_status->mactime = rxdesc.timestamp; 407 rx_status->mactime = rxdesc.timestamp;
691 rx_status->rate_idx = idx; 408 rx_status->rate_idx = idx;
692 rx_status->qual = 409 rx_status->qual = rt2x00link_calculate_signal(rt2x00dev, rxdesc.rssi);
693 rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc.rssi);
694 rx_status->signal = rxdesc.rssi; 410 rx_status->signal = rxdesc.rssi;
411 rx_status->noise = rxdesc.noise;
695 rx_status->flag = rxdesc.flags; 412 rx_status->flag = rxdesc.flags;
696 rx_status->antenna = rt2x00dev->link.ant.active.rx; 413 rx_status->antenna = rt2x00dev->link.ant.active.rx;
697 414
@@ -1067,7 +784,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1067 if (rt2x00dev->ops->bcn->entry_num > 0) 784 if (rt2x00dev->ops->bcn->entry_num > 0)
1068 rt2x00dev->hw->wiphy->interface_modes |= 785 rt2x00dev->hw->wiphy->interface_modes |=
1069 BIT(NL80211_IFTYPE_ADHOC) | 786 BIT(NL80211_IFTYPE_ADHOC) |
1070 BIT(NL80211_IFTYPE_AP); 787 BIT(NL80211_IFTYPE_AP) |
788 BIT(NL80211_IFTYPE_MESH_POINT) |
789 BIT(NL80211_IFTYPE_WDS);
1071 790
1072 /* 791 /*
1073 * Let the driver probe the device to detect the capabilities. 792 * Let the driver probe the device to detect the capabilities.
@@ -1083,7 +802,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1083 */ 802 */
1084 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 803 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1085 INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled); 804 INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled);
1086 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner);
1087 805
1088 /* 806 /*
1089 * Allocate queue array. 807 * Allocate queue array.
@@ -1104,6 +822,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1104 /* 822 /*
1105 * Register extra components. 823 * Register extra components.
1106 */ 824 */
825 rt2x00link_register(rt2x00dev);
1107 rt2x00leds_register(rt2x00dev); 826 rt2x00leds_register(rt2x00dev);
1108 rt2x00rfkill_allocate(rt2x00dev); 827 rt2x00rfkill_allocate(rt2x00dev);
1109 rt2x00debug_register(rt2x00dev); 828 rt2x00debug_register(rt2x00dev);
@@ -1163,23 +882,17 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
1163#ifdef CONFIG_PM 882#ifdef CONFIG_PM
1164int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) 883int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
1165{ 884{
1166 int retval;
1167
1168 NOTICE(rt2x00dev, "Going to sleep.\n"); 885 NOTICE(rt2x00dev, "Going to sleep.\n");
1169 886
1170 /* 887 /*
1171 * Only continue if mac80211 has open interfaces. 888 * Prevent mac80211 from accessing driver while suspended.
1172 */ 889 */
1173 if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || 890 if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
1174 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) 891 return 0;
1175 goto exit;
1176
1177 set_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags);
1178 892
1179 /* 893 /*
1180 * Disable radio. 894 * Cleanup as much as possible.
1181 */ 895 */
1182 rt2x00lib_stop(rt2x00dev);
1183 rt2x00lib_uninitialize(rt2x00dev); 896 rt2x00lib_uninitialize(rt2x00dev);
1184 897
1185 /* 898 /*
@@ -1188,7 +901,6 @@ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
1188 rt2x00leds_suspend(rt2x00dev); 901 rt2x00leds_suspend(rt2x00dev);
1189 rt2x00debug_deregister(rt2x00dev); 902 rt2x00debug_deregister(rt2x00dev);
1190 903
1191exit:
1192 /* 904 /*
1193 * Set device mode to sleep for power management, 905 * Set device mode to sleep for power management,
1194 * on some hardware this call seems to consistently fail. 906 * on some hardware this call seems to consistently fail.
@@ -1200,8 +912,7 @@ exit:
1200 * the radio and the other components already disabled the 912 * the radio and the other components already disabled the
1201 * device is as good as disabled. 913 * device is as good as disabled.
1202 */ 914 */
1203 retval = rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP); 915 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP))
1204 if (retval)
1205 WARNING(rt2x00dev, "Device failed to enter sleep state, " 916 WARNING(rt2x00dev, "Device failed to enter sleep state, "
1206 "continue suspending.\n"); 917 "continue suspending.\n");
1207 918
@@ -1209,32 +920,8 @@ exit:
1209} 920}
1210EXPORT_SYMBOL_GPL(rt2x00lib_suspend); 921EXPORT_SYMBOL_GPL(rt2x00lib_suspend);
1211 922
1212static void rt2x00lib_resume_intf(void *data, u8 *mac,
1213 struct ieee80211_vif *vif)
1214{
1215 struct rt2x00_dev *rt2x00dev = data;
1216 struct rt2x00_intf *intf = vif_to_intf(vif);
1217
1218 spin_lock(&intf->lock);
1219
1220 rt2x00lib_config_intf(rt2x00dev, intf,
1221 vif->type, intf->mac, intf->bssid);
1222
1223
1224 /*
1225 * Master or Ad-hoc mode require a new beacon update.
1226 */
1227 if (vif->type == NL80211_IFTYPE_AP ||
1228 vif->type == NL80211_IFTYPE_ADHOC)
1229 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
1230
1231 spin_unlock(&intf->lock);
1232}
1233
1234int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) 923int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1235{ 924{
1236 int retval;
1237
1238 NOTICE(rt2x00dev, "Waking up.\n"); 925 NOTICE(rt2x00dev, "Waking up.\n");
1239 926
1240 /* 927 /*
@@ -1244,60 +931,11 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1244 rt2x00leds_resume(rt2x00dev); 931 rt2x00leds_resume(rt2x00dev);
1245 932
1246 /* 933 /*
1247 * Only continue if mac80211 had open interfaces.
1248 */
1249 if (!test_and_clear_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags))
1250 return 0;
1251
1252 /*
1253 * Reinitialize device and all active interfaces.
1254 */
1255 retval = rt2x00lib_start(rt2x00dev);
1256 if (retval)
1257 goto exit;
1258
1259 /*
1260 * Reconfigure device.
1261 */
1262 retval = rt2x00mac_config(rt2x00dev->hw, ~0);
1263 if (retval)
1264 goto exit;
1265
1266 /*
1267 * Iterator over each active interface to
1268 * reconfigure the hardware.
1269 */
1270 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
1271 rt2x00lib_resume_intf, rt2x00dev);
1272
1273 /*
1274 * We are ready again to receive requests from mac80211. 934 * We are ready again to receive requests from mac80211.
1275 */ 935 */
1276 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); 936 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1277 937
1278 /*
1279 * It is possible that during that mac80211 has attempted
1280 * to send frames while we were suspending or resuming.
1281 * In that case we have disabled the TX queue and should
1282 * now enable it again
1283 */
1284 ieee80211_wake_queues(rt2x00dev->hw);
1285
1286 /*
1287 * During interface iteration we might have changed the
1288 * delayed_flags, time to handles the event by calling
1289 * the work handler directly.
1290 */
1291 rt2x00lib_intf_scheduled(&rt2x00dev->intf_work);
1292
1293 return 0; 938 return 0;
1294
1295exit:
1296 rt2x00lib_stop(rt2x00dev);
1297 rt2x00lib_uninitialize(rt2x00dev);
1298 rt2x00debug_deregister(rt2x00dev);
1299
1300 return retval;
1301} 939}
1302EXPORT_SYMBOL_GPL(rt2x00lib_resume); 940EXPORT_SYMBOL_GPL(rt2x00lib_resume);
1303#endif /* CONFIG_PM */ 941#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 7169c222a48..fdedb512292 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index bab05a56e7a..d2deea2f267 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -35,7 +35,6 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
35 const struct firmware *fw; 35 const struct firmware *fw;
36 char *fw_name; 36 char *fw_name;
37 int retval; 37 int retval;
38 u16 crc;
39 38
40 /* 39 /*
41 * Read correct firmware from harddisk. 40 * Read correct firmware from harddisk.
@@ -61,16 +60,26 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
61 return -ENOENT; 60 return -ENOENT;
62 } 61 }
63 62
64 crc = rt2x00dev->ops->lib->get_firmware_crc(fw->data, fw->size);
65 if (crc != (fw->data[fw->size - 2] << 8 | fw->data[fw->size - 1])) {
66 ERROR(rt2x00dev, "Firmware checksum error.\n");
67 retval = -ENOENT;
68 goto exit;
69 }
70
71 INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n", 63 INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n",
72 fw->data[fw->size - 4], fw->data[fw->size - 3]); 64 fw->data[fw->size - 4], fw->data[fw->size - 3]);
73 65
66 retval = rt2x00dev->ops->lib->check_firmware(rt2x00dev, fw->data, fw->size);
67 switch (retval) {
68 case FW_OK:
69 break;
70 case FW_BAD_CRC:
71 ERROR(rt2x00dev, "Firmware checksum error.\n");
72 goto exit;
73 case FW_BAD_LENGTH:
74 ERROR(rt2x00dev,
75 "Invalid firmware file length (len=%zu)\n", fw->size);
76 goto exit;
77 case FW_BAD_VERSION:
78 ERROR(rt2x00dev,
79 "Current firmware does not support detected chipset.\n");
80 goto exit;
81 };
82
74 rt2x00dev->fw = fw; 83 rt2x00dev->fw = fw;
75 84
76 return 0; 85 return 0;
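The generic CRC test removed above (CRC stored big-endian in the last two bytes of the image, computed by the old get_firmware_crc() callback) is replaced by a per-driver check_firmware() callback that returns FW_OK, FW_BAD_CRC, FW_BAD_LENGTH or FW_BAD_VERSION, so each driver can also validate the image length and the firmware/chipset match. A hedged sketch of what such a callback could look like; it simply reuses the old last-two-bytes convention for illustration, example_compute_crc() is a hypothetical helper, and the real per-driver callbacks in this series use their own lengths and checksum algorithms.

/* Sketch only; not one of the actual per-driver callbacks in this patch. */
static int example_check_firmware(struct rt2x00_dev *rt2x00dev,
				  const u8 *data, const size_t len)
{
	u16 fw_crc;
	u16 crc;

	/* Reject images that are obviously too small. */
	if (len < 4)
		return FW_BAD_LENGTH;

	/* Old generic convention: CRC in the last two bytes, big endian. */
	fw_crc = (data[len - 2] << 8) | data[len - 1];

	crc = example_compute_crc(data, len - 2);	/* hypothetical helper */
	if (crc != fw_crc)
		return FW_BAD_CRC;

	return FW_OK;
}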
@@ -78,7 +87,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
78exit: 87exit:
79 release_firmware(fw); 88 release_firmware(fw);
80 89
81 return retval; 90 return -ENOENT;
82} 91}
83 92
84int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) 93int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index a0cd35b6beb..49671fed91d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -134,7 +134,7 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
134 rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy)); 134 rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy));
135 135
136 if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { 136 if (rt2x00dev->led_radio.flags & LED_INITIALIZED) {
137 snprintf(name, sizeof(name), "%s:radio", dev_name); 137 snprintf(name, sizeof(name), "%s::radio", dev_name);
138 138
139 retval = rt2x00leds_register_led(rt2x00dev, 139 retval = rt2x00leds_register_led(rt2x00dev,
140 &rt2x00dev->led_radio, 140 &rt2x00dev->led_radio,
@@ -144,7 +144,7 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
144 } 144 }
145 145
146 if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { 146 if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) {
147 snprintf(name, sizeof(name), "%s:assoc", dev_name); 147 snprintf(name, sizeof(name), "%s::assoc", dev_name);
148 148
149 retval = rt2x00leds_register_led(rt2x00dev, 149 retval = rt2x00leds_register_led(rt2x00dev,
150 &rt2x00dev->led_assoc, 150 &rt2x00dev->led_assoc,
@@ -154,7 +154,7 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
154 } 154 }
155 155
156 if (rt2x00dev->led_qual.flags & LED_INITIALIZED) { 156 if (rt2x00dev->led_qual.flags & LED_INITIALIZED) {
157 snprintf(name, sizeof(name), "%s:quality", dev_name); 157 snprintf(name, sizeof(name), "%s::quality", dev_name);
158 158
159 retval = rt2x00leds_register_led(rt2x00dev, 159 retval = rt2x00leds_register_led(rt2x00dev,
160 &rt2x00dev->led_qual, 160 &rt2x00dev->led_qual,
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
index 9df4a49bdca..1046977e6a1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.h
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 86cd26fbf76..a631613177d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,7 @@
33 * Both the link tuner as the rfkill will be called once per second. 33 * Both the link tuner as the rfkill will be called once per second.
34 */ 34 */
35#define LINK_TUNE_INTERVAL ( round_jiffies_relative(HZ) ) 35#define LINK_TUNE_INTERVAL ( round_jiffies_relative(HZ) )
36#define RFKILL_POLL_INTERVAL ( round_jiffies_relative(HZ) ) 36#define RFKILL_POLL_INTERVAL ( 1000 )
37 37
38/* 38/*
39 * rt2x00_rate: Per rate device information 39 * rt2x00_rate: Per rate device information
@@ -63,7 +63,6 @@ static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
63int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev); 63int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev);
64void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev); 64void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev);
65void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state); 65void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state);
66void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev);
67 66
68/* 67/*
69 * Initialization handlers. 68 * Initialization handlers.
@@ -77,7 +76,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev);
77void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, 76void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
78 struct rt2x00_intf *intf, 77 struct rt2x00_intf *intf,
79 enum nl80211_iftype type, 78 enum nl80211_iftype type,
80 u8 *mac, u8 *bssid); 79 const u8 *mac, const u8 *bssid);
81void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, 80void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
82 struct rt2x00_intf *intf, 81 struct rt2x00_intf *intf,
83 struct ieee80211_bss_conf *conf); 82 struct ieee80211_bss_conf *conf);
@@ -124,9 +123,11 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb);
124 * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware 123 * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
125 * @rt2x00dev: Pointer to &struct rt2x00_dev. 124 * @rt2x00dev: Pointer to &struct rt2x00_dev.
126 * @vif: Interface for which the beacon should be updated. 125 * @vif: Interface for which the beacon should be updated.
126 * @enable_beacon: Enable beaconing
127 */ 127 */
128int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, 128int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
129 struct ieee80211_vif *vif); 129 struct ieee80211_vif *vif,
130 const bool enable_beacon);
130 131
131/** 132/**
132 * rt2x00queue_index_inc - Index incrementation function 133 * rt2x00queue_index_inc - Index incrementation function
@@ -140,6 +141,15 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
140void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index); 141void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index);
141 142
142/** 143/**
144 * rt2x00queue_stop_queues - Halt all data queues
145 * @rt2x00dev: Pointer to &struct rt2x00_dev.
146 *
147 * This function will loop through all available queues to stop
148 * any pending outgoing frames.
149 */
150void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev);
151
152/**
143 * rt2x00queue_init_queues - Initialize all data queues 153 * rt2x00queue_init_queues - Initialize all data queues
144 * @rt2x00dev: Pointer to &struct rt2x00_dev. 154 * @rt2x00dev: Pointer to &struct rt2x00_dev.
145 * 155 *
@@ -154,6 +164,81 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev);
154int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev); 164int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev);
155void rt2x00queue_free(struct rt2x00_dev *rt2x00dev); 165void rt2x00queue_free(struct rt2x00_dev *rt2x00dev);
156 166
167/**
168 * rt2x00link_update_stats - Update link statistics from RX frame
169 * @rt2x00dev: Pointer to &struct rt2x00_dev.
170 * @skb: Received frame
171 * @rxdesc: Received frame descriptor
172 *
173 * Update link statistics based on the information from the
174 * received frame descriptor.
175 */
176void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
177 struct sk_buff *skb,
178 struct rxdone_entry_desc *rxdesc);
179
180/**
181 * rt2x00link_calculate_signal - Calculate signal quality
182 * @rt2x00dev: Pointer to &struct rt2x00_dev.
183 * @rssi: RX Frame RSSI
184 *
185 * Calculate the signal quality of a frame based on the RSSI
186 * measured while receiving the frame and the global
187 * link quality statistics measured since the start of the
188 * link tuning. The result is a value between 0 and 100 which
189 * is an indication of the signal quality.
190 */
191int rt2x00link_calculate_signal(struct rt2x00_dev *rt2x00dev, int rssi);
192
193/**
194 * rt2x00link_start_tuner - Start periodic link tuner work
195 * @rt2x00dev: Pointer to &struct rt2x00_dev.
196 *
197 * This starts the periodic link tuner work; the work will
198 * be executed periodically until &rt2x00link_stop_tuner has
199 * been called.
200 */
201void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev);
202
203/**
204 * rt2x00link_stop_tuner - Stop periodic link tuner work
205 * @rt2x00dev: Pointer to &struct rt2x00_dev.
206 *
207 * After this function has completed, the link tuner will not
208 * run again until &rt2x00link_start_tuner is called.
209 */
210void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev);
211
212/**
213 * rt2x00link_reset_tuner - Reset periodic link tuner work
214 * @rt2x00dev: Pointer to &struct rt2x00_dev.
215 * @antenna: Should the antenna tuning also be reset
216 *
217 * The VGC limit configured in the hardware will be reset to 0
218 * which forces the driver to rediscover the correct value for
219 * the current association. This is needed when configuration
220 * options have changed which could drastically change the
221 * SNR level or link quality (e.g. changing the antenna setting).
222 *
223 * Resetting the link tuner will also cause the periodic work counter
224 * to be reset. Any driver which has a fixed limit on the number
225 * of rounds the link tuner is allowed to run will accept the
226 * tuner actions again if this limit was previously reached.
227 *
228 * If @antenna is set to true, the software antenna diversity
229 * tuning will also be reset.
230 */
231void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna);
232
233/**
234 * rt2x00link_register - Initialize link tuning functionality
235 * @rt2x00dev: Pointer to &struct rt2x00_dev.
236 *
237 * Initialize work structure and all link tuning related
238 * parameters. This will not start the link tuning process itself.
239 */
240void rt2x00link_register(struct rt2x00_dev *rt2x00dev);
241
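Taken together, the declarations above cover the whole link tuner lifecycle. The sketch below only illustrates the intended call order, assuming a driver-side caller; the wrapper names are hypothetical and not part of rt2x00.

	#include "rt2x00.h"
	#include "rt2x00lib.h"

	/* Hypothetical wrappers, shown only to illustrate the call order. */
	static void example_tuner_setup(struct rt2x00_dev *rt2x00dev)
	{
		/* Once per device: prepare the delayed work structure. */
		rt2x00link_register(rt2x00dev);
	}

	static void example_radio_enable(struct rt2x00_dev *rt2x00dev)
	{
		/* After the radio is up and a sta/ap interface exists. */
		rt2x00link_start_tuner(rt2x00dev);
	}

	static void example_radio_disable(struct rt2x00_dev *rt2x00dev)
	{
		/* Cancel the periodic work before the radio goes down. */
		rt2x00link_stop_tuner(rt2x00dev);
	}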
157/* 242/*
158 * Firmware handlers. 243 * Firmware handlers.
159 */ 244 */
@@ -179,7 +264,7 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
179void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 264void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
180 enum rt2x00_dump_type type, struct sk_buff *skb); 265 enum rt2x00_dump_type type, struct sk_buff *skb);
181void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, 266void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
182 enum cipher cipher, enum rx_crypto status); 267 struct rxdone_entry_desc *rxdesc);
183#else 268#else
184static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 269static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
185{ 270{
@@ -196,8 +281,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
196} 281}
197 282
198static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, 283static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
199 enum cipher cipher, 284 struct rxdone_entry_desc *rxdesc)
200 enum rx_crypto status)
201{ 285{
202} 286}
203#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 287#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
@@ -209,7 +293,8 @@ static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
209enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key); 293enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key);
210void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, 294void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
211 struct txentry_desc *txdesc); 295 struct txentry_desc *txdesc);
212unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info); 296unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
297 struct sk_buff *skb);
213void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len); 298void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len);
214void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len); 299void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len);
215void rt2x00crypto_tx_insert_iv(struct sk_buff *skb); 300void rt2x00crypto_tx_insert_iv(struct sk_buff *skb);
@@ -227,7 +312,8 @@ static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
227{ 312{
228} 313}
229 314
230static inline unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info) 315static inline unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
316 struct sk_buff *skb)
231{ 317{
232 return 0; 318 return 0;
233} 319}
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
new file mode 100644
index 00000000000..9223a6d1f1d
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -0,0 +1,461 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00lib
23 Abstract: rt2x00 generic link tuning routines.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28
29#include "rt2x00.h"
30#include "rt2x00lib.h"
31
32/*
33 * When we lack RSSI information, return something less than -80 to
34 * tell the driver to tune the device to maximum sensitivity.
35 */
36#define DEFAULT_RSSI -128
37
38/*
39 * When no TX/RX percentage could be calculated due to lack of
40 * frames on the air, we fall back to a percentage of 50%.
41 * This assures we get at least some decent value
42 * when the link tuner starts.
43 * The value will be dropped and overwritten with the correct (measured)
44 * value anyway during the first run of the link tuner.
45 */
46#define DEFAULT_PERCENTAGE 50
47
48/*
49 * Small helper macro to work with moving/walking averages.
50 * When adding a value to the average value the following calculation
51 * is needed:
52 *
53 * avg_rssi = ((avg_rssi * 7) + rssi) / 8;
54 *
55 * The advantage of this approach is that we only need 1 variable
56 * to store the average in (No need for a count and a total).
57 * But more importantly, normal average values will over time
58 * move less and less towards newly added values. With link
59 * tuning this means the device can have a very good RSSI
60 * for a few minutes, but when the device is moved away from the AP
61 * the average will not decrease fast enough to compensate.
62 * The walking average compensates for this and will move towards
63 * the new values correctly, allowing effective link tuning.
64 */
65#define MOVING_AVERAGE(__avg, __val, __samples) \
66 ( (((__avg) * ((__samples) - 1)) + (__val)) / (__samples) )
67
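A quick sanity check of the macro with made-up numbers (8 samples, as used later in this file):

	int avg_rssi = -50;

	/* ((-50 * 7) + (-90)) / 8 = -440 / 8 = -55 */
	avg_rssi = MOVING_AVERAGE(avg_rssi, -90, 8);

	/*
	 * A single -90 outlier only moves the average by 5, while a
	 * sustained change keeps pulling it towards the new level.
	 */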
68/*
69 * Small helper macro for percentage calculation
70 * This is a very simple macro with the only catch that it will
71 * produce a default value in case no total value was provided.
72 */
73#define PERCENTAGE(__value, __total) \
74 ( (__total) ? (((__value) * 100) / (__total)) : (DEFAULT_PERCENTAGE) )
75
76/*
77 * For calculating the signal quality we have determined
78 * the total number of successful and failed RX and TX frames.
79 * With the addition of the average RSSI value we can determine
80 * the link quality using the following algorithm:
81 *
82 * rssi_percentage = (avg_rssi * 100) / rssi_offset
83 * rx_percentage = (rx_success * 100) / rx_total
84 * tx_percentage = (tx_success * 100) / tx_total
85 * avg_signal = ((WEIGHT_RSSI * avg_rssi) +
86 * (WEIGHT_TX * tx_percentage) +
87 * (WEIGHT_RX * rx_percentage)) / 100
88 *
89 * This value should then be capped so it is not greater than 100.
90 * This means the values of WEIGHT_RSSI, WEIGHT_RX, WEIGHT_TX must
91 * sum up to 100 as well.
92 */
93#define WEIGHT_RSSI 20
94#define WEIGHT_RX 40
95#define WEIGHT_TX 40
96
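A worked example with hypothetical numbers (the rssi_offset and percentages below are made up purely for illustration):

	rssi_offset = 120, avg_rssi = -60  ->  rssi = -60 + 120 = 60
	rssi_percentage = (60 * 100) / 120 = 50
	tx_percentage = 90, rx_percentage = 80

	signal = ((20 * 50) + (40 * 90) + (40 * 80)) / 100
	       = (1000 + 3600 + 3200) / 100
	       = 78

Because 20 + 40 + 40 = 100 the result stays within 0..100 as long as each percentage does; the final clamp only guards against an rssi_percentage above 100.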
97static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
98{
99 struct link_ant *ant = &rt2x00dev->link.ant;
100
101 if (ant->rssi_ant && rt2x00dev->link.qual.rx_success)
102 return ant->rssi_ant;
103 return DEFAULT_RSSI;
104}
105
106static int rt2x00link_antenna_get_rssi_history(struct rt2x00_dev *rt2x00dev,
107 enum antenna antenna)
108{
109 struct link_ant *ant = &rt2x00dev->link.ant;
110
111 if (ant->rssi_history[antenna - ANTENNA_A])
112 return ant->rssi_history[antenna - ANTENNA_A];
113 return DEFAULT_RSSI;
114}
115/* Small wrapper for rt2x00link_antenna_get_rssi_history() */
116#define rt2x00link_antenna_get_rssi_rx_history(__dev) \
117 rt2x00link_antenna_get_rssi_history((__dev), \
118 (__dev)->link.ant.active.rx)
119#define rt2x00link_antenna_get_rssi_tx_history(__dev) \
120 rt2x00link_antenna_get_rssi_history((__dev), \
121 (__dev)->link.ant.active.tx)
122
123static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
124 enum antenna antenna,
125 int rssi)
126{
127 struct link_ant *ant = &rt2x00dev->link.ant;
128 ant->rssi_history[ant->active.rx - ANTENNA_A] = rssi;
129}
130/* Small wrapper for rt2x00link_antenna_get_rssi_history() */
131#define rt2x00link_antenna_update_rssi_rx_history(__dev, __rssi) \
132 rt2x00link_antenna_update_rssi_history((__dev), \
133 (__dev)->link.ant.active.rx, \
134 (__rssi))
135#define rt2x00link_antenna_update_rssi_tx_history(__dev, __rssi) \
136 rt2x00link_antenna_update_rssi_history((__dev), \
137 (__dev)->link.ant.active.tx, \
138 (__rssi))
139
140static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
141{
142 rt2x00dev->link.ant.rssi_ant = 0;
143}
144
145static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
146{
147 struct link_ant *ant = &rt2x00dev->link.ant;
148 struct antenna_setup new_ant;
149 int sample_a = rt2x00link_antenna_get_rssi_history(rt2x00dev, ANTENNA_A);
150 int sample_b = rt2x00link_antenna_get_rssi_history(rt2x00dev, ANTENNA_B);
151
152 memcpy(&new_ant, &ant->active, sizeof(new_ant));
153
154 /*
155 * We are done sampling. Now we should evaluate the results.
156 */
157 ant->flags &= ~ANTENNA_MODE_SAMPLE;
158
159 /*
160 * During the last period we have sampled the RSSI
161 * from both antennas. It is now time to determine
162 * which antenna demonstrated the best performance.
163 * When we are already on the antenna with the best
164 * performance, then there really is nothing for us
165 * left to do.
166 */
167 if (sample_a == sample_b)
168 return;
169
170 if (ant->flags & ANTENNA_RX_DIVERSITY)
171 new_ant.rx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
172
173 if (ant->flags & ANTENNA_TX_DIVERSITY)
174 new_ant.tx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
175
176 rt2x00lib_config_antenna(rt2x00dev, &new_ant);
177}
178
179static void rt2x00lib_antenna_diversity_eval(struct rt2x00_dev *rt2x00dev)
180{
181 struct link_ant *ant = &rt2x00dev->link.ant;
182 struct antenna_setup new_ant;
183 int rssi_curr;
184 int rssi_old;
185
186 memcpy(&new_ant, &ant->active, sizeof(new_ant));
187
188 /*
189 * Get current RSSI value along with the historical value,
190 * after that update the history with the current value.
191 */
192 rssi_curr = rt2x00link_antenna_get_link_rssi(rt2x00dev);
193 rssi_old = rt2x00link_antenna_get_rssi_rx_history(rt2x00dev);
194 rt2x00link_antenna_update_rssi_rx_history(rt2x00dev, rssi_curr);
195
196 /*
197 * The legacy driver indicates that we should swap antennas
198 * when the difference in RSSI is greater than 5. This
199 * should also be done when the RSSI was actually better
200 * than the previous sample.
201 * When the difference exceeds the threshold we should
202 * sample the RSSI from the other antenna to make a valid
203 * comparison between the 2 antennas.
204 */
205 if (abs(rssi_curr - rssi_old) < 5)
206 return;
207
208 ant->flags |= ANTENNA_MODE_SAMPLE;
209
210 if (ant->flags & ANTENNA_RX_DIVERSITY)
211 new_ant.rx = (new_ant.rx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A;
212
213 if (ant->flags & ANTENNA_TX_DIVERSITY)
214 new_ant.tx = (new_ant.tx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A;
215
216 rt2x00lib_config_antenna(rt2x00dev, &new_ant);
217}
218
219static void rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
220{
221 struct link_ant *ant = &rt2x00dev->link.ant;
222
223 /*
224 * Determine if software diversity is enabled for
225 * either the TX or RX antenna (or both).
226 * Always perform this check since within the link
227 * tuner interval the configuration might have changed.
228 */
229 ant->flags &= ~ANTENNA_RX_DIVERSITY;
230 ant->flags &= ~ANTENNA_TX_DIVERSITY;
231
232 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
233 ant->flags |= ANTENNA_RX_DIVERSITY;
234 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
235 ant->flags |= ANTENNA_TX_DIVERSITY;
236
237 if (!(ant->flags & ANTENNA_RX_DIVERSITY) &&
238 !(ant->flags & ANTENNA_TX_DIVERSITY)) {
239 ant->flags = 0;
240 return;
241 }
242
243 /*
244 * If we have only sampled the data over the last period
245 * we should now harvest the data. Otherwise just evaluate
246 * the data. The latter should only be performed once
247 * every 2 seconds.
248 */
249 if (ant->flags & ANTENNA_MODE_SAMPLE)
250 rt2x00lib_antenna_diversity_sample(rt2x00dev);
251 else if (rt2x00dev->link.count & 1)
252 rt2x00lib_antenna_diversity_eval(rt2x00dev);
253}
254
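Combined with the one-second LINK_TUNE_INTERVAL, the (link.count & 1) test above produces the "once every 2 seconds" cadence mentioned in the comment, while a sample round is always harvested on the very next tuner run. A rough timeline, assuming the interval really is one second:

	count 0: count & 1 == 0          -> nothing to do
	count 1: evaluate; if |rssi_curr - rssi_old| >= 5, set
	         ANTENNA_MODE_SAMPLE and switch to the other antenna
	count 2: ANTENNA_MODE_SAMPLE set -> harvest the sample and keep
	         the better performing antenna
	count 3: evaluate again, and so on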
255void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
256 struct sk_buff *skb,
257 struct rxdone_entry_desc *rxdesc)
258{
259 struct link *link = &rt2x00dev->link;
260 struct link_qual *qual = &rt2x00dev->link.qual;
261 struct link_ant *ant = &rt2x00dev->link.ant;
262 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
263 int avg_rssi = rxdesc->rssi;
264 int ant_rssi = rxdesc->rssi;
265
266 /*
267 * The frame was received successfully, since unsuccessful
268 * frames would have been dropped by the hardware.
269 */
270 qual->rx_success++;
271
272 /*
273 * We are only interested in quality statistics from
274 * beacons which came from the BSS which we are
275 * associated with.
276 */
277 if (!ieee80211_is_beacon(hdr->frame_control) ||
278 !(rxdesc->dev_flags & RXDONE_MY_BSS))
279 return;
280
281 /*
282 * Update global RSSI
283 */
284 if (link->avg_rssi)
285 avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi, 8);
286 link->avg_rssi = avg_rssi;
287
288 /*
289 * Update antenna RSSI
290 */
291 if (ant->rssi_ant)
292 ant_rssi = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi, 8);
293 ant->rssi_ant = ant_rssi;
294}
295
296static void rt2x00link_precalculate_signal(struct rt2x00_dev *rt2x00dev)
297{
298 struct link *link = &rt2x00dev->link;
299 struct link_qual *qual = &rt2x00dev->link.qual;
300
301 link->rx_percentage =
302 PERCENTAGE(qual->rx_success, qual->rx_failed + qual->rx_success);
303 link->tx_percentage =
304 PERCENTAGE(qual->tx_success, qual->tx_failed + qual->tx_success);
305
306 qual->rx_success = 0;
307 qual->rx_failed = 0;
308 qual->tx_success = 0;
309 qual->tx_failed = 0;
310}
311
312int rt2x00link_calculate_signal(struct rt2x00_dev *rt2x00dev, int rssi)
313{
314 struct link *link = &rt2x00dev->link;
315 int rssi_percentage = 0;
316 int signal;
317
318 /*
319 * We need a positive value for the RSSI.
320 */
321 if (rssi < 0)
322 rssi += rt2x00dev->rssi_offset;
323
324 /*
325 * Calculate the different percentages,
326 * which will be used for the signal.
327 */
328 rssi_percentage = PERCENTAGE(rssi, rt2x00dev->rssi_offset);
329
330 /*
331 * Add the individual percentages and use the WEIGHT
332 * defines to calculate the current link signal.
333 */
334 signal = ((WEIGHT_RSSI * rssi_percentage) +
335 (WEIGHT_TX * link->tx_percentage) +
336 (WEIGHT_RX * link->rx_percentage)) / 100;
337
338 return min_t(int, signal, 100);
339}
340
341void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
342{
343 struct link *link = &rt2x00dev->link;
344
345 /*
346 * Link tuning should only be performed when
347 * an active sta or master interface exists.
348 * Single monitor mode interfaces should never have
349 * work with link tuners.
350 */
351 if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)
352 return;
353
354 link->rx_percentage = DEFAULT_PERCENTAGE;
355 link->tx_percentage = DEFAULT_PERCENTAGE;
356
357 rt2x00link_reset_tuner(rt2x00dev, false);
358
359 queue_delayed_work(rt2x00dev->hw->workqueue,
360 &link->work, LINK_TUNE_INTERVAL);
361}
362
363void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev)
364{
365 cancel_delayed_work_sync(&rt2x00dev->link.work);
366}
367
368void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
369{
370 struct link_qual *qual = &rt2x00dev->link.qual;
371
372 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
373 return;
374
375 /*
376 * Reset link information.
377 * Both the currently active VGC level and the
378 * link tuner counter should be reset. Resetting
379 * the counter is important for devices that should
380 * only perform link tuning during the
381 * first minute after being enabled.
382 */
383 rt2x00dev->link.count = 0;
384 memset(qual, 0, sizeof(*qual));
385
386 /*
387 * Reset the link tuner.
388 */
389 rt2x00dev->ops->lib->reset_tuner(rt2x00dev, qual);
390
391 if (antenna)
392 rt2x00link_antenna_reset(rt2x00dev);
393}
394
395static void rt2x00link_tuner(struct work_struct *work)
396{
397 struct rt2x00_dev *rt2x00dev =
398 container_of(work, struct rt2x00_dev, link.work.work);
399 struct link *link = &rt2x00dev->link;
400 struct link_qual *qual = &rt2x00dev->link.qual;
401
402 /*
403 * When the radio is shutting down we should
404 * immediately cease all link tuning.
405 */
406 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
407 return;
408
409 /*
410 * Update statistics.
411 */
412 rt2x00dev->ops->lib->link_stats(rt2x00dev, qual);
413 rt2x00dev->low_level_stats.dot11FCSErrorCount += qual->rx_failed;
414
415 /*
416 * Update the quality RSSI for link tuning: when we have
417 * received some frames and managed to collect the RSSI
418 * data we can use it. Otherwise we
419 * must fall back to the default RSSI value.
420 */
421 if (!link->avg_rssi || !qual->rx_success)
422 qual->rssi = DEFAULT_RSSI;
423 else
424 qual->rssi = link->avg_rssi;
425
426 /*
427 * Only perform the link tuning when it
428 * has been enabled (it could have been disabled from the EEPROM).
429 */
430 if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags))
431 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
432
433 /*
434 * Precalculate the portion of the link signal which is
435 * based on the tx/rx success/failure counters.
436 */
437 rt2x00link_precalculate_signal(rt2x00dev);
438
439 /*
440 * Send a signal to the led to update the led signal strength.
441 */
442 rt2x00leds_led_quality(rt2x00dev, link->avg_rssi);
443
444 /*
445 * Evaluate antenna setup, make this the last step since this could
446 * possibly reset some statistics.
447 */
448 rt2x00lib_antenna_diversity(rt2x00dev);
449
450 /*
451 * Increase tuner counter, and reschedule the next link tuner run.
452 */
453 link->count++;
454 queue_delayed_work(rt2x00dev->hw->workqueue,
455 &link->work, LINK_TUNE_INTERVAL);
456}
457
458void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
459{
460 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
461}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 38edee5fe16..c41a0b9e473 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -79,8 +79,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
79 * RTS/CTS frame should use the length of the frame plus any 79 * RTS/CTS frame should use the length of the frame plus any
80 * encryption overhead that will be added by the hardware. 80 * encryption overhead that will be added by the hardware.
81 */ 81 */
82 if (!frag_skb->do_not_encrypt) 82 data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
83 data_length += rt2x00crypto_tx_overhead(tx_info);
84 83
85 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) 84 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
86 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif, 85 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
@@ -226,6 +225,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
226 break; 225 break;
227 case NL80211_IFTYPE_STATION: 226 case NL80211_IFTYPE_STATION:
228 case NL80211_IFTYPE_ADHOC: 227 case NL80211_IFTYPE_ADHOC:
228 case NL80211_IFTYPE_MESH_POINT:
229 case NL80211_IFTYPE_WDS:
229 /* 230 /*
230 * We don't support mixed combinations of 231 * We don't support mixed combinations of
231 * sta and ap interfaces. 232 * sta and ap interfaces.
@@ -430,8 +431,10 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
430 /* 431 /*
431 * Update the beacon. 432 * Update the beacon.
432 */ 433 */
433 if (conf->changed & IEEE80211_IFCC_BEACON) 434 if (conf->changed & (IEEE80211_IFCC_BEACON |
434 status = rt2x00queue_update_beacon(rt2x00dev, vif); 435 IEEE80211_IFCC_BEACON_ENABLED))
436 status = rt2x00queue_update_beacon(rt2x00dev, vif,
437 conf->enable_beacon);
435 438
436 return status; 439 return status;
437} 440}
@@ -482,16 +485,36 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
482EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); 485EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
483 486
484#ifdef CONFIG_RT2X00_LIB_CRYPTO 487#ifdef CONFIG_RT2X00_LIB_CRYPTO
488static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len)
489{
490 if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
491 memcpy(&crypto->key,
492 &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
493 sizeof(crypto->key));
494
495 if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
496 memcpy(&crypto->tx_mic,
497 &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
498 sizeof(crypto->tx_mic));
499
500 if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
501 memcpy(&crypto->rx_mic,
502 &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
503 sizeof(crypto->rx_mic));
504}
505
485int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 506int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
486 const u8 *local_address, const u8 *address, 507 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
487 struct ieee80211_key_conf *key) 508 struct ieee80211_key_conf *key)
488{ 509{
489 struct rt2x00_dev *rt2x00dev = hw->priv; 510 struct rt2x00_dev *rt2x00dev = hw->priv;
490 struct ieee80211_sta *sta; 511 struct rt2x00_intf *intf = vif_to_intf(vif);
491 int (*set_key) (struct rt2x00_dev *rt2x00dev, 512 int (*set_key) (struct rt2x00_dev *rt2x00dev,
492 struct rt2x00lib_crypto *crypto, 513 struct rt2x00lib_crypto *crypto,
493 struct ieee80211_key_conf *key); 514 struct ieee80211_key_conf *key);
494 struct rt2x00lib_crypto crypto; 515 struct rt2x00lib_crypto crypto;
516 static const u8 bcast_addr[ETH_ALEN] =
517 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, };
495 518
496 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 519 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
497 return 0; 520 return 0;
@@ -509,45 +532,25 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
509 if (rt2x00dev->intf_sta_count) 532 if (rt2x00dev->intf_sta_count)
510 crypto.bssidx = 0; 533 crypto.bssidx = 0;
511 else 534 else
512 crypto.bssidx = 535 crypto.bssidx = intf->mac[5] & (rt2x00dev->ops->max_ap_intf - 1);
513 local_address[5] & (rt2x00dev->ops->max_ap_intf - 1);
514 536
515 crypto.cipher = rt2x00crypto_key_to_cipher(key); 537 crypto.cipher = rt2x00crypto_key_to_cipher(key);
516 if (crypto.cipher == CIPHER_NONE) 538 if (crypto.cipher == CIPHER_NONE)
517 return -EOPNOTSUPP; 539 return -EOPNOTSUPP;
518 540
519 crypto.cmd = cmd; 541 crypto.cmd = cmd;
520 crypto.address = address;
521
522 if (crypto.cipher == CIPHER_TKIP) {
523 if (key->keylen > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
524 memcpy(&crypto.key,
525 &key->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
526 sizeof(crypto.key));
527
528 if (key->keylen > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
529 memcpy(&crypto.tx_mic,
530 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
531 sizeof(crypto.tx_mic));
532
533 if (key->keylen > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
534 memcpy(&crypto.rx_mic,
535 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
536 sizeof(crypto.rx_mic));
537 } else
538 memcpy(&crypto.key, &key->key[0], key->keylen);
539 542
540 /* 543 if (sta) {
541 * Discover the Association ID from mac80211. 544 /* some drivers need the AID */
542 * Some drivers need this information when updating the
543 * hardware key (either adding or removing).
544 */
545 rcu_read_lock();
546 sta = ieee80211_find_sta(hw, address);
547 if (sta)
548 crypto.aid = sta->aid; 545 crypto.aid = sta->aid;
549 rcu_read_unlock(); 546 crypto.address = sta->addr;
547 } else
548 crypto.address = bcast_addr;
550 549
550 if (crypto.cipher == CIPHER_TKIP)
551 memcpy_tkip(&crypto, &key->key[0], key->keylen);
552 else
553 memcpy(&crypto.key, &key->key[0], key->keylen);
551 /* 554 /*
552 * Each BSS has a maximum of 4 shared keys. 555 * Each BSS has a maximum of 4 shared keys.
553 * Shared key index values: 556 * Shared key index values:
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index d52b22b82d1..e616c20d4a7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 9c0a4d77bc1..15a12487e04 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 0709decec9c..a5664bd8493 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -148,20 +148,105 @@ void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
148 dev_kfree_skb_any(skb); 148 dev_kfree_skb_any(skb);
149} 149}
150 150
151static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
152 struct txentry_desc *txdesc)
153{
154 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
155 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
156 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
157 unsigned long irqflags;
158
159 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
160 unlikely(!tx_info->control.vif))
161 return;
162
163 /*
164 * Hardware should insert sequence counter.
165 * FIXME: We insert a software sequence counter first for
166 * hardware that doesn't support hardware sequence counting.
167 *
168 * This is wrong because beacons are not getting sequence
169 * numbers assigned properly.
170 *
171 * A secondary problem exists for drivers that cannot toggle
172 * sequence counting per-frame, since those will override the
173 * sequence counter given by mac80211.
174 */
175 spin_lock_irqsave(&intf->seqlock, irqflags);
176
177 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
178 intf->seqno += 0x10;
179 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
180 hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
181
182 spin_unlock_irqrestore(&intf->seqlock, irqflags);
183
184 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
185}
186
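The "+= 0x10" above is easier to follow with the sequence control layout in mind: the fragment number sits in the low 4 bits (kept by the IEEE80211_SCTL_FRAG mask) and the sequence number in bits 4-15, so adding 0x10 advances the sequence number by exactly one:

	seq_ctrl = 0x0120          ->  sequence number 0x012, fragment 0
	seq_ctrl + 0x10 = 0x0130   ->  sequence number 0x013, fragment 0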
187static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
188 struct txentry_desc *txdesc,
189 const struct rt2x00_rate *hwrate)
190{
191 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
192 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
193 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
194 unsigned int data_length;
195 unsigned int duration;
196 unsigned int residual;
197
198 /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
199 data_length = entry->skb->len + 4;
200 data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
201
202 /*
203 * PLCP setup
204 * Length calculation depends on OFDM/CCK rate.
205 */
206 txdesc->signal = hwrate->plcp;
207 txdesc->service = 0x04;
208
209 if (hwrate->flags & DEV_RATE_OFDM) {
210 txdesc->length_high = (data_length >> 6) & 0x3f;
211 txdesc->length_low = data_length & 0x3f;
212 } else {
213 /*
214 * Convert length to microseconds.
215 */
216 residual = GET_DURATION_RES(data_length, hwrate->bitrate);
217 duration = GET_DURATION(data_length, hwrate->bitrate);
218
219 if (residual != 0) {
220 duration++;
221
222 /*
223 * Check if we need to set the Length Extension
224 */
225 if (hwrate->bitrate == 110 && residual <= 30)
226 txdesc->service |= 0x80;
227 }
228
229 txdesc->length_high = (duration >> 8) & 0xff;
230 txdesc->length_low = duration & 0xff;
231
232 /*
233 * When preamble is enabled we should set the
234 * preamble bit for the signal.
235 */
236 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
237 txdesc->signal |= 0x08;
238 }
239}
240
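A rough worked example of the CCK branch. GET_DURATION() and GET_DURATION_RES() are not part of this diff, so the assumption here is that they return the transmit time in microseconds at the given rate (the bitrate field being in 100 kbit/s units) and the corresponding remainder. For a data_length of 200 bytes at 11 Mbit/s (bitrate == 110):

	bits        = 200 * 8 = 1600
	duration    = (1600 * 10) / 110 = 145 us, residual = 50
	residual != 0  ->  duration = 146 us
	residual > 30  ->  no length-extension bit, service stays 0x04
	length_high = (146 >> 8) & 0xff = 0x00
	length_low  =  146       & 0xff = 0x92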
151static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, 241static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
152 struct txentry_desc *txdesc) 242 struct txentry_desc *txdesc)
153{ 243{
154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 244 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 245 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 246 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
157 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
158 struct ieee80211_rate *rate = 247 struct ieee80211_rate *rate =
159 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 248 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
160 const struct rt2x00_rate *hwrate; 249 const struct rt2x00_rate *hwrate;
161 unsigned int data_length;
162 unsigned int duration;
163 unsigned int residual;
164 unsigned long irqflags;
165 250
166 memset(txdesc, 0, sizeof(*txdesc)); 251 memset(txdesc, 0, sizeof(*txdesc));
167 252
@@ -173,27 +258,12 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
173 txdesc->cw_max = entry->queue->cw_max; 258 txdesc->cw_max = entry->queue->cw_max;
174 txdesc->aifs = entry->queue->aifs; 259 txdesc->aifs = entry->queue->aifs;
175 260
176 /* Data length + CRC */
177 data_length = entry->skb->len + 4;
178
179 /* 261 /*
180 * Check whether this frame is to be acked. 262 * Check whether this frame is to be acked.
181 */ 263 */
182 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) 264 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
183 __set_bit(ENTRY_TXD_ACK, &txdesc->flags); 265 __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
184 266
185 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
186 !entry->skb->do_not_encrypt) {
187 /* Apply crypto specific descriptor information */
188 rt2x00crypto_create_tx_descriptor(entry, txdesc);
189
190 /*
191 * Extend frame length to include all encryption overhead
192 * that will be added by the hardware.
193 */
194 data_length += rt2x00crypto_tx_overhead(tx_info);
195 }
196
197 /* 267 /*
198 * Check if this is a RTS/CTS frame 268 * Check if this is a RTS/CTS frame
199 */ 269 */
@@ -237,86 +307,27 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
237 * Set ifs to IFS_SIFS when this is not the first fragment, 307 * Set ifs to IFS_SIFS when this is not the first fragment,
238 * or this fragment came after RTS/CTS. 308 * or this fragment came after RTS/CTS.
239 */ 309 */
240 if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) { 310 if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
241 txdesc->ifs = IFS_SIFS; 311 !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
242 } else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
243 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); 312 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
244 txdesc->ifs = IFS_BACKOFF; 313 txdesc->ifs = IFS_BACKOFF;
245 } else { 314 } else
246 txdesc->ifs = IFS_SIFS; 315 txdesc->ifs = IFS_SIFS;
247 }
248 316
249 /* 317 /*
250 * Hardware should insert sequence counter. 318 * Determine rate modulation.
251 * FIXME: We insert a software sequence counter first for
252 * hardware that doesn't support hardware sequence counting.
253 *
254 * This is wrong because beacons are not getting sequence
255 * numbers assigned properly.
256 *
257 * A secondary problem exists for drivers that cannot toggle
258 * sequence counting per-frame, since those will override the
259 * sequence counter given by mac80211.
260 */ 319 */
261 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 320 hwrate = rt2x00_get_rate(rate->hw_value);
262 if (likely(tx_info->control.vif)) { 321 txdesc->rate_mode = RATE_MODE_CCK;
263 struct rt2x00_intf *intf; 322 if (hwrate->flags & DEV_RATE_OFDM)
264 323 txdesc->rate_mode = RATE_MODE_OFDM;
265 intf = vif_to_intf(tx_info->control.vif);
266
267 spin_lock_irqsave(&intf->seqlock, irqflags);
268
269 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
270 intf->seqno += 0x10;
271 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
272 hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
273
274 spin_unlock_irqrestore(&intf->seqlock, irqflags);
275
276 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
277 }
278 }
279 324
280 /* 325 /*
281 * PLCP setup 326 * Apply TX descriptor handling by components
282 * Length calculation depends on OFDM/CCK rate.
283 */ 327 */
284 hwrate = rt2x00_get_rate(rate->hw_value); 328 rt2x00crypto_create_tx_descriptor(entry, txdesc);
285 txdesc->signal = hwrate->plcp; 329 rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
286 txdesc->service = 0x04; 330 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
287
288 if (hwrate->flags & DEV_RATE_OFDM) {
289 __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);
290
291 txdesc->length_high = (data_length >> 6) & 0x3f;
292 txdesc->length_low = data_length & 0x3f;
293 } else {
294 /*
295 * Convert length to microseconds.
296 */
297 residual = GET_DURATION_RES(data_length, hwrate->bitrate);
298 duration = GET_DURATION(data_length, hwrate->bitrate);
299
300 if (residual != 0) {
301 duration++;
302
303 /*
304 * Check if we need to set the Length Extension
305 */
306 if (hwrate->bitrate == 110 && residual <= 30)
307 txdesc->service |= 0x80;
308 }
309
310 txdesc->length_high = (duration >> 8) & 0xff;
311 txdesc->length_low = duration & 0xff;
312
313 /*
314 * When preamble is enabled we should set the
315 * preamble bit for the signal.
316 */
317 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
318 txdesc->signal |= 0x08;
319 }
320} 331}
321 332
322static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, 333static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
@@ -403,7 +414,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
403 */ 414 */
404 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && 415 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
405 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { 416 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
406 if (test_bit(CONFIG_CRYPTO_COPY_IV, &queue->rt2x00dev->flags)) 417 if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
407 rt2x00crypto_tx_copy_iv(skb, iv_len); 418 rt2x00crypto_tx_copy_iv(skb, iv_len);
408 else 419 else
409 rt2x00crypto_tx_remove_iv(skb, iv_len); 420 rt2x00crypto_tx_remove_iv(skb, iv_len);
@@ -432,7 +443,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
432} 443}
433 444
434int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, 445int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
435 struct ieee80211_vif *vif) 446 struct ieee80211_vif *vif,
447 const bool enable_beacon)
436{ 448{
437 struct rt2x00_intf *intf = vif_to_intf(vif); 449 struct rt2x00_intf *intf = vif_to_intf(vif);
438 struct skb_frame_desc *skbdesc; 450 struct skb_frame_desc *skbdesc;
@@ -442,6 +454,11 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
442 if (unlikely(!intf->beacon)) 454 if (unlikely(!intf->beacon))
443 return -ENOBUFS; 455 return -ENOBUFS;
444 456
457 if (!enable_beacon) {
458 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
459 return 0;
460 }
461
445 intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); 462 intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
446 if (!intf->beacon->skb) 463 if (!intf->beacon->skb)
447 return -ENOMEM; 464 return -ENOMEM;
@@ -490,6 +507,9 @@ struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
490{ 507{
491 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 508 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
492 509
510 if (queue == QID_RX)
511 return rt2x00dev->rx;
512
493 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) 513 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
494 return &rt2x00dev->tx[queue]; 514 return &rt2x00dev->tx[queue];
495 515
@@ -566,6 +586,14 @@ static void rt2x00queue_reset(struct data_queue *queue)
566 spin_unlock_irqrestore(&queue->lock, irqflags); 586 spin_unlock_irqrestore(&queue->lock, irqflags);
567} 587}
568 588
589void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
590{
591 struct data_queue *queue;
592
593 txall_queue_for_each(rt2x00dev, queue)
594 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
595}
596
569void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) 597void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
570{ 598{
571 struct data_queue *queue; 599 struct data_queue *queue;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 28293715340..97e2ab08f08 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -158,6 +158,14 @@ enum rxdone_entry_desc_flags {
158}; 158};
159 159
160/** 160/**
161 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
162 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
163 * from &rxdone_entry_desc to a signal value type.
164 */
165#define RXDONE_SIGNAL_MASK \
166 ( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE )
167
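A short sketch of how an RX completion path could use the mask; the flag names come from &enum rxdone_entry_desc_flags above:

	u32 signal_type = rxdesc->dev_flags & RXDONE_SIGNAL_MASK;

	if (signal_type == RXDONE_SIGNAL_PLCP) {
		/* rxdesc->signal holds a raw PLCP value */
	} else if (signal_type == RXDONE_SIGNAL_BITRATE) {
		/* rxdesc->signal holds a bitrate value */
	}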
168/**
161 * struct rxdone_entry_desc: RX Entry descriptor 169 * struct rxdone_entry_desc: RX Entry descriptor
162 * 170 *
163 * Summary of information that has been read from the RX frame descriptor. 171 * Summary of information that has been read from the RX frame descriptor.
@@ -165,6 +173,7 @@ enum rxdone_entry_desc_flags {
165 * @timestamp: RX Timestamp 173 * @timestamp: RX Timestamp
166 * @signal: Signal of the received frame. 174 * @signal: Signal of the received frame.
167 * @rssi: RSSI of the received frame. 175 * @rssi: RSSI of the received frame.
176 * @noise: Measured noise during frame reception.
168 * @size: Data size of the received frame. 177 * @size: Data size of the received frame.
169 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags). 178 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
170 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags). 179 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
@@ -177,6 +186,7 @@ struct rxdone_entry_desc {
177 u64 timestamp; 186 u64 timestamp;
178 int signal; 187 int signal;
179 int rssi; 188 int rssi;
189 int noise;
180 int size; 190 int size;
181 int flags; 191 int flags;
182 int dev_flags; 192 int dev_flags;
@@ -222,7 +232,6 @@ struct txdone_entry_desc {
222 * 232 *
223 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame. 233 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
224 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame. 234 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
225 * @ENTRY_TXD_OFDM_RATE: This frame is send out with an OFDM rate.
226 * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter. 235 * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter.
227 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame. 236 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
228 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment. 237 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
@@ -238,7 +247,6 @@ struct txdone_entry_desc {
238enum txentry_desc_flags { 247enum txentry_desc_flags {
239 ENTRY_TXD_RTS_FRAME, 248 ENTRY_TXD_RTS_FRAME,
240 ENTRY_TXD_CTS_FRAME, 249 ENTRY_TXD_CTS_FRAME,
241 ENTRY_TXD_OFDM_RATE,
242 ENTRY_TXD_GENERATE_SEQ, 250 ENTRY_TXD_GENERATE_SEQ,
243 ENTRY_TXD_FIRST_FRAGMENT, 251 ENTRY_TXD_FIRST_FRAGMENT,
244 ENTRY_TXD_MORE_FRAG, 252 ENTRY_TXD_MORE_FRAG,
@@ -263,6 +271,7 @@ enum txentry_desc_flags {
263 * @length_low: PLCP length low word. 271 * @length_low: PLCP length low word.
264 * @signal: PLCP signal. 272 * @signal: PLCP signal.
265 * @service: PLCP service. 273 * @service: PLCP service.
274 * @rate_mode: Rate mode (See @enum rate_modulation).
266 * @retry_limit: Max number of retries. 275 * @retry_limit: Max number of retries.
267 * @aifs: AIFS value. 276 * @aifs: AIFS value.
268 * @ifs: IFS value. 277 * @ifs: IFS value.
@@ -282,6 +291,8 @@ struct txentry_desc {
282 u16 signal; 291 u16 signal;
283 u16 service; 292 u16 service;
284 293
294 u16 rate_mode;
295
285 short retry_limit; 296 short retry_limit;
286 short aifs; 297 short aifs;
287 short ifs; 298 short ifs;
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index c2fba7c9f05..861322d97fc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -125,6 +125,26 @@ enum cipher {
125}; 125};
126 126
127/* 127/*
128 * Rate modulations
129 */
130enum rate_modulation {
131 RATE_MODE_CCK = 0,
132 RATE_MODE_OFDM = 1,
133 RATE_MODE_HT_MIX = 2,
134 RATE_MODE_HT_GREENFIELD = 3,
135};
136
137/*
138 * Firmware validation error codes
139 */
140enum firmware_errors {
141 FW_OK,
142 FW_BAD_CRC,
143 FW_BAD_LENGTH,
144 FW_BAD_VERSION,
145};
146
147/*
128 * Register handlers. 148 * Register handlers.
129 * We store the position of a register field inside a field structure, 149 * We store the position of a register field inside a field structure,
130 * This will simplify the process of setting and reading a certain field 150 * This will simplify the process of setting and reading a certain field
diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
index 3298cae1e12..b6d4c6700bf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -25,73 +25,30 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/rfkill.h>
29 28
30#include "rt2x00.h" 29#include "rt2x00.h"
31#include "rt2x00lib.h" 30#include "rt2x00lib.h"
32 31
33static int rt2x00rfkill_toggle_radio(void *data, enum rfkill_state state) 32static void rt2x00rfkill_poll(struct input_polled_dev *poll_dev)
34{ 33{
35 struct rt2x00_dev *rt2x00dev = data; 34 struct rt2x00_dev *rt2x00dev = poll_dev->private;
36 int retval = 0; 35 int state, old_state;
37
38 if (unlikely(!rt2x00dev))
39 return 0;
40
41 /*
42 * Only continue if there are enabled interfaces.
43 */
44 if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
45 return 0;
46
47 if (state == RFKILL_STATE_UNBLOCKED) {
48 INFO(rt2x00dev, "RFKILL event: enabling radio.\n");
49 clear_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
50 retval = rt2x00lib_enable_radio(rt2x00dev);
51 } else if (state == RFKILL_STATE_SOFT_BLOCKED) {
52 INFO(rt2x00dev, "RFKILL event: disabling radio.\n");
53 set_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
54 rt2x00lib_disable_radio(rt2x00dev);
55 } else {
56 WARNING(rt2x00dev, "RFKILL event: unknown state %d.\n", state);
57 }
58
59 return retval;
60}
61
62static int rt2x00rfkill_get_state(void *data, enum rfkill_state *state)
63{
64 struct rt2x00_dev *rt2x00dev = data;
65
66 /*
67 * rfkill_poll reports 1 when the key has been pressed and the
68 * radio should be blocked.
69 */
70 *state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
71 RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
72
73 return 0;
74}
75
76static void rt2x00rfkill_poll(struct work_struct *work)
77{
78 struct rt2x00_dev *rt2x00dev =
79 container_of(work, struct rt2x00_dev, rfkill_work.work);
80 enum rfkill_state state;
81 36
82 if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state) || 37 if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state) ||
83 !test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) 38 !test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
84 return; 39 return;
85 40
86 /* 41 /*
87 * Poll latest state and report it to rfkill who should sort 42 * Poll the latest state; if the state differs from the previous state,
88 * out if the state should be toggled or not. 43 * we should generate an input event.
89 */ 44 */
90 if (!rt2x00rfkill_get_state(rt2x00dev, &state)) 45 state = !!rt2x00dev->ops->lib->rfkill_poll(rt2x00dev);
91 rfkill_force_state(rt2x00dev->rfkill, state); 46 old_state = !!test_bit(RFKILL_STATE_BLOCKED, &rt2x00dev->rfkill_state);
92 47
93 queue_delayed_work(rt2x00dev->hw->workqueue, 48 if (old_state != state) {
94 &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL); 49 input_report_switch(poll_dev->input, SW_RFKILL_ALL, state);
50 change_bit(RFKILL_STATE_BLOCKED, &rt2x00dev->rfkill_state);
51 }
95} 52}
96 53
97void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) 54void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
@@ -100,8 +57,8 @@ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
100 test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) 57 test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
101 return; 58 return;
102 59
103 if (rfkill_register(rt2x00dev->rfkill)) { 60 if (input_register_polled_device(rt2x00dev->rfkill_poll_dev)) {
104 ERROR(rt2x00dev, "Failed to register rfkill handler.\n"); 61 ERROR(rt2x00dev, "Failed to register polled device.\n");
105 return; 62 return;
106 } 63 }
107 64
@@ -109,10 +66,10 @@ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
109 66
110 /* 67 /*
111 * Force initial poll which will detect the initial device state, 68 * Force initial poll which will detect the initial device state,
112 * and correctly sends the signal to the rfkill layer about this 69 * and correctly sends the signal to the input layer about this
113 * state. 70 * state.
114 */ 71 */
115 rt2x00rfkill_poll(&rt2x00dev->rfkill_work.work); 72 rt2x00rfkill_poll(rt2x00dev->rfkill_poll_dev);
116} 73}
117 74
118void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) 75void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
@@ -121,52 +78,50 @@ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
121 !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) 78 !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
122 return; 79 return;
123 80
124 cancel_delayed_work_sync(&rt2x00dev->rfkill_work); 81 input_unregister_polled_device(rt2x00dev->rfkill_poll_dev);
125
126 rfkill_unregister(rt2x00dev->rfkill);
127 82
128 __clear_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state); 83 __clear_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state);
129} 84}
130 85
131void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) 86void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
132{ 87{
133 struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy); 88 struct input_polled_dev *poll_dev;
134 89
135 if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state)) 90 if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
91 !test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
136 return; 92 return;
137 93
138 rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN); 94 poll_dev = input_allocate_polled_device();
139 if (!rt2x00dev->rfkill) { 95 if (!poll_dev) {
140 ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n"); 96 ERROR(rt2x00dev, "Failed to allocate polled device.\n");
141 return; 97 return;
142 } 98 }
143 99
144 __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state); 100 poll_dev->private = rt2x00dev;
101 poll_dev->poll = rt2x00rfkill_poll;
102 poll_dev->poll_interval = RFKILL_POLL_INTERVAL;
145 103
146 rt2x00dev->rfkill->name = rt2x00dev->ops->name; 104 poll_dev->input->name = rt2x00dev->ops->name;
147 rt2x00dev->rfkill->data = rt2x00dev; 105 poll_dev->input->phys = wiphy_name(rt2x00dev->hw->wiphy);
148 rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio; 106 poll_dev->input->id.bustype = BUS_HOST;
149 if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) { 107 poll_dev->input->id.vendor = 0x1814;
150 rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state; 108 poll_dev->input->id.product = rt2x00dev->chip.rt;
151 rt2x00dev->rfkill->state = 109 poll_dev->input->id.version = rt2x00dev->chip.rev;
152 rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ? 110 poll_dev->input->dev.parent = wiphy_dev(rt2x00dev->hw->wiphy);
153 RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED; 111 poll_dev->input->evbit[0] = BIT(EV_SW);
154 } else { 112 poll_dev->input->swbit[0] = BIT(SW_RFKILL_ALL);
155 rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED;
156 }
157 113
158 INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll); 114 rt2x00dev->rfkill_poll_dev = poll_dev;
159 115
160 return; 116 __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state);
161} 117}
162 118
163void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) 119void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
164{ 120{
165 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state)) 121 if (!__test_and_clear_bit(RFKILL_STATE_ALLOCATED,
122 &rt2x00dev->rfkill_state))
166 return; 123 return;
167 124
168 cancel_delayed_work_sync(&rt2x00dev->rfkill_work); 125 input_free_polled_device(rt2x00dev->rfkill_poll_dev);
169 126 rt2x00dev->rfkill_poll_dev = NULL;
170 rfkill_free(rt2x00dev->rfkill);
171 rt2x00dev->rfkill = NULL;
172} 127}
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 0b29d767a25..7d50ca82375 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -296,6 +296,41 @@ void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
296} 296}
297EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue); 297EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
298 298
299void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
300 const enum data_queue_qid qid)
301{
302 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
303 struct queue_entry_priv_usb *entry_priv;
304 struct queue_entry_priv_usb_bcn *bcn_priv;
305 unsigned int i;
306 bool kill_guard;
307
308 /*
309 * When killing the beacon queue, we must also kill
310 * the beacon guard byte.
311 */
312 kill_guard =
313 (qid == QID_BEACON) &&
314 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags));
315
316 /*
317 * Cancel all entries.
318 */
319 for (i = 0; i < queue->limit; i++) {
320 entry_priv = queue->entries[i].priv_data;
321 usb_kill_urb(entry_priv->urb);
322
323 /*
324 * Kill guardian urb (if required by driver).
325 */
326 if (kill_guard) {
327 bcn_priv = queue->entries[i].priv_data;
328 usb_kill_urb(bcn_priv->guardian_urb);
329 }
330 }
331}
332EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
333
299/* 334/*
300 * RX data handlers. 335 * RX data handlers.
301 */ 336 */
@@ -338,35 +373,14 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
338 */ 373 */
339void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) 374void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
340{ 375{
341 struct queue_entry_priv_usb *entry_priv;
342 struct queue_entry_priv_usb_bcn *bcn_priv;
343 struct data_queue *queue;
344 unsigned int i;
345
346 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0, 376 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
347 REGISTER_TIMEOUT); 377 REGISTER_TIMEOUT);
348 378
349 /* 379 /*
350 * Cancel all queues. 380 * The USB version of kill_tx_queue also works
381 * on the RX queue.
351 */ 382 */
352 queue_for_each(rt2x00dev, queue) { 383 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_RX);
353 for (i = 0; i < queue->limit; i++) {
354 entry_priv = queue->entries[i].priv_data;
355 usb_kill_urb(entry_priv->urb);
356 }
357 }
358
359 /*
360 * Kill guardian urb (if required by driver).
361 */
362 if (!test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
363 return;
364
365 for (i = 0; i < rt2x00dev->bcn->limit; i++) {
366 bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
367 if (bcn_priv->guardian_urb)
368 usb_kill_urb(bcn_priv->guardian_urb);
369 }
370} 384}
371EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 385EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
372 386
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 2bd4ac855f5..bd2d59c85f1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -419,6 +419,17 @@ struct queue_entry_priv_usb_bcn {
419void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 419void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
420 const enum data_queue_qid qid); 420 const enum data_queue_qid qid);
421 421
422/**
423 * rt2x00usb_kill_tx_queue - Kill data queue
424 * @rt2x00dev: Pointer to &struct rt2x00_dev
425 * @qid: Data queue to kill
426 *
427 * This will walk through all entries of the queue and kill all
 428 * previously kicked frames before they can be sent.
429 */
430void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
431 const enum data_queue_qid qid);
432
422/* 433/*
423 * Device initialization handlers. 434 * Device initialization handlers.
424 */ 435 */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 987e89009f7..0be147f364e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -146,12 +146,6 @@ static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev,
146 mutex_unlock(&rt2x00dev->csr_mutex); 146 mutex_unlock(&rt2x00dev->csr_mutex);
147} 147}
148 148
149#ifdef CONFIG_RT2X00_LIB_LEDS
150/*
151 * This function is only called from rt61pci_led_brightness()
152 * make gcc happy by placing this function inside the
153 * same ifdef statement as the caller.
154 */
155static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev, 149static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
156 const u8 command, const u8 token, 150 const u8 command, const u8 token,
157 const u8 arg0, const u8 arg1) 151 const u8 arg0, const u8 arg1)
@@ -180,7 +174,6 @@ static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
180 mutex_unlock(&rt2x00dev->csr_mutex); 174 mutex_unlock(&rt2x00dev->csr_mutex);
181 175
182} 176}
183#endif /* CONFIG_RT2X00_LIB_LEDS */
184 177
185static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 178static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
186{ 179{
@@ -967,6 +960,50 @@ static void rt61pci_config_duration(struct rt2x00_dev *rt2x00dev,
967 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 960 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
968} 961}
969 962
963static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
964 struct rt2x00lib_conf *libconf)
965{
966 enum dev_state state =
967 (libconf->conf->flags & IEEE80211_CONF_PS) ?
968 STATE_SLEEP : STATE_AWAKE;
969 u32 reg;
970
971 if (state == STATE_SLEEP) {
972 rt2x00pci_register_read(rt2x00dev, MAC_CSR11, &reg);
973 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
974 libconf->conf->beacon_int - 10);
975 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
976 libconf->conf->listen_interval - 1);
977 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5);
978
979 /* We must first disable autowake before it can be enabled */
980 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
981 rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg);
982
983 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1);
984 rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg);
985
986 rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000005);
987 rt2x00pci_register_write(rt2x00dev, IO_CNTL_CSR, 0x0000001c);
988 rt2x00pci_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000060);
989
990 rt61pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 0);
991 } else {
992 rt2x00pci_register_read(rt2x00dev, MAC_CSR11, &reg);
993 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
994 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
995 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
996 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
997 rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg);
998
999 rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007);
1000 rt2x00pci_register_write(rt2x00dev, IO_CNTL_CSR, 0x00000018);
1001 rt2x00pci_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000020);
1002
1003 rt61pci_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
1004 }
1005}
1006
970static void rt61pci_config(struct rt2x00_dev *rt2x00dev, 1007static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
971 struct rt2x00lib_conf *libconf, 1008 struct rt2x00lib_conf *libconf,
972 const unsigned int flags) 1009 const unsigned int flags)
@@ -984,6 +1021,8 @@ static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
984 rt61pci_config_retry_limit(rt2x00dev, libconf); 1021 rt61pci_config_retry_limit(rt2x00dev, libconf);
985 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) 1022 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
986 rt61pci_config_duration(rt2x00dev, libconf); 1023 rt61pci_config_duration(rt2x00dev, libconf);
1024 if (flags & IEEE80211_CONF_CHANGE_PS)
1025 rt61pci_config_ps(rt2x00dev, libconf);
987} 1026}
988 1027
989/* 1028/*
@@ -1007,21 +1046,28 @@ static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev,
1007 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); 1046 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR);
1008} 1047}
1009 1048
1010static void rt61pci_reset_tuner(struct rt2x00_dev *rt2x00dev) 1049static inline void rt61pci_set_vgc(struct rt2x00_dev *rt2x00dev,
1050 struct link_qual *qual, u8 vgc_level)
1011{ 1051{
1012 rt61pci_bbp_write(rt2x00dev, 17, 0x20); 1052 if (qual->vgc_level != vgc_level) {
1013 rt2x00dev->link.vgc_level = 0x20; 1053 rt61pci_bbp_write(rt2x00dev, 17, vgc_level);
1054 qual->vgc_level = vgc_level;
1055 qual->vgc_level_reg = vgc_level;
1056 }
1057}
1058
1059static void rt61pci_reset_tuner(struct rt2x00_dev *rt2x00dev,
1060 struct link_qual *qual)
1061{
1062 rt61pci_set_vgc(rt2x00dev, qual, 0x20);
1014} 1063}
1015 1064
1016static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev) 1065static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
1066 struct link_qual *qual, const u32 count)
1017{ 1067{
1018 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
1019 u8 r17;
1020 u8 up_bound; 1068 u8 up_bound;
1021 u8 low_bound; 1069 u8 low_bound;
1022 1070
1023 rt61pci_bbp_read(rt2x00dev, 17, &r17);
1024
1025 /* 1071 /*
1026 * Determine r17 bounds. 1072 * Determine r17 bounds.
1027 */ 1073 */
@@ -1051,38 +1097,32 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev)
1051 /* 1097 /*
1052 * Special big-R17 for very short distance 1098 * Special big-R17 for very short distance
1053 */ 1099 */
1054 if (rssi >= -35) { 1100 if (qual->rssi >= -35) {
1055 if (r17 != 0x60) 1101 rt61pci_set_vgc(rt2x00dev, qual, 0x60);
1056 rt61pci_bbp_write(rt2x00dev, 17, 0x60);
1057 return; 1102 return;
1058 } 1103 }
1059 1104
1060 /* 1105 /*
1061 * Special big-R17 for short distance 1106 * Special big-R17 for short distance
1062 */ 1107 */
1063 if (rssi >= -58) { 1108 if (qual->rssi >= -58) {
1064 if (r17 != up_bound) 1109 rt61pci_set_vgc(rt2x00dev, qual, up_bound);
1065 rt61pci_bbp_write(rt2x00dev, 17, up_bound);
1066 return; 1110 return;
1067 } 1111 }
1068 1112
1069 /* 1113 /*
1070 * Special big-R17 for middle-short distance 1114 * Special big-R17 for middle-short distance
1071 */ 1115 */
1072 if (rssi >= -66) { 1116 if (qual->rssi >= -66) {
1073 low_bound += 0x10; 1117 rt61pci_set_vgc(rt2x00dev, qual, low_bound + 0x10);
1074 if (r17 != low_bound)
1075 rt61pci_bbp_write(rt2x00dev, 17, low_bound);
1076 return; 1118 return;
1077 } 1119 }
1078 1120
1079 /* 1121 /*
1080 * Special mid-R17 for middle distance 1122 * Special mid-R17 for middle distance
1081 */ 1123 */
1082 if (rssi >= -74) { 1124 if (qual->rssi >= -74) {
1083 low_bound += 0x08; 1125 rt61pci_set_vgc(rt2x00dev, qual, low_bound + 0x08);
1084 if (r17 != low_bound)
1085 rt61pci_bbp_write(rt2x00dev, 17, low_bound);
1086 return; 1126 return;
1087 } 1127 }
1088 1128
@@ -1090,12 +1130,12 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev)
1090 * Special case: Change up_bound based on the rssi. 1130 * Special case: Change up_bound based on the rssi.
1091 * Lower up_bound when rssi is weaker than -74 dBm. 1131 * Lower up_bound when rssi is weaker than -74 dBm.
1092 */ 1132 */
1093 up_bound -= 2 * (-74 - rssi); 1133 up_bound -= 2 * (-74 - qual->rssi);
1094 if (low_bound > up_bound) 1134 if (low_bound > up_bound)
1095 up_bound = low_bound; 1135 up_bound = low_bound;
1096 1136
1097 if (r17 > up_bound) { 1137 if (qual->vgc_level > up_bound) {
1098 rt61pci_bbp_write(rt2x00dev, 17, up_bound); 1138 rt61pci_set_vgc(rt2x00dev, qual, up_bound);
1099 return; 1139 return;
1100 } 1140 }
1101 1141
@@ -1105,15 +1145,10 @@ dynamic_cca_tune:
1105 * r17 does not yet exceed upper limit, continue and base 1145 * r17 does not yet exceed upper limit, continue and base
1106 * the r17 tuning on the false CCA count. 1146 * the r17 tuning on the false CCA count.
1107 */ 1147 */
1108 if (rt2x00dev->link.qual.false_cca > 512 && r17 < up_bound) { 1148 if ((qual->false_cca > 512) && (qual->vgc_level < up_bound))
1109 if (++r17 > up_bound) 1149 rt61pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level);
1110 r17 = up_bound; 1150 else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound))
1111 rt61pci_bbp_write(rt2x00dev, 17, r17); 1151 rt61pci_set_vgc(rt2x00dev, qual, --qual->vgc_level);
1112 } else if (rt2x00dev->link.qual.false_cca < 100 && r17 > low_bound) {
1113 if (--r17 < low_bound)
1114 r17 = low_bound;
1115 rt61pci_bbp_write(rt2x00dev, 17, r17);
1116 }
1117} 1152}
1118 1153
1119/* 1154/*
@@ -1141,25 +1176,37 @@ static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
1141 return fw_name; 1176 return fw_name;
1142} 1177}
1143 1178
1144static u16 rt61pci_get_firmware_crc(const void *data, const size_t len) 1179static int rt61pci_check_firmware(struct rt2x00_dev *rt2x00dev,
1180 const u8 *data, const size_t len)
1145{ 1181{
1182 u16 fw_crc;
1146 u16 crc; 1183 u16 crc;
1147 1184
1148 /* 1185 /*
1149 * Use the crc itu-t algorithm. 1186 * Only support 8kb firmware files.
1187 */
1188 if (len != 8192)
1189 return FW_BAD_LENGTH;
1190
1191 /*
1150 * The last 2 bytes in the firmware array are the crc checksum itself, 1192 * The last 2 bytes in the firmware array are the crc checksum itself,
1151 * this means that we should never pass those 2 bytes to the crc 1193 * this means that we should never pass those 2 bytes to the crc
1152 * algorithm. 1194 * algorithm.
1153 */ 1195 */
1196 fw_crc = (data[len - 2] << 8 | data[len - 1]);
1197
1198 /*
1199 * Use the crc itu-t algorithm.
1200 */
1154 crc = crc_itu_t(0, data, len - 2); 1201 crc = crc_itu_t(0, data, len - 2);
1155 crc = crc_itu_t_byte(crc, 0); 1202 crc = crc_itu_t_byte(crc, 0);
1156 crc = crc_itu_t_byte(crc, 0); 1203 crc = crc_itu_t_byte(crc, 0);
1157 1204
1158 return crc; 1205 return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
1159} 1206}
1160 1207
1161static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data, 1208static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev,
1162 const size_t len) 1209 const u8 *data, const size_t len)
1163{ 1210{
1164 int i; 1211 int i;
1165 u32 reg; 1212 u32 reg;
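
The check_firmware callback above now validates the image itself rather than handing a bare CRC back to rt2x00lib. A minimal stand-alone sketch of that check, assuming (as the crc_itu_t_byte() calls suggest) that the last two bytes of the image carry a big-endian CRC-ITU-T of the payload padded with two zero bytes; the helper name is illustrative and not part of the patch:

#include <linux/types.h>
#include <linux/crc-itu-t.h>

static bool fw_image_crc_ok(const u8 *data, size_t len)
{
	u16 stored, calc;

	if (len < 2)
		return false;

	/* The last two bytes hold the checksum, big endian. */
	stored = (data[len - 2] << 8) | data[len - 1];

	/* CRC over the payload, padded for the two bytes the checksum occupies. */
	calc = crc_itu_t(0, data, len - 2);
	calc = crc_itu_t_byte(calc, 0);
	calc = crc_itu_t_byte(calc, 0);

	return stored == calc;
}
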
@@ -1656,24 +1703,10 @@ static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
1656 1703
1657static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev) 1704static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1658{ 1705{
1659 u32 reg;
1660
1661 rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818);
1662
1663 /*
1664 * Disable synchronisation.
1665 */
1666 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0);
1667
1668 /* 1706 /*
1669 * Cancel RX and TX. 1707 * Disable power
1670 */ 1708 */
1671 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1709 rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818);
1672 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, 1);
1673 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1);
1674 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1);
1675 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1);
1676 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1677} 1710}
1678 1711
1679static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1712static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
@@ -1812,7 +1845,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1812 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1845 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1813 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1846 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1814 rt2x00_set_field32(&word, TXD_W0_OFDM, 1847 rt2x00_set_field32(&word, TXD_W0_OFDM,
1815 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1848 (txdesc->rate_mode == RATE_MODE_OFDM));
1816 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1849 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1817 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1850 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1818 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1851 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
@@ -1896,6 +1929,24 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1896 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1929 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1897} 1930}
1898 1931
1932static void rt61pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
1933 const enum data_queue_qid qid)
1934{
1935 u32 reg;
1936
1937 if (qid == QID_BEACON) {
1938 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0);
1939 return;
1940 }
1941
1942 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1943 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (qid == QID_AC_BE));
1944 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (qid == QID_AC_BK));
1945 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (qid == QID_AC_VI));
1946 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (qid == QID_AC_VO));
1947 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1948}
1949
1899/* 1950/*
1900 * RX control handlers 1951 * RX control handlers
1901 */ 1952 */
@@ -2195,7 +2246,8 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2195 if (word == 0xffff) { 2246 if (word == 0xffff) {
2196 rt2x00_set_field16(&word, EEPROM_NIC_ENABLE_DIVERSITY, 0); 2247 rt2x00_set_field16(&word, EEPROM_NIC_ENABLE_DIVERSITY, 0);
2197 rt2x00_set_field16(&word, EEPROM_NIC_TX_DIVERSITY, 0); 2248 rt2x00_set_field16(&word, EEPROM_NIC_TX_DIVERSITY, 0);
2198 rt2x00_set_field16(&word, EEPROM_NIC_TX_RX_FIXED, 0); 2249 rt2x00_set_field16(&word, EEPROM_NIC_RX_FIXED, 0);
2250 rt2x00_set_field16(&word, EEPROM_NIC_TX_FIXED, 0);
2199 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0); 2251 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
2200 rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); 2252 rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
2201 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); 2253 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
@@ -2339,24 +2391,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2339 */ 2391 */
2340 if (rt2x00_rf(&rt2x00dev->chip, RF2529) && 2392 if (rt2x00_rf(&rt2x00dev->chip, RF2529) &&
2341 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) { 2393 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
2342 switch (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_RX_FIXED)) { 2394 rt2x00dev->default_ant.rx =
2343 case 0: 2395 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
2344 rt2x00dev->default_ant.tx = ANTENNA_B; 2396 rt2x00dev->default_ant.tx =
2345 rt2x00dev->default_ant.rx = ANTENNA_A; 2397 ANTENNA_B - rt2x00_get_field16(eeprom, EEPROM_NIC_TX_FIXED);
2346 break;
2347 case 1:
2348 rt2x00dev->default_ant.tx = ANTENNA_B;
2349 rt2x00dev->default_ant.rx = ANTENNA_B;
2350 break;
2351 case 2:
2352 rt2x00dev->default_ant.tx = ANTENNA_A;
2353 rt2x00dev->default_ant.rx = ANTENNA_A;
2354 break;
2355 case 3:
2356 rt2x00dev->default_ant.tx = ANTENNA_A;
2357 rt2x00dev->default_ant.rx = ANTENNA_B;
2358 break;
2359 }
2360 2398
2361 if (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) 2399 if (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY))
2362 rt2x00dev->default_ant.tx = ANTENNA_SW_DIVERSITY; 2400 rt2x00dev->default_ant.tx = ANTENNA_SW_DIVERSITY;
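
The switch statement removed above is folded into arithmetic on the two new one-bit EEPROM fields. This relies on ANTENNA_A and ANTENNA_B being adjacent enum values; a stand-alone sketch of the mapping (the enum here is a local stand-in mirroring the values assumed from rt2x00.h):

/* Local stand-in for the rt2x00 antenna enum; values assumed adjacent. */
enum antenna { ANTENNA_A = 1, ANTENNA_B = 2 };

/* rx_fixed/tx_fixed are the one-bit EEPROM_NIC_RX_FIXED/TX_FIXED values. */
static enum antenna default_rx_antenna(unsigned int rx_fixed)
{
	return ANTENNA_A + rx_fixed;	/* 0 -> ANTENNA_A, 1 -> ANTENNA_B */
}

static enum antenna default_tx_antenna(unsigned int tx_fixed)
{
	return ANTENNA_B - tx_fixed;	/* 0 -> ANTENNA_B, 1 -> ANTENNA_A */
}

This reproduces all four antenna combinations that the removed cases 0-3 of the old two-bit field selected.
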
@@ -2534,7 +2572,9 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2534 */ 2572 */
2535 rt2x00dev->hw->flags = 2573 rt2x00dev->hw->flags =
2536 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2574 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2537 IEEE80211_HW_SIGNAL_DBM; 2575 IEEE80211_HW_SIGNAL_DBM |
2576 IEEE80211_HW_SUPPORTS_PS |
2577 IEEE80211_HW_PS_NULLFUNC_STACK;
2538 rt2x00dev->hw->extra_tx_headroom = 0; 2578 rt2x00dev->hw->extra_tx_headroom = 0;
2539 2579
2540 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 2580 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
@@ -2633,6 +2673,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2633 struct rt2x00_field32 field; 2673 struct rt2x00_field32 field;
2634 int retval; 2674 int retval;
2635 u32 reg; 2675 u32 reg;
2676 u32 offset;
2636 2677
2637 /* 2678 /*
2638 * First pass the configuration through rt2x00lib, that will 2679 * First pass the configuration through rt2x00lib, that will
@@ -2644,24 +2685,23 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2644 if (retval) 2685 if (retval)
2645 return retval; 2686 return retval;
2646 2687
2688 /*
2689 * We only need to perform additional register initialization
2690 * for WMM queues.
2691 */
2692 if (queue_idx >= 4)
2693 return 0;
2694
2647 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 2695 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2648 2696
2649 /* Update WMM TXOP register */ 2697 /* Update WMM TXOP register */
2650 if (queue_idx < 2) { 2698 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
2651 field.bit_offset = queue_idx * 16; 2699 field.bit_offset = (queue_idx & 1) * 16;
2652 field.bit_mask = 0xffff << field.bit_offset; 2700 field.bit_mask = 0xffff << field.bit_offset;
2653 2701
2654 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, &reg); 2702 rt2x00pci_register_read(rt2x00dev, offset, &reg);
2655 rt2x00_set_field32(&reg, field, queue->txop); 2703 rt2x00_set_field32(&reg, field, queue->txop);
2656 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg); 2704 rt2x00pci_register_write(rt2x00dev, offset, reg);
2657 } else if (queue_idx < 4) {
2658 field.bit_offset = (queue_idx - 2) * 16;
2659 field.bit_mask = 0xffff << field.bit_offset;
2660
2661 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
2662 rt2x00_set_field32(&reg, field, queue->txop);
2663 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
2664 }
2665 2705
2666 /* Update WMM registers */ 2706 /* Update WMM registers */
2667 field.bit_offset = queue_idx * 4; 2707 field.bit_offset = queue_idx * 4;
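
The rewritten TXOP update treats AC_TXOP_CSR0 and AC_TXOP_CSR1 as two consecutive 32-bit registers, each carrying two 16-bit TXOP values. A small sketch of the index arithmetic (the same computation reappears in rt73usb_conf_tx below); the helper is illustrative only:

/*
 * queue_idx 0..3: queues 0 and 1 live in AC_TXOP_CSR0, queues 2 and 3
 * in AC_TXOP_CSR1; even queues use bits 0-15, odd queues bits 16-31.
 */
static void txop_location(unsigned int queue_idx,
			  unsigned int *csr_index, unsigned int *bit_offset)
{
	*csr_index = !!(queue_idx & 2);		/* 0 = AC_TXOP_CSR0, 1 = AC_TXOP_CSR1 */
	*bit_offset = (queue_idx & 1) * 16;
}
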
@@ -2717,7 +2757,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2717 .irq_handler = rt61pci_interrupt, 2757 .irq_handler = rt61pci_interrupt,
2718 .probe_hw = rt61pci_probe_hw, 2758 .probe_hw = rt61pci_probe_hw,
2719 .get_firmware_name = rt61pci_get_firmware_name, 2759 .get_firmware_name = rt61pci_get_firmware_name,
2720 .get_firmware_crc = rt61pci_get_firmware_crc, 2760 .check_firmware = rt61pci_check_firmware,
2721 .load_firmware = rt61pci_load_firmware, 2761 .load_firmware = rt61pci_load_firmware,
2722 .initialize = rt2x00pci_initialize, 2762 .initialize = rt2x00pci_initialize,
2723 .uninitialize = rt2x00pci_uninitialize, 2763 .uninitialize = rt2x00pci_uninitialize,
@@ -2732,6 +2772,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2732 .write_tx_data = rt2x00pci_write_tx_data, 2772 .write_tx_data = rt2x00pci_write_tx_data,
2733 .write_beacon = rt61pci_write_beacon, 2773 .write_beacon = rt61pci_write_beacon,
2734 .kick_tx_queue = rt61pci_kick_tx_queue, 2774 .kick_tx_queue = rt61pci_kick_tx_queue,
2775 .kill_tx_queue = rt61pci_kill_tx_queue,
2735 .fill_rxdone = rt61pci_fill_rxdone, 2776 .fill_rxdone = rt61pci_fill_rxdone,
2736 .config_shared_key = rt61pci_config_shared_key, 2777 .config_shared_key = rt61pci_config_shared_key,
2737 .config_pairwise_key = rt61pci_config_pairwise_key, 2778 .config_pairwise_key = rt61pci_config_pairwise_key,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 65fe3332364..2f97fee7a8d 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -88,8 +88,10 @@
88 88
89/* 89/*
90 * SOFT_RESET_CSR 90 * SOFT_RESET_CSR
 91 * FORCE_CLOCK_ON: Host forces the MAC clock ON
91 */ 92 */
92#define SOFT_RESET_CSR 0x0010 93#define SOFT_RESET_CSR 0x0010
94#define SOFT_RESET_CSR_FORCE_CLOCK_ON FIELD32(0x00000002)
93 95
94/* 96/*
95 * MCU_INT_SOURCE_CSR: MCU interrupt source/mask register. 97 * MCU_INT_SOURCE_CSR: MCU interrupt source/mask register.
@@ -1054,8 +1056,10 @@ struct hw_pairwise_ta_entry {
1054 1056
1055/* 1057/*
1056 * IO_CNTL_CSR 1058 * IO_CNTL_CSR
1059 * RF_PS: Set RF interface value to power save
1057 */ 1060 */
1058#define IO_CNTL_CSR 0x3498 1061#define IO_CNTL_CSR 0x3498
1062#define IO_CNTL_CSR_RF_PS FIELD32(0x00000004)
1059 1063
1060/* 1064/*
1061 * UART_INT_SOURCE_CSR 1065 * UART_INT_SOURCE_CSR
@@ -1186,7 +1190,8 @@ struct hw_pairwise_ta_entry {
1186#define EEPROM_NIC 0x0011 1190#define EEPROM_NIC 0x0011
1187#define EEPROM_NIC_ENABLE_DIVERSITY FIELD16(0x0001) 1191#define EEPROM_NIC_ENABLE_DIVERSITY FIELD16(0x0001)
1188#define EEPROM_NIC_TX_DIVERSITY FIELD16(0x0002) 1192#define EEPROM_NIC_TX_DIVERSITY FIELD16(0x0002)
1189#define EEPROM_NIC_TX_RX_FIXED FIELD16(0x000c) 1193#define EEPROM_NIC_RX_FIXED FIELD16(0x0004)
1194#define EEPROM_NIC_TX_FIXED FIELD16(0x0008)
1190#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0010) 1195#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0010)
1191#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0020) 1196#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0020)
1192#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0040) 1197#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0040)
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 96a8d69f879..6521dac7ec4 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -186,6 +186,18 @@ static const struct rt2x00debug rt73usb_rt2x00debug = {
186}; 186};
187#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 187#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
188 188
189#ifdef CONFIG_RT2X00_LIB_RFKILL
190static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
191{
192 u32 reg;
193
194 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
195 return rt2x00_get_field32(reg, MAC_CSR13_BIT7);
196}
197#else
198#define rt73usb_rfkill_poll NULL
199#endif /* CONFIG_RT2X00_LIB_RFKILL */
200
189#ifdef CONFIG_RT2X00_LIB_LEDS 201#ifdef CONFIG_RT2X00_LIB_LEDS
190static void rt73usb_brightness_set(struct led_classdev *led_cdev, 202static void rt73usb_brightness_set(struct led_classdev *led_cdev,
191 enum led_brightness brightness) 203 enum led_brightness brightness)
@@ -844,6 +856,44 @@ static void rt73usb_config_duration(struct rt2x00_dev *rt2x00dev,
844 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 856 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
845} 857}
846 858
859static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
860 struct rt2x00lib_conf *libconf)
861{
862 enum dev_state state =
863 (libconf->conf->flags & IEEE80211_CONF_PS) ?
864 STATE_SLEEP : STATE_AWAKE;
865 u32 reg;
866
867 if (state == STATE_SLEEP) {
868 rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
869 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
870 libconf->conf->beacon_int - 10);
871 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
872 libconf->conf->listen_interval - 1);
873 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5);
874
875 /* We must first disable autowake before it can be enabled */
876 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
877 rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
878
879 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1);
880 rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
881
882 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
883 USB_MODE_SLEEP, REGISTER_TIMEOUT);
884 } else {
885 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
886 USB_MODE_WAKEUP, REGISTER_TIMEOUT);
887
888 rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
889 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
890 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
891 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
892 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
893 rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
894 }
895}
896
847static void rt73usb_config(struct rt2x00_dev *rt2x00dev, 897static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
848 struct rt2x00lib_conf *libconf, 898 struct rt2x00lib_conf *libconf,
849 const unsigned int flags) 899 const unsigned int flags)
@@ -861,6 +911,8 @@ static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
861 rt73usb_config_retry_limit(rt2x00dev, libconf); 911 rt73usb_config_retry_limit(rt2x00dev, libconf);
862 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) 912 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
863 rt73usb_config_duration(rt2x00dev, libconf); 913 rt73usb_config_duration(rt2x00dev, libconf);
914 if (flags & IEEE80211_CONF_CHANGE_PS)
915 rt73usb_config_ps(rt2x00dev, libconf);
864} 916}
865 917
866/* 918/*
@@ -884,21 +936,28 @@ static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev,
884 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); 936 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR);
885} 937}
886 938
887static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev) 939static inline void rt73usb_set_vgc(struct rt2x00_dev *rt2x00dev,
940 struct link_qual *qual, u8 vgc_level)
941{
942 if (qual->vgc_level != vgc_level) {
943 rt73usb_bbp_write(rt2x00dev, 17, vgc_level);
944 qual->vgc_level = vgc_level;
945 qual->vgc_level_reg = vgc_level;
946 }
947}
948
949static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
950 struct link_qual *qual)
888{ 951{
889 rt73usb_bbp_write(rt2x00dev, 17, 0x20); 952 rt73usb_set_vgc(rt2x00dev, qual, 0x20);
890 rt2x00dev->link.vgc_level = 0x20;
891} 953}
892 954
893static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev) 955static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
956 struct link_qual *qual, const u32 count)
894{ 957{
895 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
896 u8 r17;
897 u8 up_bound; 958 u8 up_bound;
898 u8 low_bound; 959 u8 low_bound;
899 960
900 rt73usb_bbp_read(rt2x00dev, 17, &r17);
901
902 /* 961 /*
903 * Determine r17 bounds. 962 * Determine r17 bounds.
904 */ 963 */
@@ -911,10 +970,10 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev)
911 up_bound += 0x10; 970 up_bound += 0x10;
912 } 971 }
913 } else { 972 } else {
914 if (rssi > -82) { 973 if (qual->rssi > -82) {
915 low_bound = 0x1c; 974 low_bound = 0x1c;
916 up_bound = 0x40; 975 up_bound = 0x40;
917 } else if (rssi > -84) { 976 } else if (qual->rssi > -84) {
918 low_bound = 0x1c; 977 low_bound = 0x1c;
919 up_bound = 0x20; 978 up_bound = 0x20;
920 } else { 979 } else {
@@ -938,37 +997,32 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev)
938 /* 997 /*
939 * Special big-R17 for very short distance 998 * Special big-R17 for very short distance
940 */ 999 */
941 if (rssi > -35) { 1000 if (qual->rssi > -35) {
942 if (r17 != 0x60) 1001 rt73usb_set_vgc(rt2x00dev, qual, 0x60);
943 rt73usb_bbp_write(rt2x00dev, 17, 0x60);
944 return; 1002 return;
945 } 1003 }
946 1004
947 /* 1005 /*
948 * Special big-R17 for short distance 1006 * Special big-R17 for short distance
949 */ 1007 */
950 if (rssi >= -58) { 1008 if (qual->rssi >= -58) {
951 if (r17 != up_bound) 1009 rt73usb_set_vgc(rt2x00dev, qual, up_bound);
952 rt73usb_bbp_write(rt2x00dev, 17, up_bound);
953 return; 1010 return;
954 } 1011 }
955 1012
956 /* 1013 /*
957 * Special big-R17 for middle-short distance 1014 * Special big-R17 for middle-short distance
958 */ 1015 */
959 if (rssi >= -66) { 1016 if (qual->rssi >= -66) {
960 low_bound += 0x10; 1017 rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x10);
961 if (r17 != low_bound)
962 rt73usb_bbp_write(rt2x00dev, 17, low_bound);
963 return; 1018 return;
964 } 1019 }
965 1020
966 /* 1021 /*
967 * Special mid-R17 for middle distance 1022 * Special mid-R17 for middle distance
968 */ 1023 */
969 if (rssi >= -74) { 1024 if (qual->rssi >= -74) {
970 if (r17 != (low_bound + 0x10)) 1025 rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x08);
971 rt73usb_bbp_write(rt2x00dev, 17, low_bound + 0x08);
972 return; 1026 return;
973 } 1027 }
974 1028
@@ -976,12 +1030,12 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev)
976 * Special case: Change up_bound based on the rssi. 1030 * Special case: Change up_bound based on the rssi.
978 * Lower up_bound when rssi is weaker than -74 dBm. 1032 * Lower up_bound when rssi is weaker than -74 dBm.
978 */ 1032 */
979 up_bound -= 2 * (-74 - rssi); 1033 up_bound -= 2 * (-74 - qual->rssi);
980 if (low_bound > up_bound) 1034 if (low_bound > up_bound)
981 up_bound = low_bound; 1035 up_bound = low_bound;
982 1036
983 if (r17 > up_bound) { 1037 if (qual->vgc_level > up_bound) {
984 rt73usb_bbp_write(rt2x00dev, 17, up_bound); 1038 rt73usb_set_vgc(rt2x00dev, qual, up_bound);
985 return; 1039 return;
986 } 1040 }
987 1041
@@ -991,17 +1045,12 @@ dynamic_cca_tune:
991 * r17 does not yet exceed upper limit, continue and base 1045 * r17 does not yet exceed upper limit, continue and base
992 * the r17 tuning on the false CCA count. 1046 * the r17 tuning on the false CCA count.
993 */ 1047 */
994 if (rt2x00dev->link.qual.false_cca > 512 && r17 < up_bound) { 1048 if ((qual->false_cca > 512) && (qual->vgc_level < up_bound))
995 r17 += 4; 1049 rt73usb_set_vgc(rt2x00dev, qual,
996 if (r17 > up_bound) 1050 min_t(u8, qual->vgc_level + 4, up_bound));
997 r17 = up_bound; 1051 else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound))
998 rt73usb_bbp_write(rt2x00dev, 17, r17); 1052 rt73usb_set_vgc(rt2x00dev, qual,
999 } else if (rt2x00dev->link.qual.false_cca < 100 && r17 > low_bound) { 1053 max_t(u8, qual->vgc_level - 4, low_bound));
1000 r17 -= 4;
1001 if (r17 < low_bound)
1002 r17 = low_bound;
1003 rt73usb_bbp_write(rt2x00dev, 17, r17);
1004 }
1005} 1054}
1006 1055
1007/* 1056/*
@@ -1012,25 +1061,37 @@ static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
1012 return FIRMWARE_RT2571; 1061 return FIRMWARE_RT2571;
1013} 1062}
1014 1063
1015static u16 rt73usb_get_firmware_crc(const void *data, const size_t len) 1064static int rt73usb_check_firmware(struct rt2x00_dev *rt2x00dev,
1065 const u8 *data, const size_t len)
1016{ 1066{
1067 u16 fw_crc;
1017 u16 crc; 1068 u16 crc;
1018 1069
1019 /* 1070 /*
1020 * Use the crc itu-t algorithm. 1071 * Only support 2kb firmware files.
1072 */
1073 if (len != 2048)
1074 return FW_BAD_LENGTH;
1075
1076 /*
1021 * The last 2 bytes in the firmware array are the crc checksum itself, 1077 * The last 2 bytes in the firmware array are the crc checksum itself,
1022 * this means that we should never pass those 2 bytes to the crc 1078 * this means that we should never pass those 2 bytes to the crc
1023 * algorithm. 1079 * algorithm.
1024 */ 1080 */
1081 fw_crc = (data[len - 2] << 8 | data[len - 1]);
1082
1083 /*
1084 * Use the crc itu-t algorithm.
1085 */
1025 crc = crc_itu_t(0, data, len - 2); 1086 crc = crc_itu_t(0, data, len - 2);
1026 crc = crc_itu_t_byte(crc, 0); 1087 crc = crc_itu_t_byte(crc, 0);
1027 crc = crc_itu_t_byte(crc, 0); 1088 crc = crc_itu_t_byte(crc, 0);
1028 1089
1029 return crc; 1090 return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
1030} 1091}
1031 1092
1032static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data, 1093static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1033 const size_t len) 1094 const u8 *data, const size_t len)
1034{ 1095{
1035 unsigned int i; 1096 unsigned int i;
1036 int status; 1097 int status;
@@ -1449,7 +1510,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1449 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1510 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1450 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1511 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1451 rt2x00_set_field32(&word, TXD_W0_OFDM, 1512 rt2x00_set_field32(&word, TXD_W0_OFDM,
1452 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1513 (txdesc->rate_mode == RATE_MODE_OFDM));
1453 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1514 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1454 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1515 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1455 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1516 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
@@ -1816,6 +1877,14 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1816 __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); 1877 __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags);
1817 1878
1818 /* 1879 /*
1880 * Detect if this device has a hardware controlled radio.
1881 */
1882#ifdef CONFIG_RT2X00_LIB_RFKILL
1883 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1884 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
1885#endif /* CONFIG_RT2X00_LIB_RFKILL */
1886
1887 /*
1819 * Read frequency offset. 1888 * Read frequency offset.
1820 */ 1889 */
1821 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); 1890 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
@@ -2020,7 +2089,9 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2020 */ 2089 */
2021 rt2x00dev->hw->flags = 2090 rt2x00dev->hw->flags =
2022 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2091 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2023 IEEE80211_HW_SIGNAL_DBM; 2092 IEEE80211_HW_SIGNAL_DBM |
2093 IEEE80211_HW_SUPPORTS_PS |
2094 IEEE80211_HW_PS_NULLFUNC_STACK;
2024 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 2095 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
2025 2096
2026 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 2097 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
@@ -2121,6 +2192,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2121 struct rt2x00_field32 field; 2192 struct rt2x00_field32 field;
2122 int retval; 2193 int retval;
2123 u32 reg; 2194 u32 reg;
2195 u32 offset;
2124 2196
2125 /* 2197 /*
2126 * First pass the configuration through rt2x00lib, that will 2198 * First pass the configuration through rt2x00lib, that will
@@ -2132,24 +2204,23 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2132 if (retval) 2204 if (retval)
2133 return retval; 2205 return retval;
2134 2206
2207 /*
2208 * We only need to perform additional register initialization
2209 * for WMM queues.
2210 */
2211 if (queue_idx >= 4)
2212 return 0;
2213
2135 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 2214 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2136 2215
2137 /* Update WMM TXOP register */ 2216 /* Update WMM TXOP register */
2138 if (queue_idx < 2) { 2217 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
2139 field.bit_offset = queue_idx * 16; 2218 field.bit_offset = (queue_idx & 1) * 16;
2140 field.bit_mask = 0xffff << field.bit_offset; 2219 field.bit_mask = 0xffff << field.bit_offset;
2141 2220
2142 rt2x00usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg); 2221 rt2x00usb_register_read(rt2x00dev, offset, &reg);
2143 rt2x00_set_field32(&reg, field, queue->txop); 2222 rt2x00_set_field32(&reg, field, queue->txop);
2144 rt2x00usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg); 2223 rt2x00usb_register_write(rt2x00dev, offset, reg);
2145 } else if (queue_idx < 4) {
2146 field.bit_offset = (queue_idx - 2) * 16;
2147 field.bit_mask = 0xffff << field.bit_offset;
2148
2149 rt2x00usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
2150 rt2x00_set_field32(&reg, field, queue->txop);
2151 rt2x00usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
2152 }
2153 2224
2154 /* Update WMM registers */ 2225 /* Update WMM registers */
2155 field.bit_offset = queue_idx * 4; 2226 field.bit_offset = queue_idx * 4;
@@ -2214,12 +2285,13 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2214static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { 2285static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2215 .probe_hw = rt73usb_probe_hw, 2286 .probe_hw = rt73usb_probe_hw,
2216 .get_firmware_name = rt73usb_get_firmware_name, 2287 .get_firmware_name = rt73usb_get_firmware_name,
2217 .get_firmware_crc = rt73usb_get_firmware_crc, 2288 .check_firmware = rt73usb_check_firmware,
2218 .load_firmware = rt73usb_load_firmware, 2289 .load_firmware = rt73usb_load_firmware,
2219 .initialize = rt2x00usb_initialize, 2290 .initialize = rt2x00usb_initialize,
2220 .uninitialize = rt2x00usb_uninitialize, 2291 .uninitialize = rt2x00usb_uninitialize,
2221 .clear_entry = rt2x00usb_clear_entry, 2292 .clear_entry = rt2x00usb_clear_entry,
2222 .set_device_state = rt73usb_set_device_state, 2293 .set_device_state = rt73usb_set_device_state,
2294 .rfkill_poll = rt73usb_rfkill_poll,
2223 .link_stats = rt73usb_link_stats, 2295 .link_stats = rt73usb_link_stats,
2224 .reset_tuner = rt73usb_reset_tuner, 2296 .reset_tuner = rt73usb_reset_tuner,
2225 .link_tuner = rt73usb_link_tuner, 2297 .link_tuner = rt73usb_link_tuner,
@@ -2228,6 +2300,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2228 .write_beacon = rt73usb_write_beacon, 2300 .write_beacon = rt73usb_write_beacon,
2229 .get_tx_data_len = rt73usb_get_tx_data_len, 2301 .get_tx_data_len = rt73usb_get_tx_data_len,
2230 .kick_tx_queue = rt73usb_kick_tx_queue, 2302 .kick_tx_queue = rt73usb_kick_tx_queue,
2303 .kill_tx_queue = rt2x00usb_kill_tx_queue,
2231 .fill_rxdone = rt73usb_fill_rxdone, 2304 .fill_rxdone = rt73usb_fill_rxdone,
2232 .config_shared_key = rt73usb_config_shared_key, 2305 .config_shared_key = rt73usb_config_shared_key,
2233 .config_pairwise_key = rt73usb_config_pairwise_key, 2306 .config_pairwise_key = rt73usb_config_pairwise_key,
@@ -2295,6 +2368,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2295 /* Billionton */ 2368 /* Billionton */
2296 { USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) }, 2369 { USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) },
2297 /* Buffalo */ 2370 /* Buffalo */
2371 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
2298 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, 2372 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
2299 /* CNet */ 2373 /* CNet */
2300 { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) }, 2374 { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 46e1405eb0e..834b28ce6cd 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -267,6 +267,19 @@ struct hw_pairwise_ta_entry {
267 * MAC_CSR13: GPIO. 267 * MAC_CSR13: GPIO.
268 */ 268 */
269#define MAC_CSR13 0x3034 269#define MAC_CSR13 0x3034
270#define MAC_CSR13_BIT0 FIELD32(0x00000001)
271#define MAC_CSR13_BIT1 FIELD32(0x00000002)
272#define MAC_CSR13_BIT2 FIELD32(0x00000004)
273#define MAC_CSR13_BIT3 FIELD32(0x00000008)
274#define MAC_CSR13_BIT4 FIELD32(0x00000010)
275#define MAC_CSR13_BIT5 FIELD32(0x00000020)
276#define MAC_CSR13_BIT6 FIELD32(0x00000040)
277#define MAC_CSR13_BIT7 FIELD32(0x00000080)
278#define MAC_CSR13_BIT8 FIELD32(0x00000100)
279#define MAC_CSR13_BIT9 FIELD32(0x00000200)
280#define MAC_CSR13_BIT10 FIELD32(0x00000400)
281#define MAC_CSR13_BIT11 FIELD32(0x00000800)
282#define MAC_CSR13_BIT12 FIELD32(0x00001000)
270 283
271/* 284/*
272 * MAC_CSR14: LED control register. 285 * MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 3b1e1c2aad2..9718f61809c 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -100,6 +100,8 @@ struct rtl8187_priv {
100 struct usb_device *udev; 100 struct usb_device *udev;
101 u32 rx_conf; 101 u32 rx_conf;
102 struct usb_anchor anchored; 102 struct usb_anchor anchored;
103 struct delayed_work work;
104 struct ieee80211_hw *dev;
103 u16 txpwr_base; 105 u16 txpwr_base;
104 u8 asic_rev; 106 u8 asic_rev;
105 u8 is_rtl8187b; 107 u8 is_rtl8187b;
@@ -117,7 +119,7 @@ struct rtl8187_priv {
117 struct { 119 struct {
118 __le64 buf; 120 __le64 buf;
119 struct sk_buff_head queue; 121 struct sk_buff_head queue;
120 } b_tx_status; 122 } b_tx_status; /* This queue is used by both -b and non-b devices */
121}; 123};
122 124
123void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); 125void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 22bc07ef2f3..82bd47e7c61 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -177,25 +177,33 @@ static void rtl8187_tx_cb(struct urb *urb)
177 sizeof(struct rtl8187_tx_hdr)); 177 sizeof(struct rtl8187_tx_hdr));
178 ieee80211_tx_info_clear_status(info); 178 ieee80211_tx_info_clear_status(info);
179 179
180 if (!urb->status && 180 if (!(urb->status) && !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
181 !(info->flags & IEEE80211_TX_CTL_NO_ACK) && 181 if (priv->is_rtl8187b) {
182 priv->is_rtl8187b) { 182 skb_queue_tail(&priv->b_tx_status.queue, skb);
183 skb_queue_tail(&priv->b_tx_status.queue, skb);
184 183
185 /* queue is "full", discard last items */ 184 /* queue is "full", discard last items */
186 while (skb_queue_len(&priv->b_tx_status.queue) > 5) { 185 while (skb_queue_len(&priv->b_tx_status.queue) > 5) {
187 struct sk_buff *old_skb; 186 struct sk_buff *old_skb;
188 187
189 dev_dbg(&priv->udev->dev, 188 dev_dbg(&priv->udev->dev,
190 "transmit status queue full\n"); 189 "transmit status queue full\n");
191 190
192 old_skb = skb_dequeue(&priv->b_tx_status.queue); 191 old_skb = skb_dequeue(&priv->b_tx_status.queue);
193 ieee80211_tx_status_irqsafe(hw, old_skb); 192 ieee80211_tx_status_irqsafe(hw, old_skb);
194 } 193 }
195 } else { 194 return;
196 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !urb->status) 195 } else {
197 info->flags |= IEEE80211_TX_STAT_ACK; 196 info->flags |= IEEE80211_TX_STAT_ACK;
197 }
198 }
199 if (priv->is_rtl8187b)
198 ieee80211_tx_status_irqsafe(hw, skb); 200 ieee80211_tx_status_irqsafe(hw, skb);
201 else {
 202 /* Retry information for the RTL8187 is only available by
 203 * reading a register in the device. We are in interrupt context
 204 * here, thus queue the skb and finish on a work queue. */
205 skb_queue_tail(&priv->b_tx_status.queue, skb);
206 queue_delayed_work(hw->workqueue, &priv->work, 0);
199 } 207 }
200} 208}
201 209
@@ -391,7 +399,7 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
391 struct rtl8187_rx_info *info; 399 struct rtl8187_rx_info *info;
392 int ret = 0; 400 int ret = 0;
393 401
394 while (skb_queue_len(&priv->rx_queue) < 8) { 402 while (skb_queue_len(&priv->rx_queue) < 16) {
395 skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL); 403 skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
396 if (!skb) { 404 if (!skb) {
397 ret = -ENOMEM; 405 ret = -ENOMEM;
@@ -645,7 +653,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
645 653
646 rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0); 654 rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
647 rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); 655 rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
648 rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81); 656 rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0);
649 657
650 // TODO: set RESP_RATE and BRSR properly 658 // TODO: set RESP_RATE and BRSR properly
651 rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0); 659 rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0);
@@ -765,9 +773,6 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
765 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); 773 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
766 774
767 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1); 775 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1);
768 reg = rtl818x_ioread8(priv, &priv->map->RATE_FALLBACK);
769 reg |= RTL818X_RATE_FALLBACK_ENABLE;
770 rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, reg);
771 776
772 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100); 777 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
773 rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2); 778 rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
@@ -855,6 +860,34 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
855 return 0; 860 return 0;
856} 861}
857 862
863static void rtl8187_work(struct work_struct *work)
864{
865 /* The RTL8187 returns the retry count through register 0xFFFA. In
866 * addition, it appears to be a cumulative retry count, not the
867 * value for the current TX packet. When multiple TX entries are
868 * queued, the retry count will be valid for the last one in the queue.
869 * The "error" should not matter for purposes of rate setting. */
870 struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
871 work.work);
872 struct ieee80211_tx_info *info;
873 struct ieee80211_hw *dev = priv->dev;
874 static u16 retry;
875 u16 tmp;
876
877 mutex_lock(&priv->conf_mutex);
878 tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA);
879 while (skb_queue_len(&priv->b_tx_status.queue) > 0) {
880 struct sk_buff *old_skb;
881
882 old_skb = skb_dequeue(&priv->b_tx_status.queue);
883 info = IEEE80211_SKB_CB(old_skb);
884 info->status.rates[0].count = tmp - retry + 1;
885 ieee80211_tx_status_irqsafe(dev, old_skb);
886 }
887 retry = tmp;
888 mutex_unlock(&priv->conf_mutex);
889}
890
858static int rtl8187_start(struct ieee80211_hw *dev) 891static int rtl8187_start(struct ieee80211_hw *dev)
859{ 892{
860 struct rtl8187_priv *priv = dev->priv; 893 struct rtl8187_priv *priv = dev->priv;
@@ -869,6 +902,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
869 mutex_lock(&priv->conf_mutex); 902 mutex_lock(&priv->conf_mutex);
870 903
871 init_usb_anchor(&priv->anchored); 904 init_usb_anchor(&priv->anchored);
905 priv->dev = dev;
872 906
873 if (priv->is_rtl8187b) { 907 if (priv->is_rtl8187b) {
874 reg = RTL818X_RX_CONF_MGMT | 908 reg = RTL818X_RX_CONF_MGMT |
@@ -936,6 +970,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
936 reg |= RTL818X_CMD_TX_ENABLE; 970 reg |= RTL818X_CMD_TX_ENABLE;
937 reg |= RTL818X_CMD_RX_ENABLE; 971 reg |= RTL818X_CMD_RX_ENABLE;
938 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 972 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
973 INIT_DELAYED_WORK(&priv->work, rtl8187_work);
939 mutex_unlock(&priv->conf_mutex); 974 mutex_unlock(&priv->conf_mutex);
940 975
941 return 0; 976 return 0;
@@ -966,6 +1001,8 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
966 dev_kfree_skb_any(skb); 1001 dev_kfree_skb_any(skb);
967 1002
968 usb_kill_anchored_urbs(&priv->anchored); 1003 usb_kill_anchored_urbs(&priv->anchored);
1004 if (!priv->is_rtl8187b)
1005 cancel_delayed_work_sync(&priv->work);
969 mutex_unlock(&priv->conf_mutex); 1006 mutex_unlock(&priv->conf_mutex);
970} 1007}
971 1008
@@ -974,19 +1011,21 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
974{ 1011{
975 struct rtl8187_priv *priv = dev->priv; 1012 struct rtl8187_priv *priv = dev->priv;
976 int i; 1013 int i;
1014 int ret = -EOPNOTSUPP;
977 1015
1016 mutex_lock(&priv->conf_mutex);
978 if (priv->mode != NL80211_IFTYPE_MONITOR) 1017 if (priv->mode != NL80211_IFTYPE_MONITOR)
979 return -EOPNOTSUPP; 1018 goto exit;
980 1019
981 switch (conf->type) { 1020 switch (conf->type) {
982 case NL80211_IFTYPE_STATION: 1021 case NL80211_IFTYPE_STATION:
983 priv->mode = conf->type; 1022 priv->mode = conf->type;
984 break; 1023 break;
985 default: 1024 default:
986 return -EOPNOTSUPP; 1025 goto exit;
987 } 1026 }
988 1027
989 mutex_lock(&priv->conf_mutex); 1028 ret = 0;
990 priv->vif = conf->vif; 1029 priv->vif = conf->vif;
991 1030
992 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 1031 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
@@ -995,8 +1034,9 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
995 ((u8 *)conf->mac_addr)[i]); 1034 ((u8 *)conf->mac_addr)[i]);
996 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 1035 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
997 1036
1037exit:
998 mutex_unlock(&priv->conf_mutex); 1038 mutex_unlock(&priv->conf_mutex);
999 return 0; 1039 return ret;
1000} 1040}
1001 1041
1002static void rtl8187_remove_interface(struct ieee80211_hw *dev, 1042static void rtl8187_remove_interface(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 7015f248055..d6bf8d2ef8e 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1125,7 +1125,7 @@ static int strip_seq_show(struct seq_file *seq, void *v)
1125} 1125}
1126 1126
1127 1127
1128static struct seq_operations strip_seq_ops = { 1128static const struct seq_operations strip_seq_ops = {
1129 .start = strip_seq_start, 1129 .start = strip_seq_start,
1130 .next = strip_seq_next, 1130 .next = strip_seq_next,
1131 .stop = strip_seq_stop, 1131 .stop = strip_seq_stop,
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index c99a1b6b948..c8d5c34e8dd 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -44,6 +44,7 @@
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/string.h> 45#include <linux/string.h>
46#include <linux/wireless.h> 46#include <linux/wireless.h>
47#include <linux/ieee80211.h>
47 48
48#include <net/iw_handler.h> 49#include <net/iw_handler.h>
49 50
@@ -111,12 +112,6 @@ static void wl3501_release(struct pcmcia_device *link);
111 */ 112 */
112static dev_info_t wl3501_dev_info = "wl3501_cs"; 113static dev_info_t wl3501_dev_info = "wl3501_cs";
113 114
114static int wl3501_chan2freq[] = {
115 [0] = 2412, [1] = 2417, [2] = 2422, [3] = 2427, [4] = 2432,
116 [5] = 2437, [6] = 2442, [7] = 2447, [8] = 2452, [9] = 2457,
117 [10] = 2462, [11] = 2467, [12] = 2472, [13] = 2477,
118};
119
120static const struct { 115static const struct {
121 int reg_domain; 116 int reg_domain;
122 int min, max, deflt; 117 int min, max, deflt;
@@ -1510,7 +1505,7 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
1510{ 1505{
1511 struct wl3501_card *this = netdev_priv(dev); 1506 struct wl3501_card *this = netdev_priv(dev);
1512 1507
1513 wrqu->freq.m = wl3501_chan2freq[this->chan - 1] * 100000; 1508 wrqu->freq.m = ieee80211_dsss_chan_to_freq(this->chan) * 100000;
1514 wrqu->freq.e = 1; 1509 wrqu->freq.e = 1;
1515 return 0; 1510 return 0;
1516} 1511}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index b45c27d42fd..6226ac2357f 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -919,10 +919,9 @@ static int zd1201_set_freq(struct net_device *dev,
919 if (freq->e == 0) 919 if (freq->e == 0)
920 channel = freq->m; 920 channel = freq->m;
921 else { 921 else {
922 if (freq->m >= 2482) 922 channel = ieee80211_freq_to_dsss_chan(freq->m);
923 channel = 14; 923 if (channel < 0)
924 if (freq->m >= 2407) 924 channel = 0;
925 channel = (freq->m-2407)/5;
926 } 925 }
927 926
928 err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel); 927 err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 5200db40561..6ac597ffd3b 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -33,8 +33,13 @@ typedef u16 __nocast zd_addr_t;
33#ifdef DEBUG 33#ifdef DEBUG
34# define dev_dbg_f(dev, fmt, args...) \ 34# define dev_dbg_f(dev, fmt, args...) \
35 dev_printk_f(KERN_DEBUG, dev, fmt, ## args) 35 dev_printk_f(KERN_DEBUG, dev, fmt, ## args)
36# define dev_dbg_f_limit(dev, fmt, args...) do { \
37 if (net_ratelimit()) \
38 dev_printk_f(KERN_DEBUG, dev, fmt, ## args); \
39} while (0)
36#else 40#else
37# define dev_dbg_f(dev, fmt, args...) do { (void)(dev); } while (0) 41# define dev_dbg_f(dev, fmt, args...) do { (void)(dev); } while (0)
42# define dev_dbg_f_limit(dev, fmt, args...) do { (void)(dev); } while (0)
38#endif /* DEBUG */ 43#endif /* DEBUG */
39 44
40#ifdef DEBUG 45#ifdef DEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index a611ad85798..7579af27edb 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -768,13 +768,23 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
768 if (!beacon) 768 if (!beacon)
769 return -ENOMEM; 769 return -ENOMEM;
770 r = zd_mac_config_beacon(hw, beacon); 770 r = zd_mac_config_beacon(hw, beacon);
771 kfree_skb(beacon);
772
771 if (r < 0) 773 if (r < 0)
772 return r; 774 return r;
773 r = zd_set_beacon_interval(&mac->chip, BCN_MODE_IBSS | 775 }
774 hw->conf.beacon_int); 776
777 if (conf->changed & IEEE80211_IFCC_BEACON_ENABLED) {
778 u32 interval;
779
780 if (conf->enable_beacon)
781 interval = BCN_MODE_IBSS | hw->conf.beacon_int;
782 else
783 interval = 0;
784
785 r = zd_set_beacon_interval(&mac->chip, interval);
775 if (r < 0) 786 if (r < 0)
776 return r; 787 return r;
777 kfree_skb(beacon);
778 } 788 }
779 } else 789 } else
780 associated = is_valid_ether_addr(conf->bssid); 790 associated = is_valid_ether_addr(conf->bssid);
@@ -793,10 +803,9 @@ static void zd_process_intr(struct work_struct *work)
793 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); 803 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
794 804
795 int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4)); 805 int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
796 if (int_status & INT_CFG_NEXT_BCN) { 806 if (int_status & INT_CFG_NEXT_BCN)
797 if (net_ratelimit()) 807 dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
798 dev_dbg_f(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n"); 808 else
799 } else
800 dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n"); 809 dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
801 810
802 zd_chip_enable_hwint(&mac->chip); 811 zd_chip_enable_hwint(&mac->chip);
@@ -967,7 +976,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
 
 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		    IEEE80211_HW_SIGNAL_DB;
+		    IEEE80211_HW_SIGNAL_UNSPEC;
 
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_MESH_POINT) |
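The reworked zd_op_config_interface() above frees the beacon skb as soon as it has been pushed to the hardware, and then treats beacon on/off as its own step: when mac80211 reports IEEE80211_IFCC_BEACON_ENABLED, the driver writes either the IBSS mode bit OR'd with the configured interval, or zero to stop beaconing. A rough sketch of that selection, with a hypothetical chip helper standing in for zd_set_beacon_interval():

    /* Sketch only: example_chip and chip_write_beacon_interval() are
     * placeholders for the driver's real chip abstraction. */
    static int example_config_beaconing(struct example_chip *chip,
                                        bool enable, u16 beacon_int)
    {
            u32 interval = enable ? (BCN_MODE_IBSS | beacon_int) : 0;

            return chip_write_beacon_interval(chip, interval);
    }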
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cd6184ee08e..9f102a6535c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -196,7 +196,7 @@ static void rx_refill_timeout(unsigned long data)
196{ 196{
197 struct net_device *dev = (struct net_device *)data; 197 struct net_device *dev = (struct net_device *)data;
198 struct netfront_info *np = netdev_priv(dev); 198 struct netfront_info *np = netdev_priv(dev);
199 netif_rx_schedule(&np->napi); 199 napi_schedule(&np->napi);
200} 200}
201 201
202static int netfront_tx_slot_available(struct netfront_info *np) 202static int netfront_tx_slot_available(struct netfront_info *np)
@@ -328,7 +328,7 @@ static int xennet_open(struct net_device *dev)
328 xennet_alloc_rx_buffers(dev); 328 xennet_alloc_rx_buffers(dev);
329 np->rx.sring->rsp_event = np->rx.rsp_cons + 1; 329 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
330 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 330 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
331 netif_rx_schedule(&np->napi); 331 napi_schedule(&np->napi);
332 } 332 }
333 spin_unlock_bh(&np->rx_lock); 333 spin_unlock_bh(&np->rx_lock);
334 334
@@ -979,7 +979,7 @@ err:
979 979
980 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); 980 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
981 if (!more_to_do) 981 if (!more_to_do)
982 __netif_rx_complete(napi); 982 __napi_complete(napi);
983 983
984 local_irq_restore(flags); 984 local_irq_restore(flags);
985 } 985 }
@@ -1317,7 +1317,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1317 xennet_tx_buf_gc(dev); 1317 xennet_tx_buf_gc(dev);
1318 /* Under tx_lock: protects access to rx shared-ring indexes. */ 1318 /* Under tx_lock: protects access to rx shared-ring indexes. */
1319 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 1319 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1320 netif_rx_schedule(&np->napi); 1320 napi_schedule(&np->napi);
1321 } 1321 }
1322 1322
1323 spin_unlock_irqrestore(&np->tx_lock, flags); 1323 spin_unlock_irqrestore(&np->tx_lock, flags);
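For xen-netfront the conversion is a pure rename: netif_rx_schedule() became napi_schedule() and __netif_rx_complete() became __napi_complete(), with unchanged arguments. The usual shape of that pairing in a driver, sketched with placeholder names for everything outside the NAPI API:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    /* Interrupt handler: defer receive processing to NAPI context. */
    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
            struct example_priv *priv = dev_id;     /* hypothetical private data */

            if (example_rx_pending(priv))           /* placeholder check */
                    napi_schedule(&priv->napi);
            return IRQ_HANDLED;
    }

    /* Poll routine: complete NAPI once the budget is not exhausted. */
    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = example_process_rx(napi, budget);    /* placeholder */

            if (done < budget)
                    napi_complete(napi);
            return done;
    }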
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index 03a3f34e903..a12a7211c98 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -183,7 +183,7 @@ static int __init sonic_probe1(struct net_device *dev)
183 183
184 if (lp->descriptors == NULL) { 184 if (lp->descriptors == NULL) {
185 printk(KERN_ERR "%s: couldn't alloc DMA memory for " 185 printk(KERN_ERR "%s: couldn't alloc DMA memory for "
186 " descriptors.\n", lp->device->bus_id); 186 " descriptors.\n", dev_name(lp->device));
187 goto out; 187 goto out;
188 } 188 }
189 189
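The xtsonic hunk is part of the tree-wide move away from reading struct device's bus_id field directly; dev_name() is the accessor that keeps working as bus_id is phased out. For a message like the one above, the two common spellings are (sketch, assuming a struct device *dev is in scope inside the probe routine):

    /* format the device name explicitly ... */
    printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors\n",
           dev_name(dev));

    /* ... or let the driver core prepend it */
    dev_err(dev, "couldn't alloc DMA memory for descriptors\n");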
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index f0b15c9347d..0a6992d8611 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -358,6 +358,17 @@ static void znet_set_multicast_list (struct net_device *dev)
358 * multicast address configured isn't equal to IFF_ALLMULTI */ 358 * multicast address configured isn't equal to IFF_ALLMULTI */
359} 359}
360 360
361static const struct net_device_ops znet_netdev_ops = {
362 .ndo_open = znet_open,
363 .ndo_stop = znet_close,
364 .ndo_start_xmit = znet_send_packet,
365 .ndo_set_multicast_list = znet_set_multicast_list,
366 .ndo_tx_timeout = znet_tx_timeout,
367 .ndo_change_mtu = eth_change_mtu,
368 .ndo_set_mac_address = eth_mac_addr,
369 .ndo_validate_addr = eth_validate_addr,
370};
371
361/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe 372/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
362 BIOS area. We just scan for the signature, and pull the vital parameters 373 BIOS area. We just scan for the signature, and pull the vital parameters
363 out of the structure. */ 374 out of the structure. */
@@ -440,11 +451,7 @@ static int __init znet_probe (void)
440 znet->tx_end = znet->tx_start + znet->tx_buf_len; 451 znet->tx_end = znet->tx_start + znet->tx_buf_len;
441 452
442 /* The ZNET-specific entries in the device structure. */ 453 /* The ZNET-specific entries in the device structure. */
443 dev->open = &znet_open; 454 dev->netdev_ops = &znet_netdev_ops;
444 dev->hard_start_xmit = &znet_send_packet;
445 dev->stop = &znet_close;
446 dev->set_multicast_list = &znet_set_multicast_list;
447 dev->tx_timeout = znet_tx_timeout;
448 dev->watchdog_timeo = TX_TIMEOUT; 455 dev->watchdog_timeo = TX_TIMEOUT;
449 err = register_netdev(dev); 456 err = register_netdev(dev);
450 if (err) 457 if (err)
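znet is one of many drivers in this series converted to struct net_device_ops: the callbacks that used to be individual function pointers in struct net_device (open, stop, hard_start_xmit, ...) now live in a single shared const table that every device instance points at. The conversion always has the same two parts, shown here for a hypothetical Ethernet driver (the example_* handlers are placeholders):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static const struct net_device_ops example_netdev_ops = {
            .ndo_open               = example_open,         /* was dev->open */
            .ndo_stop               = example_close,        /* was dev->stop */
            .ndo_start_xmit         = example_start_xmit,   /* was dev->hard_start_xmit */
            .ndo_set_multicast_list = example_set_multicast_list,
            .ndo_change_mtu         = eth_change_mtu,
            .ndo_set_mac_address    = eth_mac_addr,
            .ndo_validate_addr      = eth_validate_addr,
    };

    static void example_setup(struct net_device *dev)
    {
            /* One assignment replaces the per-field hookup. */
            dev->netdev_ops = &example_netdev_ops;
            dev->watchdog_timeo = 5 * HZ;
    }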
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index f5e618562c5..6669adf355b 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -60,6 +60,9 @@
60 * 1.25 Added Packing support 60 * 1.25 Added Packing support
61 * 1.5 61 * 1.5
62 */ 62 */
63
64#define KMSG_COMPONENT "claw"
65
63#include <asm/ccwdev.h> 66#include <asm/ccwdev.h>
64#include <asm/ccwgroup.h> 67#include <asm/ccwgroup.h>
65#include <asm/debug.h> 68#include <asm/debug.h>
@@ -94,7 +97,7 @@
94 CLAW uses the s390dbf file system see claw_trace and claw_setup 97 CLAW uses the s390dbf file system see claw_trace and claw_setup
95*/ 98*/
96 99
97 100static char version[] __initdata = "CLAW driver";
98static char debug_buffer[255]; 101static char debug_buffer[255];
99/** 102/**
100 * Debug Facility Stuff 103 * Debug Facility Stuff
@@ -206,20 +209,30 @@ static struct net_device_stats *claw_stats(struct net_device *dev);
206static int pages_to_order_of_mag(int num_of_pages); 209static int pages_to_order_of_mag(int num_of_pages);
207static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); 210static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
208/* sysfs Functions */ 211/* sysfs Functions */
209static ssize_t claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf); 212static ssize_t claw_hname_show(struct device *dev,
210static ssize_t claw_hname_write(struct device *dev, struct device_attribute *attr, 213 struct device_attribute *attr, char *buf);
214static ssize_t claw_hname_write(struct device *dev,
215 struct device_attribute *attr,
211 const char *buf, size_t count); 216 const char *buf, size_t count);
212static ssize_t claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf); 217static ssize_t claw_adname_show(struct device *dev,
213static ssize_t claw_adname_write(struct device *dev, struct device_attribute *attr, 218 struct device_attribute *attr, char *buf);
219static ssize_t claw_adname_write(struct device *dev,
220 struct device_attribute *attr,
214 const char *buf, size_t count); 221 const char *buf, size_t count);
215static ssize_t claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf); 222static ssize_t claw_apname_show(struct device *dev,
216static ssize_t claw_apname_write(struct device *dev, struct device_attribute *attr, 223 struct device_attribute *attr, char *buf);
224static ssize_t claw_apname_write(struct device *dev,
225 struct device_attribute *attr,
217 const char *buf, size_t count); 226 const char *buf, size_t count);
218static ssize_t claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf); 227static ssize_t claw_wbuff_show(struct device *dev,
219static ssize_t claw_wbuff_write(struct device *dev, struct device_attribute *attr, 228 struct device_attribute *attr, char *buf);
229static ssize_t claw_wbuff_write(struct device *dev,
230 struct device_attribute *attr,
220 const char *buf, size_t count); 231 const char *buf, size_t count);
221static ssize_t claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf); 232static ssize_t claw_rbuff_show(struct device *dev,
222static ssize_t claw_rbuff_write(struct device *dev, struct device_attribute *attr, 233 struct device_attribute *attr, char *buf);
234static ssize_t claw_rbuff_write(struct device *dev,
235 struct device_attribute *attr,
223 const char *buf, size_t count); 236 const char *buf, size_t count);
224static int claw_add_files(struct device *dev); 237static int claw_add_files(struct device *dev);
225static void claw_remove_files(struct device *dev); 238static void claw_remove_files(struct device *dev);
@@ -298,8 +311,8 @@ claw_probe(struct ccwgroup_device *cgdev)
298 if (rc) { 311 if (rc) {
299 probe_error(cgdev); 312 probe_error(cgdev);
300 put_device(&cgdev->dev); 313 put_device(&cgdev->dev);
301 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", 314 dev_err(&cgdev->dev, "Creating the /proc files for a new"
302 dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__); 315 " CLAW device failed\n");
303 CLAW_DBF_TEXT_(2, setup, "probex%d", rc); 316 CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
304 return rc; 317 return rc;
305 } 318 }
@@ -496,7 +509,8 @@ claw_open(struct net_device *dev)
496 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || 509 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
497 (((privptr->channel[READ].flag | 510 (((privptr->channel[READ].flag |
498 privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { 511 privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
499 printk(KERN_INFO "%s: remote side is not ready\n", dev->name); 512 dev_info(&privptr->channel[READ].cdev->dev,
513 "%s: remote side is not ready\n", dev->name);
500 CLAW_DBF_TEXT(2, trace, "notrdy"); 514 CLAW_DBF_TEXT(2, trace, "notrdy");
501 515
502 for ( i = 0; i < 2; i++) { 516 for ( i = 0; i < 2; i++) {
@@ -582,10 +596,9 @@ claw_irq_handler(struct ccw_device *cdev,
582 CLAW_DBF_TEXT(4, trace, "clawirq"); 596 CLAW_DBF_TEXT(4, trace, "clawirq");
583 /* Bypass all 'unsolicited interrupts' */ 597 /* Bypass all 'unsolicited interrupts' */
584 if (!cdev->dev.driver_data) { 598 if (!cdev->dev.driver_data) {
585 printk(KERN_WARNING "claw: unsolicited interrupt for device:" 599 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
586 "%s received c-%02x d-%02x\n", 600 " IRQ, c-%02x d-%02x\n",
587 dev_name(&cdev->dev), irb->scsw.cmd.cstat, 601 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
588 irb->scsw.cmd.dstat);
589 CLAW_DBF_TEXT(2, trace, "badirq"); 602 CLAW_DBF_TEXT(2, trace, "badirq");
590 return; 603 return;
591 } 604 }
@@ -597,8 +610,7 @@ claw_irq_handler(struct ccw_device *cdev,
597 else if (privptr->channel[WRITE].cdev == cdev) 610 else if (privptr->channel[WRITE].cdev == cdev)
598 p_ch = &privptr->channel[WRITE]; 611 p_ch = &privptr->channel[WRITE];
599 else { 612 else {
600 printk(KERN_WARNING "claw: Can't determine channel for " 613 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
601 "interrupt, device %s\n", dev_name(&cdev->dev));
602 CLAW_DBF_TEXT(2, trace, "badchan"); 614 CLAW_DBF_TEXT(2, trace, "badchan");
603 return; 615 return;
604 } 616 }
@@ -612,7 +624,8 @@ claw_irq_handler(struct ccw_device *cdev,
612 624
613 /* Check for good subchannel return code, otherwise info message */ 625 /* Check for good subchannel return code, otherwise info message */
614 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { 626 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
615 printk(KERN_INFO "%s: subchannel check for device: %04x -" 627 dev_info(&cdev->dev,
628 "%s: subchannel check for device: %04x -"
616 " Sch Stat %02x Dev Stat %02x CPA - %04x\n", 629 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
617 dev->name, p_ch->devno, 630 dev->name, p_ch->devno,
618 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, 631 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
@@ -651,7 +664,7 @@ claw_irq_handler(struct ccw_device *cdev,
651 wake_up(&p_ch->wait); /* wake claw_open (READ)*/ 664 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
652 } else if (p_ch->flag == CLAW_WRITE) { 665 } else if (p_ch->flag == CLAW_WRITE) {
653 p_ch->claw_state = CLAW_START_WRITE; 666 p_ch->claw_state = CLAW_START_WRITE;
654 /* send SYSTEM_VALIDATE */ 667 /* send SYSTEM_VALIDATE */
655 claw_strt_read(dev, LOCK_NO); 668 claw_strt_read(dev, LOCK_NO);
656 claw_send_control(dev, 669 claw_send_control(dev,
657 SYSTEM_VALIDATE_REQUEST, 670 SYSTEM_VALIDATE_REQUEST,
@@ -659,10 +672,9 @@ claw_irq_handler(struct ccw_device *cdev,
659 p_env->host_name, 672 p_env->host_name,
660 p_env->adapter_name); 673 p_env->adapter_name);
661 } else { 674 } else {
662 printk(KERN_WARNING "claw: unsolicited " 675 dev_warn(&cdev->dev, "The CLAW device received"
663 "interrupt for device:" 676 " an unexpected IRQ, "
664 "%s received c-%02x d-%02x\n", 677 "c-%02x d-%02x\n",
665 dev_name(&cdev->dev),
666 irb->scsw.cmd.cstat, 678 irb->scsw.cmd.cstat,
667 irb->scsw.cmd.dstat); 679 irb->scsw.cmd.dstat);
668 return; 680 return;
@@ -677,8 +689,8 @@ claw_irq_handler(struct ccw_device *cdev,
677 (p_ch->irb->ecw[0] & 0x40) == 0x40 || 689 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
678 (p_ch->irb->ecw[0]) == 0) { 690 (p_ch->irb->ecw[0]) == 0) {
679 privptr->stats.rx_errors++; 691 privptr->stats.rx_errors++;
680 printk(KERN_INFO "%s: Restart is " 692 dev_info(&cdev->dev,
681 "required after remote " 693 "%s: Restart is required after remote "
682 "side recovers \n", 694 "side recovers \n",
683 dev->name); 695 dev->name);
684 } 696 }
@@ -713,11 +725,13 @@ claw_irq_handler(struct ccw_device *cdev,
713 return; 725 return;
714 case CLAW_START_WRITE: 726 case CLAW_START_WRITE:
715 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { 727 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
716 printk(KERN_INFO "%s: Unit Check Occured in " 728 dev_info(&cdev->dev,
729 "%s: Unit Check Occured in "
717 "write channel\n", dev->name); 730 "write channel\n", dev->name);
718 clear_bit(0, (void *)&p_ch->IO_active); 731 clear_bit(0, (void *)&p_ch->IO_active);
719 if (p_ch->irb->ecw[0] & 0x80) { 732 if (p_ch->irb->ecw[0] & 0x80) {
720 printk(KERN_INFO "%s: Resetting Event " 733 dev_info(&cdev->dev,
734 "%s: Resetting Event "
721 "occurred:\n", dev->name); 735 "occurred:\n", dev->name);
722 init_timer(&p_ch->timer); 736 init_timer(&p_ch->timer);
723 p_ch->timer.function = 737 p_ch->timer.function =
@@ -725,7 +739,8 @@ claw_irq_handler(struct ccw_device *cdev,
725 p_ch->timer.data = (unsigned long)p_ch; 739 p_ch->timer.data = (unsigned long)p_ch;
726 p_ch->timer.expires = jiffies + 10*HZ; 740 p_ch->timer.expires = jiffies + 10*HZ;
727 add_timer(&p_ch->timer); 741 add_timer(&p_ch->timer);
728 printk(KERN_INFO "%s: write connection " 742 dev_info(&cdev->dev,
743 "%s: write connection "
729 "restarting\n", dev->name); 744 "restarting\n", dev->name);
730 } 745 }
731 CLAW_DBF_TEXT(4, trace, "rstrtwrt"); 746 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
@@ -733,9 +748,10 @@ claw_irq_handler(struct ccw_device *cdev,
733 } 748 }
734 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { 749 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
735 clear_bit(0, (void *)&p_ch->IO_active); 750 clear_bit(0, (void *)&p_ch->IO_active);
736 printk(KERN_INFO "%s: Unit Exception " 751 dev_info(&cdev->dev,
737 "Occured in write channel\n", 752 "%s: Unit Exception "
738 dev->name); 753 "occurred in write channel\n",
754 dev->name);
739 } 755 }
740 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || 756 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
741 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || 757 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
@@ -757,8 +773,9 @@ claw_irq_handler(struct ccw_device *cdev,
757 CLAW_DBF_TEXT(4, trace, "StWtExit"); 773 CLAW_DBF_TEXT(4, trace, "StWtExit");
758 return; 774 return;
759 default: 775 default:
760 printk(KERN_WARNING "%s: wrong selection code - irq " 776 dev_warn(&cdev->dev,
761 "state=%d\n", dev->name, p_ch->claw_state); 777 "The CLAW device for %s received an unexpected IRQ\n",
778 dev->name);
762 CLAW_DBF_TEXT(2, trace, "badIRQ"); 779 CLAW_DBF_TEXT(2, trace, "badIRQ");
763 return; 780 return;
764 } 781 }
@@ -910,8 +927,10 @@ claw_release(struct net_device *dev)
910 if (((privptr->channel[READ].last_dstat | 927 if (((privptr->channel[READ].last_dstat |
911 privptr->channel[WRITE].last_dstat) & 928 privptr->channel[WRITE].last_dstat) &
912 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { 929 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
913 printk(KERN_WARNING "%s: channel problems during close - " 930 dev_warn(&privptr->channel[READ].cdev->dev,
914 "read: %02x - write: %02x\n", 931 "Deactivating %s completed with incorrect"
932 " subchannel status "
933 "(read %02x, write %02x)\n",
915 dev->name, 934 dev->name,
916 privptr->channel[READ].last_dstat, 935 privptr->channel[READ].last_dstat,
917 privptr->channel[WRITE].last_dstat); 936 privptr->channel[WRITE].last_dstat);
@@ -1076,8 +1095,8 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1076 } 1095 }
1077 1096
1078 if ( privptr-> p_read_active_first ==NULL ) { 1097 if ( privptr-> p_read_active_first ==NULL ) {
1079 privptr-> p_read_active_first= p_first; /* set new first */ 1098 privptr->p_read_active_first = p_first; /* set new first */
1080 privptr-> p_read_active_last = p_last; /* set new last */ 1099 privptr->p_read_active_last = p_last; /* set new last */
1081 } 1100 }
1082 else { 1101 else {
1083 1102
@@ -1113,7 +1132,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1113 privptr->p_read_active_last->r_TIC_2.cda= 1132 privptr->p_read_active_last->r_TIC_2.cda=
1114 (__u32)__pa(&p_first->read); 1133 (__u32)__pa(&p_first->read);
1115 } 1134 }
1116 /* chain in new set of blocks */ 1135 /* chain in new set of blocks */
1117 privptr->p_read_active_last->next = p_first; 1136 privptr->p_read_active_last->next = p_first;
1118 privptr->p_read_active_last=p_last; 1137 privptr->p_read_active_last=p_last;
1119 } /* end of if ( privptr-> p_read_active_first ==NULL) */ 1138 } /* end of if ( privptr-> p_read_active_first ==NULL) */
@@ -1135,21 +1154,18 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1135 case -EBUSY: /* BUSY is a transient state no action needed */ 1154 case -EBUSY: /* BUSY is a transient state no action needed */
1136 break; 1155 break;
1137 case -ENODEV: 1156 case -ENODEV:
1138 printk(KERN_EMERG "%s: Missing device called " 1157 dev_err(&cdev->dev, "The remote channel adapter is not"
1139 "for IO ENODEV\n", dev_name(&cdev->dev)); 1158 " available\n");
1140 break;
1141 case -EIO:
1142 printk(KERN_EMERG "%s: Status pending... EIO \n",
1143 dev_name(&cdev->dev));
1144 break; 1159 break;
1145 case -EINVAL: 1160 case -EINVAL:
1146 printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n", 1161 dev_err(&cdev->dev,
1147 dev_name(&cdev->dev)); 1162 "The status of the remote channel adapter"
1163 " is not valid\n");
1148 break; 1164 break;
1149 default: 1165 default:
1150 printk(KERN_EMERG "%s: Unknown error in " 1166 dev_err(&cdev->dev, "The common device layer"
1151 "Do_IO %d\n", dev_name(&cdev->dev), 1167 " returned error code %d\n",
1152 return_code); 1168 return_code);
1153 } 1169 }
1154 } 1170 }
1155 CLAW_DBF_TEXT(4, trace, "ccwret"); 1171 CLAW_DBF_TEXT(4, trace, "ccwret");
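Most of the claw.c churn replaces printk() calls that pasted dev_name() into the format string with dev_err()/dev_warn()/dev_info(), which tie the message to the emitting struct device and add the driver/device prefix automatically. The before/after shape, using two strings taken from the hunk above:

    /* before: caller builds the prefix by hand */
    printk(KERN_EMERG "%s: Missing device called for IO ENODEV\n",
           dev_name(&cdev->dev));

    /* after: the driver core supplies the "<driver> <device>:" prefix */
    dev_err(&cdev->dev, "The remote channel adapter is not available\n");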
@@ -1163,40 +1179,41 @@ static void
1163ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) 1179ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1164{ 1180{
1165 struct net_device *ndev = p_ch->ndev; 1181 struct net_device *ndev = p_ch->ndev;
1182 struct device *dev = &p_ch->cdev->dev;
1166 1183
1167 CLAW_DBF_TEXT(4, trace, "unitchek"); 1184 CLAW_DBF_TEXT(4, trace, "unitchek");
1168 printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n", 1185 dev_warn(dev, "The communication peer of %s disconnected\n",
1169 ndev->name, sense); 1186 ndev->name);
1170 1187
1171 if (sense & 0x40) { 1188 if (sense & 0x40) {
1172 if (sense & 0x01) { 1189 if (sense & 0x01) {
1173 printk(KERN_WARNING "%s: Interface disconnect or " 1190 dev_warn(dev, "The remote channel adapter for"
1174 "Selective reset " 1191 " %s has been reset\n",
1175 "occurred (remote side)\n", ndev->name); 1192 ndev->name);
1176 }
1177 else {
1178 printk(KERN_WARNING "%s: System reset occured"
1179 " (remote side)\n", ndev->name);
1180 } 1193 }
1181 } 1194 }
1182 else if (sense & 0x20) { 1195 else if (sense & 0x20) {
1183 if (sense & 0x04) { 1196 if (sense & 0x04) {
1184 printk(KERN_WARNING "%s: Data-streaming " 1197 dev_warn(dev, "A data streaming timeout occurred"
1185 "timeout)\n", ndev->name); 1198 " for %s\n",
1199 ndev->name);
1186 } 1200 }
1187 else { 1201 else {
1188 printk(KERN_WARNING "%s: Data-transfer parity" 1202 dev_warn(dev, "A data transfer parity error occurred"
1189 " error\n", ndev->name); 1203 " for %s\n",
1204 ndev->name);
1190 } 1205 }
1191 } 1206 }
1192 else if (sense & 0x10) { 1207 else if (sense & 0x10) {
1193 if (sense & 0x20) { 1208 if (sense & 0x20) {
1194 printk(KERN_WARNING "%s: Hardware malfunction " 1209 dev_warn(dev, "The remote channel adapter for %s"
1195 "(remote side)\n", ndev->name); 1210 " is faulty\n",
1211 ndev->name);
1196 } 1212 }
1197 else { 1213 else {
1198 printk(KERN_WARNING "%s: read-data parity error " 1214 dev_warn(dev, "A read data parity error occurred"
1199 "(remote side)\n", ndev->name); 1215 " for %s\n",
1216 ndev->name);
1200 } 1217 }
1201 } 1218 }
1202 1219
@@ -1375,7 +1392,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1375 */ 1392 */
1376 1393
1377 if (p_first_ccw!=NULL) { 1394 if (p_first_ccw!=NULL) {
1378 /* setup ending ccw sequence for this segment */ 1395 /* setup ending ccw sequence for this segment */
1379 pEnd=privptr->p_end_ccw; 1396 pEnd=privptr->p_end_ccw;
1380 if (pEnd->write1) { 1397 if (pEnd->write1) {
1381 pEnd->write1=0x00; /* second end ccw is now active */ 1398 pEnd->write1=0x00; /* second end ccw is now active */
@@ -1697,10 +1714,11 @@ init_ccw_bk(struct net_device *dev)
1697 p_buf-> w_TIC_1.flags = 0; 1714 p_buf-> w_TIC_1.flags = 0;
1698 p_buf-> w_TIC_1.count = 0; 1715 p_buf-> w_TIC_1.count = 0;
1699 1716
1700 if (((unsigned long)p_buff+privptr->p_env->write_size) >= 1717 if (((unsigned long)p_buff +
1718 privptr->p_env->write_size) >=
1701 ((unsigned long)(p_buff+2* 1719 ((unsigned long)(p_buff+2*
1702 (privptr->p_env->write_size) -1) & PAGE_MASK)) { 1720 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1703 p_buff= p_buff+privptr->p_env->write_size; 1721 p_buff = p_buff+privptr->p_env->write_size;
1704 } 1722 }
1705 } 1723 }
1706 } 1724 }
@@ -1840,15 +1858,16 @@ init_ccw_bk(struct net_device *dev)
1840 p_buf->header.opcode=0xff; 1858 p_buf->header.opcode=0xff;
1841 p_buf->header.flag=CLAW_PENDING; 1859 p_buf->header.flag=CLAW_PENDING;
1842 1860
1843 if (((unsigned long)p_buff+privptr->p_env->read_size) >= 1861 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1844 ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1) 1862 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1845 & PAGE_MASK) ) { 1863 -1)
1864 & PAGE_MASK)) {
1846 p_buff= p_buff+privptr->p_env->read_size; 1865 p_buff= p_buff+privptr->p_env->read_size;
1847 } 1866 }
1848 else { 1867 else {
1849 p_buff= 1868 p_buff=
1850 (void *)((unsigned long) 1869 (void *)((unsigned long)
1851 (p_buff+2*(privptr->p_env->read_size) -1) 1870 (p_buff+2*(privptr->p_env->read_size)-1)
1852 & PAGE_MASK) ; 1871 & PAGE_MASK) ;
1853 } 1872 }
1854 } /* for read_buffers */ 1873 } /* for read_buffers */
@@ -1856,24 +1875,28 @@ init_ccw_bk(struct net_device *dev)
1856 else { /* read Size >= PAGE_SIZE */ 1875 else { /* read Size >= PAGE_SIZE */
1857 for (i=0 ; i< privptr->p_env->read_buffers ; i++) { 1876 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1858 p_buff = (void *)__get_free_pages(__GFP_DMA, 1877 p_buff = (void *)__get_free_pages(__GFP_DMA,
1859 (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) ); 1878 (int)pages_to_order_of_mag(
1879 privptr->p_buff_pages_perread));
1860 if (p_buff==NULL) { 1880 if (p_buff==NULL) {
1861 free_pages((unsigned long)privptr->p_buff_ccw, 1881 free_pages((unsigned long)privptr->p_buff_ccw,
1862 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); 1882 (int)pages_to_order_of_mag(privptr->
1883 p_buff_ccw_num));
1863 /* free the write pages */ 1884 /* free the write pages */
1864 p_buf=privptr->p_buff_write; 1885 p_buf=privptr->p_buff_write;
1865 while (p_buf!=NULL) { 1886 while (p_buf!=NULL) {
1866 free_pages((unsigned long)p_buf->p_buffer, 1887 free_pages(
1867 (int)pages_to_order_of_mag( 1888 (unsigned long)p_buf->p_buffer,
1868 privptr->p_buff_pages_perwrite )); 1889 (int)pages_to_order_of_mag(
1890 privptr->p_buff_pages_perwrite));
1869 p_buf=p_buf->next; 1891 p_buf=p_buf->next;
1870 } 1892 }
1871 /* free any read pages already alloc */ 1893 /* free any read pages already alloc */
1872 p_buf=privptr->p_buff_read; 1894 p_buf=privptr->p_buff_read;
1873 while (p_buf!=NULL) { 1895 while (p_buf!=NULL) {
1874 free_pages((unsigned long)p_buf->p_buffer, 1896 free_pages(
1875 (int)pages_to_order_of_mag( 1897 (unsigned long)p_buf->p_buffer,
1876 privptr->p_buff_pages_perread )); 1898 (int)pages_to_order_of_mag(
1899 privptr->p_buff_pages_perread));
1877 p_buf=p_buf->next; 1900 p_buf=p_buf->next;
1878 } 1901 }
1879 privptr->p_buff_ccw=NULL; 1902 privptr->p_buff_ccw=NULL;
@@ -2003,7 +2026,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2003 tdev = &privptr->channel[READ].cdev->dev; 2026 tdev = &privptr->channel[READ].cdev->dev;
2004 memcpy( &temp_host_name, p_env->host_name, 8); 2027 memcpy( &temp_host_name, p_env->host_name, 8);
2005 memcpy( &temp_ws_name, p_env->adapter_name , 8); 2028 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2006 printk(KERN_INFO "%s: CLAW device %.8s: " 2029 dev_info(tdev, "%s: CLAW device %.8s: "
2007 "Received Control Packet\n", 2030 "Received Control Packet\n",
2008 dev->name, temp_ws_name); 2031 dev->name, temp_ws_name);
2009 if (privptr->release_pend==1) { 2032 if (privptr->release_pend==1) {
@@ -2022,32 +2045,30 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2022 if (p_ctlbk->version != CLAW_VERSION_ID) { 2045 if (p_ctlbk->version != CLAW_VERSION_ID) {
2023 claw_snd_sys_validate_rsp(dev, p_ctlbk, 2046 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2024 CLAW_RC_WRONG_VERSION); 2047 CLAW_RC_WRONG_VERSION);
2025 printk("%s: %d is wrong version id. " 2048 dev_warn(tdev, "The communication peer of %s"
2026 "Expected %d\n", 2049 " uses an incorrect API version %d\n",
2027 dev->name, p_ctlbk->version, 2050 dev->name, p_ctlbk->version);
2028 CLAW_VERSION_ID);
2029 } 2051 }
2030 p_sysval = (struct sysval *)&(p_ctlbk->data); 2052 p_sysval = (struct sysval *)&(p_ctlbk->data);
2031 printk("%s: Recv Sys Validate Request: " 2053 dev_info(tdev, "%s: Recv Sys Validate Request: "
2032 "Vers=%d,link_id=%d,Corr=%d,WS name=%." 2054 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2033 "8s,Host name=%.8s\n", 2055 "Host name=%.8s\n",
2034 dev->name, p_ctlbk->version, 2056 dev->name, p_ctlbk->version,
2035 p_ctlbk->linkid, 2057 p_ctlbk->linkid,
2036 p_ctlbk->correlator, 2058 p_ctlbk->correlator,
2037 p_sysval->WS_name, 2059 p_sysval->WS_name,
2038 p_sysval->host_name); 2060 p_sysval->host_name);
2039 if (memcmp(temp_host_name, p_sysval->host_name, 8)) { 2061 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2040 claw_snd_sys_validate_rsp(dev, p_ctlbk, 2062 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2041 CLAW_RC_NAME_MISMATCH); 2063 CLAW_RC_NAME_MISMATCH);
2042 CLAW_DBF_TEXT(2, setup, "HSTBAD"); 2064 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2043 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name); 2065 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2044 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name); 2066 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2045 printk(KERN_INFO "%s: Host name mismatch\n", 2067 dev_warn(tdev,
2046 dev->name); 2068 "Host name %s for %s does not match the"
2047 printk(KERN_INFO "%s: Received :%s: " 2069 " remote adapter name %s\n",
2048 "expected :%s: \n",
2049 dev->name,
2050 p_sysval->host_name, 2070 p_sysval->host_name,
2071 dev->name,
2051 temp_host_name); 2072 temp_host_name);
2052 } 2073 }
2053 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) { 2074 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
@@ -2056,35 +2077,38 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2056 CLAW_DBF_TEXT(2, setup, "WSNBAD"); 2077 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2057 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name); 2078 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2058 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name); 2079 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2059 printk(KERN_INFO "%s: WS name mismatch\n", 2080 dev_warn(tdev, "Adapter name %s for %s does not match"
2060 dev->name); 2081 " the remote host name %s\n",
2061 printk(KERN_INFO "%s: Received :%s: " 2082 p_sysval->WS_name,
2062 "expected :%s: \n", 2083 dev->name,
2063 dev->name, 2084 temp_ws_name);
2064 p_sysval->WS_name,
2065 temp_ws_name);
2066 } 2085 }
2067 if ((p_sysval->write_frame_size < p_env->write_size) && 2086 if ((p_sysval->write_frame_size < p_env->write_size) &&
2068 (p_env->packing == 0)) { 2087 (p_env->packing == 0)) {
2069 claw_snd_sys_validate_rsp(dev, p_ctlbk, 2088 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2070 CLAW_RC_HOST_RCV_TOO_SMALL); 2089 CLAW_RC_HOST_RCV_TOO_SMALL);
2071 printk(KERN_INFO "%s: host write size is too " 2090 dev_warn(tdev,
2072 "small\n", dev->name); 2091 "The local write buffer is smaller than the"
2092 " remote read buffer\n");
2073 CLAW_DBF_TEXT(2, setup, "wrtszbad"); 2093 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2074 } 2094 }
2075 if ((p_sysval->read_frame_size < p_env->read_size) && 2095 if ((p_sysval->read_frame_size < p_env->read_size) &&
2076 (p_env->packing == 0)) { 2096 (p_env->packing == 0)) {
2077 claw_snd_sys_validate_rsp(dev, p_ctlbk, 2097 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2078 CLAW_RC_HOST_RCV_TOO_SMALL); 2098 CLAW_RC_HOST_RCV_TOO_SMALL);
2079 printk(KERN_INFO "%s: host read size is too " 2099 dev_warn(tdev,
2080 "small\n", dev->name); 2100 "The local read buffer is smaller than the"
2101 " remote write buffer\n");
2081 CLAW_DBF_TEXT(2, setup, "rdsizbad"); 2102 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2082 } 2103 }
2083 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0); 2104 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2084 printk(KERN_INFO "%s: CLAW device %.8s: System validate " 2105 dev_info(tdev,
2085 "completed.\n", dev->name, temp_ws_name); 2106 "CLAW device %.8s: System validate"
2086 printk("%s: sys Validate Rsize:%d Wsize:%d\n", dev->name, 2107 " completed.\n", temp_ws_name);
2087 p_sysval->read_frame_size, p_sysval->write_frame_size); 2108 dev_info(tdev,
2109 "%s: sys Validate Rsize:%d Wsize:%d\n",
2110 dev->name, p_sysval->read_frame_size,
2111 p_sysval->write_frame_size);
2088 privptr->system_validate_comp = 1; 2112 privptr->system_validate_comp = 1;
2089 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) 2113 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2090 p_env->packing = PACKING_ASK; 2114 p_env->packing = PACKING_ASK;
@@ -2092,8 +2116,10 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2092 break; 2116 break;
2093 case SYSTEM_VALIDATE_RESPONSE: 2117 case SYSTEM_VALIDATE_RESPONSE:
2094 p_sysval = (struct sysval *)&(p_ctlbk->data); 2118 p_sysval = (struct sysval *)&(p_ctlbk->data);
2095 printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d," 2119 dev_info(tdev,
2096 "WS name=%.8s,Host name=%.8s\n", 2120 "Settings for %s validated (version=%d, "
2121 "remote device=%d, rc=%d, adapter name=%.8s, "
2122 "host name=%.8s)\n",
2097 dev->name, 2123 dev->name,
2098 p_ctlbk->version, 2124 p_ctlbk->version,
2099 p_ctlbk->correlator, 2125 p_ctlbk->correlator,
@@ -2102,41 +2128,39 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2102 p_sysval->host_name); 2128 p_sysval->host_name);
2103 switch (p_ctlbk->rc) { 2129 switch (p_ctlbk->rc) {
2104 case 0: 2130 case 0:
2105 printk(KERN_INFO "%s: CLAW device " 2131 dev_info(tdev, "%s: CLAW device "
2106 "%.8s: System validate " 2132 "%.8s: System validate completed.\n",
2107 "completed.\n", 2133 dev->name, temp_ws_name);
2108 dev->name, temp_ws_name);
2109 if (privptr->system_validate_comp == 0) 2134 if (privptr->system_validate_comp == 0)
2110 claw_strt_conn_req(dev); 2135 claw_strt_conn_req(dev);
2111 privptr->system_validate_comp = 1; 2136 privptr->system_validate_comp = 1;
2112 break; 2137 break;
2113 case CLAW_RC_NAME_MISMATCH: 2138 case CLAW_RC_NAME_MISMATCH:
2114 printk(KERN_INFO "%s: Sys Validate " 2139 dev_warn(tdev, "Validating %s failed because of"
2115 "Resp : Host, WS name is " 2140 " a host or adapter name mismatch\n",
2116 "mismatch\n", 2141 dev->name);
2117 dev->name);
2118 break; 2142 break;
2119 case CLAW_RC_WRONG_VERSION: 2143 case CLAW_RC_WRONG_VERSION:
2120 printk(KERN_INFO "%s: Sys Validate " 2144 dev_warn(tdev, "Validating %s failed because of a"
2121 "Resp : Wrong version\n", 2145 " version conflict\n",
2122 dev->name); 2146 dev->name);
2123 break; 2147 break;
2124 case CLAW_RC_HOST_RCV_TOO_SMALL: 2148 case CLAW_RC_HOST_RCV_TOO_SMALL:
2125 printk(KERN_INFO "%s: Sys Validate " 2149 dev_warn(tdev, "Validating %s failed because of a"
2126 "Resp : bad frame size\n", 2150 " frame size conflict\n",
2127 dev->name); 2151 dev->name);
2128 break; 2152 break;
2129 default: 2153 default:
2130 printk(KERN_INFO "%s: Sys Validate " 2154 dev_warn(tdev, "The communication peer of %s rejected"
2131 "error code=%d \n", 2155 " the connection\n",
2132 dev->name, p_ctlbk->rc); 2156 dev->name);
2133 break; 2157 break;
2134 } 2158 }
2135 break; 2159 break;
2136 2160
2137 case CONNECTION_REQUEST: 2161 case CONNECTION_REQUEST:
2138 p_connect = (struct conncmd *)&(p_ctlbk->data); 2162 p_connect = (struct conncmd *)&(p_ctlbk->data);
2139 printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d," 2163 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2140 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", 2164 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2141 dev->name, 2165 dev->name,
2142 p_ctlbk->version, 2166 p_ctlbk->version,
@@ -2146,21 +2170,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2146 p_connect->WS_name); 2170 p_connect->WS_name);
2147 if (privptr->active_link_ID != 0) { 2171 if (privptr->active_link_ID != 0) {
2148 claw_snd_disc(dev, p_ctlbk); 2172 claw_snd_disc(dev, p_ctlbk);
2149 printk(KERN_INFO "%s: Conn Req error : " 2173 dev_info(tdev, "%s rejected a connection request"
2150 "already logical link is active \n", 2174 " because it is already active\n",
2151 dev->name); 2175 dev->name);
2152 } 2176 }
2153 if (p_ctlbk->linkid != 1) { 2177 if (p_ctlbk->linkid != 1) {
2154 claw_snd_disc(dev, p_ctlbk); 2178 claw_snd_disc(dev, p_ctlbk);
2155 printk(KERN_INFO "%s: Conn Req error : " 2179 dev_info(tdev, "%s rejected a request to open multiple"
2156 "req logical link id is not 1\n", 2180 " connections\n",
2157 dev->name); 2181 dev->name);
2158 } 2182 }
2159 rc = find_link(dev, p_connect->host_name, p_connect->WS_name); 2183 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2160 if (rc != 0) { 2184 if (rc != 0) {
2161 claw_snd_disc(dev, p_ctlbk); 2185 claw_snd_disc(dev, p_ctlbk);
2162 printk(KERN_INFO "%s: Conn Resp error: " 2186 dev_info(tdev, "%s rejected a connection request"
2163 "req appl name does not match\n", 2187 " because of a type mismatch\n",
2164 dev->name); 2188 dev->name);
2165 } 2189 }
2166 claw_send_control(dev, 2190 claw_send_control(dev,
@@ -2172,7 +2196,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2172 p_env->packing = PACK_SEND; 2196 p_env->packing = PACK_SEND;
2173 claw_snd_conn_req(dev, 0); 2197 claw_snd_conn_req(dev, 0);
2174 } 2198 }
2175 printk(KERN_INFO "%s: CLAW device %.8s: Connection " 2199 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2176 "completed link_id=%d.\n", 2200 "completed link_id=%d.\n",
2177 dev->name, temp_ws_name, 2201 dev->name, temp_ws_name,
2178 p_ctlbk->linkid); 2202 p_ctlbk->linkid);
@@ -2182,7 +2206,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2182 break; 2206 break;
2183 case CONNECTION_RESPONSE: 2207 case CONNECTION_RESPONSE:
2184 p_connect = (struct conncmd *)&(p_ctlbk->data); 2208 p_connect = (struct conncmd *)&(p_ctlbk->data);
2185 printk(KERN_INFO "%s: Revc Conn Resp: Vers=%d,link_id=%d," 2209 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2186 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", 2210 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2187 dev->name, 2211 dev->name,
2188 p_ctlbk->version, 2212 p_ctlbk->version,
@@ -2193,16 +2217,18 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2193 p_connect->WS_name); 2217 p_connect->WS_name);
2194 2218
2195 if (p_ctlbk->rc != 0) { 2219 if (p_ctlbk->rc != 0) {
2196 printk(KERN_INFO "%s: Conn Resp error: rc=%d \n", 2220 dev_warn(tdev, "The communication peer of %s rejected"
2197 dev->name, p_ctlbk->rc); 2221 " a connection request\n",
2222 dev->name);
2198 return 1; 2223 return 1;
2199 } 2224 }
2200 rc = find_link(dev, 2225 rc = find_link(dev,
2201 p_connect->host_name, p_connect->WS_name); 2226 p_connect->host_name, p_connect->WS_name);
2202 if (rc != 0) { 2227 if (rc != 0) {
2203 claw_snd_disc(dev, p_ctlbk); 2228 claw_snd_disc(dev, p_ctlbk);
2204 printk(KERN_INFO "%s: Conn Resp error: " 2229 dev_warn(tdev, "The communication peer of %s"
2205 "req appl name does not match\n", 2230 " rejected a connection "
2231 "request because of a type mismatch\n",
2206 dev->name); 2232 dev->name);
2207 } 2233 }
2208 /* should be until CONNECTION_CONFIRM */ 2234 /* should be until CONNECTION_CONFIRM */
@@ -2210,7 +2236,8 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2210 break; 2236 break;
2211 case CONNECTION_CONFIRM: 2237 case CONNECTION_CONFIRM:
2212 p_connect = (struct conncmd *)&(p_ctlbk->data); 2238 p_connect = (struct conncmd *)&(p_ctlbk->data);
2213 printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d," 2239 dev_info(tdev,
2240 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2214 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", 2241 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2215 dev->name, 2242 dev->name,
2216 p_ctlbk->version, 2243 p_ctlbk->version,
@@ -2221,21 +2248,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2221 if (p_ctlbk->linkid == -(privptr->active_link_ID)) { 2248 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2222 privptr->active_link_ID = p_ctlbk->linkid; 2249 privptr->active_link_ID = p_ctlbk->linkid;
2223 if (p_env->packing > PACKING_ASK) { 2250 if (p_env->packing > PACKING_ASK) {
2224 printk(KERN_INFO "%s: Confirmed Now packing\n", 2251 dev_info(tdev,
2225 dev->name); 2252 "%s: Confirmed Now packing\n", dev->name);
2226 p_env->packing = DO_PACKED; 2253 p_env->packing = DO_PACKED;
2227 } 2254 }
2228 p_ch = &privptr->channel[WRITE]; 2255 p_ch = &privptr->channel[WRITE];
2229 wake_up(&p_ch->wait); 2256 wake_up(&p_ch->wait);
2230 } else { 2257 } else {
2231 printk(KERN_INFO "%s: Conn confirm: " 2258 dev_warn(tdev, "Activating %s failed because of"
2232 "unexpected linkid=%d \n", 2259 " an incorrect link ID=%d\n",
2233 dev->name, p_ctlbk->linkid); 2260 dev->name, p_ctlbk->linkid);
2234 claw_snd_disc(dev, p_ctlbk); 2261 claw_snd_disc(dev, p_ctlbk);
2235 } 2262 }
2236 break; 2263 break;
2237 case DISCONNECT: 2264 case DISCONNECT:
2238 printk(KERN_INFO "%s: Disconnect: " 2265 dev_info(tdev, "%s: Disconnect: "
2239 "Vers=%d,link_id=%d,Corr=%d\n", 2266 "Vers=%d,link_id=%d,Corr=%d\n",
2240 dev->name, p_ctlbk->version, 2267 dev->name, p_ctlbk->version,
2241 p_ctlbk->linkid, p_ctlbk->correlator); 2268 p_ctlbk->linkid, p_ctlbk->correlator);
@@ -2247,12 +2274,13 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2247 privptr->active_link_ID = 0; 2274 privptr->active_link_ID = 0;
2248 break; 2275 break;
2249 case CLAW_ERROR: 2276 case CLAW_ERROR:
2250 printk(KERN_INFO "%s: CLAW ERROR detected\n", 2277 dev_warn(tdev, "The communication peer of %s failed\n",
2251 dev->name); 2278 dev->name);
2252 break; 2279 break;
2253 default: 2280 default:
2254 printk(KERN_INFO "%s: Unexpected command code=%d \n", 2281 dev_warn(tdev, "The communication peer of %s sent"
2255 dev->name, p_ctlbk->command); 2282 " an unknown command code\n",
2283 dev->name);
2256 break; 2284 break;
2257 } 2285 }
2258 2286
@@ -2294,12 +2322,14 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2294 memcpy(&p_sysval->host_name, local_name, 8); 2322 memcpy(&p_sysval->host_name, local_name, 8);
2295 memcpy(&p_sysval->WS_name, remote_name, 8); 2323 memcpy(&p_sysval->WS_name, remote_name, 8);
2296 if (privptr->p_env->packing > 0) { 2324 if (privptr->p_env->packing > 0) {
2297 p_sysval->read_frame_size=DEF_PACK_BUFSIZE; 2325 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2298 p_sysval->write_frame_size=DEF_PACK_BUFSIZE; 2326 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2299 } else { 2327 } else {
2300 /* how big is the biggest group of packets */ 2328 /* how big is the biggest group of packets */
2301 p_sysval->read_frame_size=privptr->p_env->read_size; 2329 p_sysval->read_frame_size =
2302 p_sysval->write_frame_size=privptr->p_env->write_size; 2330 privptr->p_env->read_size;
2331 p_sysval->write_frame_size =
2332 privptr->p_env->write_size;
2303 } 2333 }
2304 memset(&p_sysval->reserved, 0x00, 4); 2334 memset(&p_sysval->reserved, 0x00, 4);
2305 break; 2335 break;
@@ -2511,8 +2541,10 @@ unpack_read(struct net_device *dev )
2511 mtc_this_frm=1; 2541 mtc_this_frm=1;
2512 if (p_this_ccw->header.length!= 2542 if (p_this_ccw->header.length!=
2513 privptr->p_env->read_size ) { 2543 privptr->p_env->read_size ) {
2514 printk(KERN_INFO " %s: Invalid frame detected " 2544 dev_warn(p_dev,
2515 "length is %02x\n" , 2545 "The communication peer of %s"
2546 " sent a faulty"
2547 " frame of length %02x\n",
2516 dev->name, p_this_ccw->header.length); 2548 dev->name, p_this_ccw->header.length);
2517 } 2549 }
2518 } 2550 }
@@ -2544,7 +2576,7 @@ unpack_next:
2544 goto NextFrame; 2576 goto NextFrame;
2545 p_packd = p_this_ccw->p_buffer+pack_off; 2577 p_packd = p_this_ccw->p_buffer+pack_off;
2546 p_packh = (struct clawph *) p_packd; 2578 p_packh = (struct clawph *) p_packd;
2547 if ((p_packh->len == 0) || /* all done with this frame? */ 2579 if ((p_packh->len == 0) || /* done with this frame? */
2548 (p_packh->flag != 0)) 2580 (p_packh->flag != 0))
2549 goto NextFrame; 2581 goto NextFrame;
2550 bytes_to_mov = p_packh->len; 2582 bytes_to_mov = p_packh->len;
@@ -2594,9 +2626,9 @@ unpack_next:
2594 netif_rx(skb); 2626 netif_rx(skb);
2595 } 2627 }
2596 else { 2628 else {
2629 dev_info(p_dev, "Allocating a buffer for"
2630 " incoming data failed\n");
2597 privptr->stats.rx_dropped++; 2631 privptr->stats.rx_dropped++;
2598 printk(KERN_WARNING "%s: %s() low on memory\n",
2599 dev->name,__func__);
2600 } 2632 }
2601 privptr->mtc_offset=0; 2633 privptr->mtc_offset=0;
2602 privptr->mtc_logical_link=-1; 2634 privptr->mtc_logical_link=-1;
@@ -2720,8 +2752,8 @@ claw_strt_out_IO( struct net_device *dev )
2720 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { 2752 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2721 parm = (unsigned long) p_ch; 2753 parm = (unsigned long) p_ch;
2722 CLAW_DBF_TEXT(2, trace, "StWrtIO"); 2754 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2723 rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm, 2755 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2724 0xff, 0); 2756 0xff, 0);
2725 if (rc != 0) { 2757 if (rc != 0) {
2726 ccw_check_return_code(p_ch->cdev, rc); 2758 ccw_check_return_code(p_ch->cdev, rc);
2727 } 2759 }
@@ -2816,22 +2848,26 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
2816 * Initialize everything of the net device except the name and the 2848 * Initialize everything of the net device except the name and the
2817 * channel structs. 2849 * channel structs.
2818 */ 2850 */
2851static const struct net_device_ops claw_netdev_ops = {
2852 .ndo_open = claw_open,
2853 .ndo_stop = claw_release,
2854 .ndo_get_stats = claw_stats,
2855 .ndo_start_xmit = claw_tx,
2856 .ndo_change_mtu = claw_change_mtu,
2857};
2858
2819static void 2859static void
2820claw_init_netdevice(struct net_device * dev) 2860claw_init_netdevice(struct net_device * dev)
2821{ 2861{
2822 CLAW_DBF_TEXT(2, setup, "init_dev"); 2862 CLAW_DBF_TEXT(2, setup, "init_dev");
2823 CLAW_DBF_TEXT_(2, setup, "%s", dev->name); 2863 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2824 dev->mtu = CLAW_DEFAULT_MTU_SIZE; 2864 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2825 dev->hard_start_xmit = claw_tx;
2826 dev->open = claw_open;
2827 dev->stop = claw_release;
2828 dev->get_stats = claw_stats;
2829 dev->change_mtu = claw_change_mtu;
2830 dev->hard_header_len = 0; 2865 dev->hard_header_len = 0;
2831 dev->addr_len = 0; 2866 dev->addr_len = 0;
2832 dev->type = ARPHRD_SLIP; 2867 dev->type = ARPHRD_SLIP;
2833 dev->tx_queue_len = 1300; 2868 dev->tx_queue_len = 1300;
2834 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 2869 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2870 dev->netdev_ops = &claw_netdev_ops;
2835 CLAW_DBF_TEXT(2, setup, "initok"); 2871 CLAW_DBF_TEXT(2, setup, "initok");
2836 return; 2872 return;
2837} 2873}
@@ -2880,8 +2916,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
2880 int ret; 2916 int ret;
2881 struct ccw_dev_id dev_id; 2917 struct ccw_dev_id dev_id;
2882 2918
2883 printk(KERN_INFO "claw: add for %s\n", 2919 dev_info(&cgdev->dev, "add for %s\n",
2884 dev_name(&cgdev->cdev[READ]->dev)); 2920 dev_name(&cgdev->cdev[READ]->dev));
2885 CLAW_DBF_TEXT(2, setup, "new_dev"); 2921 CLAW_DBF_TEXT(2, setup, "new_dev");
2886 privptr = cgdev->dev.driver_data; 2922 privptr = cgdev->dev.driver_data;
2887 cgdev->cdev[READ]->dev.driver_data = privptr; 2923 cgdev->cdev[READ]->dev.driver_data = privptr;
@@ -2897,29 +2933,28 @@ claw_new_device(struct ccwgroup_device *cgdev)
2897 if (ret == 0) 2933 if (ret == 0)
2898 ret = add_channel(cgdev->cdev[1],1,privptr); 2934 ret = add_channel(cgdev->cdev[1],1,privptr);
2899 if (ret != 0) { 2935 if (ret != 0) {
2900 printk(KERN_WARNING 2936 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2901 "add channel failed with ret = %d\n", ret); 2937 " failed with error code %d\n", ret);
2902 goto out; 2938 goto out;
2903 } 2939 }
2904 ret = ccw_device_set_online(cgdev->cdev[READ]); 2940 ret = ccw_device_set_online(cgdev->cdev[READ]);
2905 if (ret != 0) { 2941 if (ret != 0) {
2906 printk(KERN_WARNING 2942 dev_warn(&cgdev->dev,
2907 "claw: ccw_device_set_online %s READ failed " 2943 "Setting the read subchannel online"
2908 "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev), 2944 " failed with error code %d\n", ret);
2909 ret);
2910 goto out; 2945 goto out;
2911 } 2946 }
2912 ret = ccw_device_set_online(cgdev->cdev[WRITE]); 2947 ret = ccw_device_set_online(cgdev->cdev[WRITE]);
2913 if (ret != 0) { 2948 if (ret != 0) {
2914 printk(KERN_WARNING 2949 dev_warn(&cgdev->dev,
2915 "claw: ccw_device_set_online %s WRITE failed " 2950 "Setting the write subchannel online "
2916 "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev), 2951 "failed with error code %d\n", ret);
2917 ret);
2918 goto out; 2952 goto out;
2919 } 2953 }
2920 dev = alloc_netdev(0,"claw%d",claw_init_netdevice); 2954 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2921 if (!dev) { 2955 if (!dev) {
2922 printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__); 2956 dev_warn(&cgdev->dev,
2957 "Activating the CLAW device failed\n");
2923 goto out; 2958 goto out;
2924 } 2959 }
2925 dev->ml_priv = privptr; 2960 dev->ml_priv = privptr;
@@ -2947,13 +2982,13 @@ claw_new_device(struct ccwgroup_device *cgdev)
2947 privptr->channel[WRITE].ndev = dev; 2982 privptr->channel[WRITE].ndev = dev;
2948 privptr->p_env->ndev = dev; 2983 privptr->p_env->ndev = dev;
2949 2984
2950 printk(KERN_INFO "%s:readsize=%d writesize=%d " 2985 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
2951 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", 2986 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
2952 dev->name, p_env->read_size, 2987 dev->name, p_env->read_size,
2953 p_env->write_size, p_env->read_buffers, 2988 p_env->write_size, p_env->read_buffers,
2954 p_env->write_buffers, p_env->devno[READ], 2989 p_env->write_buffers, p_env->devno[READ],
2955 p_env->devno[WRITE]); 2990 p_env->devno[WRITE]);
2956 printk(KERN_INFO "%s:host_name:%.8s, adapter_name " 2991 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
2957 ":%.8s api_type: %.8s\n", 2992 ":%.8s api_type: %.8s\n",
2958 dev->name, p_env->host_name, 2993 dev->name, p_env->host_name,
2959 p_env->adapter_name , p_env->api_type); 2994 p_env->adapter_name , p_env->api_type);
@@ -2997,8 +3032,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
2997 ndev = priv->channel[READ].ndev; 3032 ndev = priv->channel[READ].ndev;
2998 if (ndev) { 3033 if (ndev) {
2999 /* Close the device */ 3034 /* Close the device */
3000 printk(KERN_INFO 3035 dev_info(&cgdev->dev, "%s: shutting down \n",
3001 "%s: shuting down \n",ndev->name); 3036 ndev->name);
3002 if (ndev->flags & IFF_RUNNING) 3037 if (ndev->flags & IFF_RUNNING)
3003 ret = claw_release(ndev); 3038 ret = claw_release(ndev);
3004 ndev->flags &=~IFF_RUNNING; 3039 ndev->flags &=~IFF_RUNNING;
@@ -3023,8 +3058,7 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3023 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); 3058 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3024 priv = cgdev->dev.driver_data; 3059 priv = cgdev->dev.driver_data;
3025 BUG_ON(!priv); 3060 BUG_ON(!priv);
3026 printk(KERN_INFO "claw: %s() called %s will be removed.\n", 3061 dev_info(&cgdev->dev, " will be removed.\n");
3027 __func__, dev_name(&cgdev->cdev[0]->dev));
3028 if (cgdev->state == CCWGROUP_ONLINE) 3062 if (cgdev->state == CCWGROUP_ONLINE)
3029 claw_shutdown_device(cgdev); 3063 claw_shutdown_device(cgdev);
3030 claw_remove_files(&cgdev->dev); 3064 claw_remove_files(&cgdev->dev);
@@ -3063,7 +3097,8 @@ claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3063} 3097}
3064 3098
3065static ssize_t 3099static ssize_t
3066claw_hname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3100claw_hname_write(struct device *dev, struct device_attribute *attr,
3101 const char *buf, size_t count)
3067{ 3102{
3068 struct claw_privbk *priv; 3103 struct claw_privbk *priv;
3069 struct claw_env * p_env; 3104 struct claw_env * p_env;
@@ -3100,7 +3135,8 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3100} 3135}
3101 3136
3102static ssize_t 3137static ssize_t
3103claw_adname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3138claw_adname_write(struct device *dev, struct device_attribute *attr,
3139 const char *buf, size_t count)
3104{ 3140{
3105 struct claw_privbk *priv; 3141 struct claw_privbk *priv;
3106 struct claw_env * p_env; 3142 struct claw_env * p_env;
@@ -3138,7 +3174,8 @@ claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3138} 3174}
3139 3175
3140static ssize_t 3176static ssize_t
3141claw_apname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3177claw_apname_write(struct device *dev, struct device_attribute *attr,
3178 const char *buf, size_t count)
3142{ 3179{
3143 struct claw_privbk *priv; 3180 struct claw_privbk *priv;
3144 struct claw_env * p_env; 3181 struct claw_env * p_env;
@@ -3185,7 +3222,8 @@ claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3185} 3222}
3186 3223
3187static ssize_t 3224static ssize_t
3188claw_wbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3225claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3226 const char *buf, size_t count)
3189{ 3227{
3190 struct claw_privbk *priv; 3228 struct claw_privbk *priv;
3191 struct claw_env * p_env; 3229 struct claw_env * p_env;
@@ -3226,7 +3264,8 @@ claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3226} 3264}
3227 3265
3228static ssize_t 3266static ssize_t
3229claw_rbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3267claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3268 const char *buf, size_t count)
3230{ 3269{
3231 struct claw_privbk *priv; 3270 struct claw_privbk *priv;
3232 struct claw_env *p_env; 3271 struct claw_env *p_env;
@@ -3289,7 +3328,7 @@ claw_cleanup(void)
3289{ 3328{
3290 unregister_cu3088_discipline(&claw_group_driver); 3329 unregister_cu3088_discipline(&claw_group_driver);
3291 claw_unregister_debug_facility(); 3330 claw_unregister_debug_facility();
3292 printk(KERN_INFO "claw: Driver unloaded\n"); 3331 pr_info("Driver unloaded\n");
3293 3332
3294} 3333}
3295 3334
@@ -3303,12 +3342,12 @@ static int __init
3303claw_init(void) 3342claw_init(void)
3304{ 3343{
3305 int ret = 0; 3344 int ret = 0;
3306 printk(KERN_INFO "claw: starting driver\n");
3307 3345
3346 pr_info("Loading %s\n", version);
3308 ret = claw_register_debug_facility(); 3347 ret = claw_register_debug_facility();
3309 if (ret) { 3348 if (ret) {
3310 printk(KERN_WARNING "claw: %s() debug_register failed %d\n", 3349 pr_err("Registering with the S/390 debug feature"
3311 __func__,ret); 3350 " failed with error code %d\n", ret);
3312 return ret; 3351 return ret;
3313 } 3352 }
3314 CLAW_DBF_TEXT(2, setup, "init_mod"); 3353 CLAW_DBF_TEXT(2, setup, "init_mod");
@@ -3316,8 +3355,8 @@ claw_init(void)
3316 if (ret) { 3355 if (ret) {
3317 CLAW_DBF_TEXT(2, setup, "init_bad"); 3356 CLAW_DBF_TEXT(2, setup, "init_bad");
3318 claw_unregister_debug_facility(); 3357 claw_unregister_debug_facility();
3319 printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n", 3358 pr_err("Registering with the cu3088 device driver failed "
3320 __func__,ret); 3359 "with error code %d\n", ret);
3321 } 3360 }
3322 return ret; 3361 return ret;
3323} 3362}
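At module scope the same clean-up switches to the pr_info()/pr_err() shorthands, and the KMSG_COMPONENT define added near the top of claw.c exists so that a pr_fmt() macro can prefix every such message with "claw: " instead of repeating the prefix in each string. A typical arrangement is sketched below; the pr_fmt line is an assumption here, since only KMSG_COMPONENT is visible in this diff:

    #define KMSG_COMPONENT "claw"
    #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt     /* must come before kernel.h */

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init example_init(void)
    {
            pr_info("Loading driver\n");    /* logs as "claw: Loading driver" */
            return 0;
    }
    module_init(example_init);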
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 2678573bece..8f2a888d0a0 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1099,12 +1099,24 @@ static void ctcm_free_netdevice(struct net_device *dev)
1099 1099
1100struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); 1100struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
1101 1101
1102static const struct net_device_ops ctcm_netdev_ops = {
1103 .ndo_open = ctcm_open,
1104 .ndo_stop = ctcm_close,
1105 .ndo_get_stats = ctcm_stats,
1106 .ndo_change_mtu = ctcm_change_mtu,
1107 .ndo_start_xmit = ctcm_tx,
1108};
1109
1110static const struct net_device_ops ctcm_mpc_netdev_ops = {
1111 .ndo_open = ctcm_open,
1112 .ndo_stop = ctcm_close,
1113 .ndo_get_stats = ctcm_stats,
1114 .ndo_change_mtu = ctcm_change_mtu,
1115 .ndo_start_xmit = ctcmpc_tx,
1116};
1117
1102void static ctcm_dev_setup(struct net_device *dev) 1118void static ctcm_dev_setup(struct net_device *dev)
1103{ 1119{
1104 dev->open = ctcm_open;
1105 dev->stop = ctcm_close;
1106 dev->get_stats = ctcm_stats;
1107 dev->change_mtu = ctcm_change_mtu;
1108 dev->type = ARPHRD_SLIP; 1120 dev->type = ARPHRD_SLIP;
1109 dev->tx_queue_len = 100; 1121 dev->tx_queue_len = 100;
1110 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1122 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -1157,12 +1169,12 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1157 dev->mtu = MPC_BUFSIZE_DEFAULT - 1169 dev->mtu = MPC_BUFSIZE_DEFAULT -
1158 TH_HEADER_LENGTH - PDU_HEADER_LENGTH; 1170 TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
1159 1171
1160 dev->hard_start_xmit = ctcmpc_tx; 1172 dev->netdev_ops = &ctcm_mpc_netdev_ops;
1161 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; 1173 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1162 priv->buffer_size = MPC_BUFSIZE_DEFAULT; 1174 priv->buffer_size = MPC_BUFSIZE_DEFAULT;
1163 } else { 1175 } else {
1164 dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; 1176 dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
1165 dev->hard_start_xmit = ctcm_tx; 1177 dev->netdev_ops = &ctcm_netdev_ops;
1166 dev->hard_header_len = LL_HEADER_LENGTH + 2; 1178 dev->hard_header_len = LL_HEADER_LENGTH + 2;
1167 } 1179 }
1168 1180
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 49c3bfa1afd..083f787d260 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2101,6 +2101,20 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev)
2101/** 2101/**
2102 * lcs_new_device will be called by setting the group device online. 2102 * lcs_new_device will be called by setting the group device online.
2103 */ 2103 */
2104static const struct net_device_ops lcs_netdev_ops = {
2105 .ndo_open = lcs_open_device,
2106 .ndo_stop = lcs_stop_device,
2107 .ndo_get_stats = lcs_getstats,
2108 .ndo_start_xmit = lcs_start_xmit,
2109};
2110
2111static const struct net_device_ops lcs_mc_netdev_ops = {
2112 .ndo_open = lcs_open_device,
2113 .ndo_stop = lcs_stop_device,
2114 .ndo_get_stats = lcs_getstats,
2115 .ndo_start_xmit = lcs_start_xmit,
2116 .ndo_set_multicast_list = lcs_set_multicast_list,
2117};
2104 2118
2105static int 2119static int
2106lcs_new_device(struct ccwgroup_device *ccwgdev) 2120lcs_new_device(struct ccwgroup_device *ccwgdev)
@@ -2168,14 +2182,11 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2168 goto out; 2182 goto out;
2169 card->dev = dev; 2183 card->dev = dev;
2170 card->dev->ml_priv = card; 2184 card->dev->ml_priv = card;
2171 card->dev->open = lcs_open_device; 2185 card->dev->netdev_ops = &lcs_netdev_ops;
2172 card->dev->stop = lcs_stop_device;
2173 card->dev->hard_start_xmit = lcs_start_xmit;
2174 card->dev->get_stats = lcs_getstats;
2175 memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); 2186 memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
2176#ifdef CONFIG_IP_MULTICAST 2187#ifdef CONFIG_IP_MULTICAST
2177 if (!lcs_check_multicast_support(card)) 2188 if (!lcs_check_multicast_support(card))
2178 card->dev->set_multicast_list = lcs_set_multicast_list; 2189 card->dev->netdev_ops = &lcs_mc_netdev_ops;
2179#endif 2190#endif
2180netdev_out: 2191netdev_out:
2181 lcs_set_allowed_threads(card,0xffffffff); 2192 lcs_set_allowed_threads(card,0xffffffff);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 930e2fc2a01..1ba4509435f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1876,20 +1876,24 @@ static void netiucv_free_netdevice(struct net_device *dev)
1876/** 1876/**
1877 * Initialize a net device. (Called from kernel in alloc_netdev()) 1877 * Initialize a net device. (Called from kernel in alloc_netdev())
1878 */ 1878 */
1879static const struct net_device_ops netiucv_netdev_ops = {
1880 .ndo_open = netiucv_open,
1881 .ndo_stop = netiucv_close,
1882 .ndo_get_stats = netiucv_stats,
1883 .ndo_start_xmit = netiucv_tx,
1884 .ndo_change_mtu = netiucv_change_mtu,
1885};
1886
1879static void netiucv_setup_netdevice(struct net_device *dev) 1887static void netiucv_setup_netdevice(struct net_device *dev)
1880{ 1888{
1881 dev->mtu = NETIUCV_MTU_DEFAULT; 1889 dev->mtu = NETIUCV_MTU_DEFAULT;
1882 dev->hard_start_xmit = netiucv_tx;
1883 dev->open = netiucv_open;
1884 dev->stop = netiucv_close;
1885 dev->get_stats = netiucv_stats;
1886 dev->change_mtu = netiucv_change_mtu;
1887 dev->destructor = netiucv_free_netdevice; 1890 dev->destructor = netiucv_free_netdevice;
1888 dev->hard_header_len = NETIUCV_HDRLEN; 1891 dev->hard_header_len = NETIUCV_HDRLEN;
1889 dev->addr_len = 0; 1892 dev->addr_len = 0;
1890 dev->type = ARPHRD_SLIP; 1893 dev->type = ARPHRD_SLIP;
1891 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; 1894 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1892 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1895 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1896 dev->netdev_ops = &netiucv_netdev_ops;
1893} 1897}
1894 1898
1895/** 1899/**
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 5c7c4d95c49..f675807cc48 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -13,6 +13,8 @@
13#ifndef __CXGB3I_ULP2_DDP_H__ 13#ifndef __CXGB3I_ULP2_DDP_H__
14#define __CXGB3I_ULP2_DDP_H__ 14#define __CXGB3I_ULP2_DDP_H__
15 15
16#include <linux/vmalloc.h>
17
16/** 18/**
17 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity 19 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
18 * 20 *
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 5f77417ed58..3ee4eb40abc 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -533,12 +533,8 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
533 event->event_code = event_code; 533 event->event_code = event_code;
534 event->event_data = event_data; 534 event->event_data = event_data;
535 535
536 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS, 536 nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
537 GFP_KERNEL); 537 GFP_KERNEL);
538 if (err && (err != -ESRCH)) /* filter no recipient errors */
539 /* nlmsg_multicast already kfree_skb'd */
540 goto send_fail;
541
542 return; 538 return;
543 539
544send_fail_skb: 540send_fail_skb:
@@ -607,12 +603,8 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
607 event->event_code = FCH_EVT_VENDOR_UNIQUE; 603 event->event_code = FCH_EVT_VENDOR_UNIQUE;
608 memcpy(&event->event_data, data_buf, data_len); 604 memcpy(&event->event_data, data_buf, data_len);
609 605
610 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS, 606 nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
611 GFP_KERNEL); 607 GFP_KERNEL);
612 if (err && (err != -ESRCH)) /* filter no recipient errors */
613 /* nlmsg_multicast already kfree_skb'd */
614 goto send_vendor_fail;
615
616 return; 608 return;
617 609
618send_vendor_fail_skb: 610send_vendor_fail_skb:
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 75c9297694c..2adfab8c11c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -966,15 +966,7 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
966static int 966static int
967iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) 967iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp)
968{ 968{
969 int rc; 969 return netlink_broadcast(nls, skb, 0, 1, gfp);
970
971 rc = netlink_broadcast(nls, skb, 0, 1, gfp);
972 if (rc < 0) {
973 printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
974 return rc;
975 }
976
977 return 0;
978} 970}
979 971
980static int 972static int
@@ -1207,7 +1199,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1207 * the user and when the daemon is restarted it will handle it 1199 * the user and when the daemon is restarted it will handle it
1208 */ 1200 */
1209 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 1201 rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
1210 if (rc < 0) 1202 if (rc == -ESRCH)
1211 iscsi_cls_session_printk(KERN_ERR, session, 1203 iscsi_cls_session_printk(KERN_ERR, session,
1212 "Cannot notify userspace of session " 1204 "Cannot notify userspace of session "
1213 "event %u. Check iscsi daemon\n", 1205 "event %u. Check iscsi daemon\n",
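
The scsi_transport_fc, scsi_transport_iscsi and (further down) fs/dquot.c changes share one idea: netlink and genetlink multicast return -ESRCH when nobody has joined the group, which is a perfectly normal condition, so the senders stop treating it as a failure worth logging or unwinding - especially since nlmsg_multicast() consumes the skb in every case. A hedged sketch of the resulting fire-and-forget pattern (the function and variable names are placeholders, not taken from this patch):

#include <linux/kernel.h>
#include <net/netlink.h>

static void post_event(struct sock *nlsk, struct sk_buff *skb, u32 group)
{
	int err = nlmsg_multicast(nlsk, skb, 0, group, GFP_KERNEL);

	/* -ESRCH only means there were no listeners; the skb has already
	 * been consumed by nlmsg_multicast(), so nothing is leaked and
	 * there is nothing useful to report. */
	if (err && err != -ESRCH)
		pr_debug("event multicast failed: %d\n", err);
}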
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile
index 6f255e9c5af..cfbb74f2982 100644
--- a/drivers/ssb/Makefile
+++ b/drivers/ssb/Makefile
@@ -9,6 +9,7 @@ ssb-$(CONFIG_SSB_PCMCIAHOST) += pcmcia.o
9 9
10# built-in drivers 10# built-in drivers
11ssb-y += driver_chipcommon.o 11ssb-y += driver_chipcommon.o
12ssb-y += driver_chipcommon_pmu.o
12ssb-$(CONFIG_SSB_DRIVER_MIPS) += driver_mipscore.o 13ssb-$(CONFIG_SSB_DRIVER_MIPS) += driver_mipscore.o
13ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o 14ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o
14ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o 15ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 2d27d6d6d08..27a677584a4 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -21,6 +21,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
21 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) }, 21 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) },
22 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) }, 22 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) },
23 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, 23 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
24 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) },
24 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) }, 25 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) },
25 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) }, 26 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
26 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) }, 27 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
@@ -29,6 +30,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
29 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4325) }, 30 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4325) },
30 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) }, 31 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
31 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) }, 32 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
33 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
32 { 0, }, 34 { 0, },
33}; 35};
34MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); 36MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 571f4fd5523..9681536163c 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -26,19 +26,6 @@ enum ssb_clksrc {
26}; 26};
27 27
28 28
29static inline u32 chipco_read32(struct ssb_chipcommon *cc,
30 u16 offset)
31{
32 return ssb_read32(cc->dev, offset);
33}
34
35static inline void chipco_write32(struct ssb_chipcommon *cc,
36 u16 offset,
37 u32 value)
38{
39 ssb_write32(cc->dev, offset, value);
40}
41
42static inline u32 chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset, 29static inline u32 chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset,
43 u32 mask, u32 value) 30 u32 mask, u32 value)
44{ 31{
@@ -246,6 +233,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
246{ 233{
247 if (!cc->dev) 234 if (!cc->dev)
248 return; /* We don't have a ChipCommon */ 235 return; /* We don't have a ChipCommon */
236 ssb_pmu_init(cc);
249 chipco_powercontrol_init(cc); 237 chipco_powercontrol_init(cc);
250 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); 238 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
251 calc_fast_powerup_delay(cc); 239 calc_fast_powerup_delay(cc);
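
The chipco_read32()/chipco_write32() wrappers are dropped from driver_chipcommon.c because the new PMU code below needs them too; they presumably move, unchanged, into a header shared by both files (ssb_private.h or ssb_driver_chipcommon.h - the corresponding hunk is not part of this excerpt). For reference, the removed definitions were simply:

/* Assumed to be relocated rather than rewritten: read/write a
 * ChipCommon core register through the generic ssb accessors. */
static inline u32 chipco_read32(struct ssb_chipcommon *cc, u16 offset)
{
	return ssb_read32(cc->dev, offset);
}

static inline void chipco_write32(struct ssb_chipcommon *cc, u16 offset,
				  u32 value)
{
	ssb_write32(cc->dev, offset, value);
}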
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
new file mode 100644
index 00000000000..4aaddeec55a
--- /dev/null
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -0,0 +1,508 @@
1/*
2 * Sonics Silicon Backplane
3 * Broadcom ChipCommon Power Management Unit driver
4 *
5 * Copyright 2009, Michael Buesch <mb@bu3sch.de>
6 * Copyright 2007, Broadcom Corporation
7 *
8 * Licensed under the GNU/GPL. See COPYING for details.
9 */
10
11#include <linux/ssb/ssb.h>
12#include <linux/ssb/ssb_regs.h>
13#include <linux/ssb/ssb_driver_chipcommon.h>
14#include <linux/delay.h>
15
16#include "ssb_private.h"
17
18static u32 ssb_chipco_pll_read(struct ssb_chipcommon *cc, u32 offset)
19{
20 chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset);
21 return chipco_read32(cc, SSB_CHIPCO_PLLCTL_DATA);
22}
23
24static void ssb_chipco_pll_write(struct ssb_chipcommon *cc,
25 u32 offset, u32 value)
26{
27 chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset);
28 chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, value);
29}
30
31struct pmu0_plltab_entry {
32 u16 freq; /* Crystal frequency in kHz.*/
33 u8 xf; /* Crystal frequency value for PMU control */
34 u8 wb_int;
35 u32 wb_frac;
36};
37
38static const struct pmu0_plltab_entry pmu0_plltab[] = {
39 { .freq = 12000, .xf = 1, .wb_int = 73, .wb_frac = 349525, },
40 { .freq = 13000, .xf = 2, .wb_int = 67, .wb_frac = 725937, },
41 { .freq = 14400, .xf = 3, .wb_int = 61, .wb_frac = 116508, },
42 { .freq = 15360, .xf = 4, .wb_int = 57, .wb_frac = 305834, },
43 { .freq = 16200, .xf = 5, .wb_int = 54, .wb_frac = 336579, },
44 { .freq = 16800, .xf = 6, .wb_int = 52, .wb_frac = 399457, },
45 { .freq = 19200, .xf = 7, .wb_int = 45, .wb_frac = 873813, },
46 { .freq = 19800, .xf = 8, .wb_int = 44, .wb_frac = 466033, },
47 { .freq = 20000, .xf = 9, .wb_int = 44, .wb_frac = 0, },
48 { .freq = 25000, .xf = 10, .wb_int = 70, .wb_frac = 419430, },
49 { .freq = 26000, .xf = 11, .wb_int = 67, .wb_frac = 725937, },
50 { .freq = 30000, .xf = 12, .wb_int = 58, .wb_frac = 699050, },
51 { .freq = 38400, .xf = 13, .wb_int = 45, .wb_frac = 873813, },
52 { .freq = 40000, .xf = 14, .wb_int = 45, .wb_frac = 0, },
53};
54#define SSB_PMU0_DEFAULT_XTALFREQ 20000
55
56static const struct pmu0_plltab_entry * pmu0_plltab_find_entry(u32 crystalfreq)
57{
58 const struct pmu0_plltab_entry *e;
59 unsigned int i;
60
61 for (i = 0; i < ARRAY_SIZE(pmu0_plltab); i++) {
62 e = &pmu0_plltab[i];
63 if (e->freq == crystalfreq)
64 return e;
65 }
66
67 return NULL;
68}
69
70/* Tune the PLL to the crystal speed. crystalfreq is in kHz. */
71static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
72 u32 crystalfreq)
73{
74 struct ssb_bus *bus = cc->dev->bus;
75 const struct pmu0_plltab_entry *e = NULL;
76 u32 pmuctl, tmp, pllctl;
77 unsigned int i;
78
79 if ((bus->chip_id == 0x5354) && !crystalfreq) {
80 /* The 5354 crystal freq is 25MHz */
81 crystalfreq = 25000;
82 }
83 if (crystalfreq)
84 e = pmu0_plltab_find_entry(crystalfreq);
85 if (!e)
86 e = pmu0_plltab_find_entry(SSB_PMU0_DEFAULT_XTALFREQ);
87 BUG_ON(!e);
88 crystalfreq = e->freq;
89 cc->pmu.crystalfreq = e->freq;
90
91 /* Check if the PLL already is programmed to this frequency. */
92 pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
93 if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) {
94 /* We're already there... */
95 return;
96 }
97
98 ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
99 (crystalfreq / 1000), (crystalfreq % 1000));
100
101 /* First turn the PLL off. */
102 switch (bus->chip_id) {
103 case 0x4328:
104 chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK,
105 ~(1 << SSB_PMURES_4328_BB_PLL_PU));
106 chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK,
107 ~(1 << SSB_PMURES_4328_BB_PLL_PU));
108 break;
109 case 0x5354:
110 chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK,
111 ~(1 << SSB_PMURES_5354_BB_PLL_PU));
112 chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK,
113 ~(1 << SSB_PMURES_5354_BB_PLL_PU));
114 break;
115 default:
116 SSB_WARN_ON(1);
117 }
118 for (i = 1500; i; i--) {
119 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
120 if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT))
121 break;
122 udelay(10);
123 }
124 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
125 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
126 ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");
127
128 /* Set PDIV in PLL control 0. */
129 pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
130 if (crystalfreq >= SSB_PMU0_PLLCTL0_PDIV_FREQ)
131 pllctl |= SSB_PMU0_PLLCTL0_PDIV_MSK;
132 else
133 pllctl &= ~SSB_PMU0_PLLCTL0_PDIV_MSK;
134 ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL0, pllctl);
135
136 /* Set WILD in PLL control 1. */
137 pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL1);
138 pllctl &= ~SSB_PMU0_PLLCTL1_STOPMOD;
139 pllctl &= ~(SSB_PMU0_PLLCTL1_WILD_IMSK | SSB_PMU0_PLLCTL1_WILD_FMSK);
140 pllctl |= ((u32)e->wb_int << SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_IMSK;
141 pllctl |= ((u32)e->wb_frac << SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_FMSK;
142 if (e->wb_frac == 0)
143 pllctl |= SSB_PMU0_PLLCTL1_STOPMOD;
144 ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL1, pllctl);
145
146 /* Set WILD in PLL control 2. */
147 pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL2);
148 pllctl &= ~SSB_PMU0_PLLCTL2_WILD_IMSKHI;
149 pllctl |= (((u32)e->wb_int >> 4) << SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT) & SSB_PMU0_PLLCTL2_WILD_IMSKHI;
150 ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL2, pllctl);
151
152 /* Set the crystalfrequency and the divisor. */
153 pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
154 pmuctl &= ~SSB_CHIPCO_PMU_CTL_ILP_DIV;
155 pmuctl |= (((crystalfreq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT)
156 & SSB_CHIPCO_PMU_CTL_ILP_DIV;
157 pmuctl &= ~SSB_CHIPCO_PMU_CTL_XTALFREQ;
158 pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ;
159 chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl);
160}
161
162struct pmu1_plltab_entry {
163 u16 freq; /* Crystal frequency in kHz.*/
164 u8 xf; /* Crystal frequency value for PMU control */
165 u8 ndiv_int;
166 u32 ndiv_frac;
167 u8 p1div;
168 u8 p2div;
169};
170
171static const struct pmu1_plltab_entry pmu1_plltab[] = {
172 { .freq = 12000, .xf = 1, .p1div = 3, .p2div = 22, .ndiv_int = 0x9, .ndiv_frac = 0xFFFFEF, },
173 { .freq = 13000, .xf = 2, .p1div = 1, .p2div = 6, .ndiv_int = 0xb, .ndiv_frac = 0x483483, },
174 { .freq = 14400, .xf = 3, .p1div = 1, .p2div = 10, .ndiv_int = 0xa, .ndiv_frac = 0x1C71C7, },
175 { .freq = 15360, .xf = 4, .p1div = 1, .p2div = 5, .ndiv_int = 0xb, .ndiv_frac = 0x755555, },
176 { .freq = 16200, .xf = 5, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x6E9E06, },
177 { .freq = 16800, .xf = 6, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x3CF3CF, },
178 { .freq = 19200, .xf = 7, .p1div = 1, .p2div = 9, .ndiv_int = 0x5, .ndiv_frac = 0x17B425, },
179 { .freq = 19800, .xf = 8, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0xA57EB, },
180 { .freq = 20000, .xf = 9, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0, },
181 { .freq = 24000, .xf = 10, .p1div = 3, .p2div = 11, .ndiv_int = 0xa, .ndiv_frac = 0, },
182 { .freq = 25000, .xf = 11, .p1div = 5, .p2div = 16, .ndiv_int = 0xb, .ndiv_frac = 0, },
183 { .freq = 26000, .xf = 12, .p1div = 1, .p2div = 2, .ndiv_int = 0x10, .ndiv_frac = 0xEC4EC4, },
184 { .freq = 30000, .xf = 13, .p1div = 3, .p2div = 8, .ndiv_int = 0xb, .ndiv_frac = 0, },
185 { .freq = 38400, .xf = 14, .p1div = 1, .p2div = 5, .ndiv_int = 0x4, .ndiv_frac = 0x955555, },
186 { .freq = 40000, .xf = 15, .p1div = 1, .p2div = 2, .ndiv_int = 0xb, .ndiv_frac = 0, },
187};
188
189#define SSB_PMU1_DEFAULT_XTALFREQ 15360
190
191static const struct pmu1_plltab_entry * pmu1_plltab_find_entry(u32 crystalfreq)
192{
193 const struct pmu1_plltab_entry *e;
194 unsigned int i;
195
196 for (i = 0; i < ARRAY_SIZE(pmu1_plltab); i++) {
197 e = &pmu1_plltab[i];
198 if (e->freq == crystalfreq)
199 return e;
200 }
201
202 return NULL;
203}
204
205/* Tune the PLL to the crystal speed. crystalfreq is in kHz. */
206static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
207 u32 crystalfreq)
208{
209 struct ssb_bus *bus = cc->dev->bus;
210 const struct pmu1_plltab_entry *e = NULL;
211 u32 buffer_strength = 0;
212 u32 tmp, pllctl, pmuctl;
213 unsigned int i;
214
215 if (bus->chip_id == 0x4312) {
216 /* We do not touch the BCM4312 PLL and assume
217 * the default crystal settings work out-of-the-box. */
218 cc->pmu.crystalfreq = 20000;
219 return;
220 }
221
222 if (crystalfreq)
223 e = pmu1_plltab_find_entry(crystalfreq);
224 if (!e)
225 e = pmu1_plltab_find_entry(SSB_PMU1_DEFAULT_XTALFREQ);
226 BUG_ON(!e);
227 crystalfreq = e->freq;
228 cc->pmu.crystalfreq = e->freq;
229
230 /* Check if the PLL already is programmed to this frequency. */
231 pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
232 if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) {
233 /* We're already there... */
234 return;
235 }
236
237 ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
238 (crystalfreq / 1000), (crystalfreq % 1000));
239
240 /* First turn the PLL off. */
241 switch (bus->chip_id) {
242 case 0x4325:
243 chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK,
244 ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) |
245 (1 << SSB_PMURES_4325_HT_AVAIL)));
246 chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK,
247 ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) |
248 (1 << SSB_PMURES_4325_HT_AVAIL)));
249 /* Adjust the BBPLL to 2 on all channels later. */
250 buffer_strength = 0x222222;
251 break;
252 default:
253 SSB_WARN_ON(1);
254 }
255 for (i = 1500; i; i--) {
256 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
257 if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT))
258 break;
259 udelay(10);
260 }
261 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
262 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
263 ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");
264
265 /* Set p1div and p2div. */
266 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
267 pllctl &= ~(SSB_PMU1_PLLCTL0_P1DIV | SSB_PMU1_PLLCTL0_P2DIV);
268 pllctl |= ((u32)e->p1div << SSB_PMU1_PLLCTL0_P1DIV_SHIFT) & SSB_PMU1_PLLCTL0_P1DIV;
269 pllctl |= ((u32)e->p2div << SSB_PMU1_PLLCTL0_P2DIV_SHIFT) & SSB_PMU1_PLLCTL0_P2DIV;
270 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, pllctl);
271
272 /* Set ndiv int and ndiv mode */
273 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL2);
274 pllctl &= ~(SSB_PMU1_PLLCTL2_NDIVINT | SSB_PMU1_PLLCTL2_NDIVMODE);
275 pllctl |= ((u32)e->ndiv_int << SSB_PMU1_PLLCTL2_NDIVINT_SHIFT) & SSB_PMU1_PLLCTL2_NDIVINT;
276 pllctl |= (1 << SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT) & SSB_PMU1_PLLCTL2_NDIVMODE;
277 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, pllctl);
278
279 /* Set ndiv frac */
280 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL3);
281 pllctl &= ~SSB_PMU1_PLLCTL3_NDIVFRAC;
282 pllctl |= ((u32)e->ndiv_frac << SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT) & SSB_PMU1_PLLCTL3_NDIVFRAC;
283 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL3, pllctl);
284
285 /* Change the drive strength, if required. */
286 if (buffer_strength) {
287 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL5);
288 pllctl &= ~SSB_PMU1_PLLCTL5_CLKDRV;
289 pllctl |= (buffer_strength << SSB_PMU1_PLLCTL5_CLKDRV_SHIFT) & SSB_PMU1_PLLCTL5_CLKDRV;
290 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, pllctl);
291 }
292
293 /* Tune the crystalfreq and the divisor. */
294 pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
295 pmuctl &= ~(SSB_CHIPCO_PMU_CTL_ILP_DIV | SSB_CHIPCO_PMU_CTL_XTALFREQ);
296 pmuctl |= ((((u32)e->freq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT)
297 & SSB_CHIPCO_PMU_CTL_ILP_DIV;
298 pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ;
299 chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl);
300}
301
302static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
303{
304 struct ssb_bus *bus = cc->dev->bus;
305 u32 crystalfreq = 0; /* in kHz. 0 = keep default freq. */
306
307 if (bus->bustype == SSB_BUSTYPE_SSB) {
308 /* TODO: The user may override the crystal frequency. */
309 }
310
311 switch (bus->chip_id) {
312 case 0x4312:
313 case 0x4325:
314 ssb_pmu1_pllinit_r0(cc, crystalfreq);
315 break;
316 case 0x4328:
317 case 0x5354:
318 ssb_pmu0_pllinit_r0(cc, crystalfreq);
319 break;
320 default:
321 ssb_printk(KERN_ERR PFX
322 "ERROR: PLL init unknown for device %04X\n",
323 bus->chip_id);
324 }
325}
326
327struct pmu_res_updown_tab_entry {
328 u8 resource; /* The resource number */
329 u16 updown; /* The updown value */
330};
331
332enum pmu_res_depend_tab_task {
333 PMU_RES_DEP_SET = 1,
334 PMU_RES_DEP_ADD,
335 PMU_RES_DEP_REMOVE,
336};
337
338struct pmu_res_depend_tab_entry {
339 u8 resource; /* The resource number */
340 u8 task; /* SET | ADD | REMOVE */
341 u32 depend; /* The depend mask */
342};
343
344static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4328a0[] = {
345 { .resource = SSB_PMURES_4328_EXT_SWITCHER_PWM, .updown = 0x0101, },
346 { .resource = SSB_PMURES_4328_BB_SWITCHER_PWM, .updown = 0x1F01, },
347 { .resource = SSB_PMURES_4328_BB_SWITCHER_BURST, .updown = 0x010F, },
348 { .resource = SSB_PMURES_4328_BB_EXT_SWITCHER_BURST, .updown = 0x0101, },
349 { .resource = SSB_PMURES_4328_ILP_REQUEST, .updown = 0x0202, },
350 { .resource = SSB_PMURES_4328_RADIO_SWITCHER_PWM, .updown = 0x0F01, },
351 { .resource = SSB_PMURES_4328_RADIO_SWITCHER_BURST, .updown = 0x0F01, },
352 { .resource = SSB_PMURES_4328_ROM_SWITCH, .updown = 0x0101, },
353 { .resource = SSB_PMURES_4328_PA_REF_LDO, .updown = 0x0F01, },
354 { .resource = SSB_PMURES_4328_RADIO_LDO, .updown = 0x0F01, },
355 { .resource = SSB_PMURES_4328_AFE_LDO, .updown = 0x0F01, },
356 { .resource = SSB_PMURES_4328_PLL_LDO, .updown = 0x0F01, },
357 { .resource = SSB_PMURES_4328_BG_FILTBYP, .updown = 0x0101, },
358 { .resource = SSB_PMURES_4328_TX_FILTBYP, .updown = 0x0101, },
359 { .resource = SSB_PMURES_4328_RX_FILTBYP, .updown = 0x0101, },
360 { .resource = SSB_PMURES_4328_XTAL_PU, .updown = 0x0101, },
361 { .resource = SSB_PMURES_4328_XTAL_EN, .updown = 0xA001, },
362 { .resource = SSB_PMURES_4328_BB_PLL_FILTBYP, .updown = 0x0101, },
363 { .resource = SSB_PMURES_4328_RF_PLL_FILTBYP, .updown = 0x0101, },
364 { .resource = SSB_PMURES_4328_BB_PLL_PU, .updown = 0x0701, },
365};
366
367static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4328a0[] = {
368 {
369 /* Adjust ILP Request to avoid forcing EXT/BB into burst mode. */
370 .resource = SSB_PMURES_4328_ILP_REQUEST,
371 .task = PMU_RES_DEP_SET,
372 .depend = ((1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) |
373 (1 << SSB_PMURES_4328_BB_SWITCHER_PWM)),
374 },
375};
376
377static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4325a0[] = {
378 { .resource = SSB_PMURES_4325_XTAL_PU, .updown = 0x1501, },
379};
380
381static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4325a0[] = {
382 {
383 /* Adjust HT-Available dependencies. */
384 .resource = SSB_PMURES_4325_HT_AVAIL,
385 .task = PMU_RES_DEP_ADD,
386 .depend = ((1 << SSB_PMURES_4325_RX_PWRSW_PU) |
387 (1 << SSB_PMURES_4325_TX_PWRSW_PU) |
388 (1 << SSB_PMURES_4325_LOGEN_PWRSW_PU) |
389 (1 << SSB_PMURES_4325_AFE_PWRSW_PU)),
390 },
391};
392
393static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
394{
395 struct ssb_bus *bus = cc->dev->bus;
396 u32 min_msk = 0, max_msk = 0;
397 unsigned int i;
398 const struct pmu_res_updown_tab_entry *updown_tab = NULL;
399 unsigned int updown_tab_size;
400 const struct pmu_res_depend_tab_entry *depend_tab = NULL;
401 unsigned int depend_tab_size;
402
403 switch (bus->chip_id) {
404 case 0x4312:
405 /* We keep the default settings:
406 * min_msk = 0xCBB
407 * max_msk = 0x7FFFF
408 */
409 break;
410 case 0x4325:
411 /* Power OTP down later. */
412 min_msk = (1 << SSB_PMURES_4325_CBUCK_BURST) |
413 (1 << SSB_PMURES_4325_LNLDO2_PU);
414 if (chipco_read32(cc, SSB_CHIPCO_CHIPSTAT) &
415 SSB_CHIPCO_CHST_4325_PMUTOP_2B)
416 min_msk |= (1 << SSB_PMURES_4325_CLDO_CBUCK_BURST);
417 /* The PLL may turn on, if it decides so. */
418 max_msk = 0xFFFFF;
419 updown_tab = pmu_res_updown_tab_4325a0;
420 updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4325a0);
421 depend_tab = pmu_res_depend_tab_4325a0;
422 depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4325a0);
423 break;
424 case 0x4328:
425 min_msk = (1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) |
426 (1 << SSB_PMURES_4328_BB_SWITCHER_PWM) |
427 (1 << SSB_PMURES_4328_XTAL_EN);
428 /* The PLL may turn on, if it decides so. */
429 max_msk = 0xFFFFF;
430 updown_tab = pmu_res_updown_tab_4328a0;
431 updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4328a0);
432 depend_tab = pmu_res_depend_tab_4328a0;
433 depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4328a0);
434 break;
435 case 0x5354:
436 /* The PLL may turn on, if it decides so. */
437 max_msk = 0xFFFFF;
438 break;
439 default:
440 ssb_printk(KERN_ERR PFX
441 "ERROR: PMU resource config unknown for device %04X\n",
442 bus->chip_id);
443 }
444
445 if (updown_tab) {
446 for (i = 0; i < updown_tab_size; i++) {
447 chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL,
448 updown_tab[i].resource);
449 chipco_write32(cc, SSB_CHIPCO_PMU_RES_UPDNTM,
450 updown_tab[i].updown);
451 }
452 }
453 if (depend_tab) {
454 for (i = 0; i < depend_tab_size; i++) {
455 chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL,
456 depend_tab[i].resource);
457 switch (depend_tab[i].task) {
458 case PMU_RES_DEP_SET:
459 chipco_write32(cc, SSB_CHIPCO_PMU_RES_DEPMSK,
460 depend_tab[i].depend);
461 break;
462 case PMU_RES_DEP_ADD:
463 chipco_set32(cc, SSB_CHIPCO_PMU_RES_DEPMSK,
464 depend_tab[i].depend);
465 break;
466 case PMU_RES_DEP_REMOVE:
467 chipco_mask32(cc, SSB_CHIPCO_PMU_RES_DEPMSK,
468 ~(depend_tab[i].depend));
469 break;
470 default:
471 SSB_WARN_ON(1);
472 }
473 }
474 }
475
476 /* Set the resource masks. */
477 if (min_msk)
478 chipco_write32(cc, SSB_CHIPCO_PMU_MINRES_MSK, min_msk);
479 if (max_msk)
480 chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
481}
482
483void ssb_pmu_init(struct ssb_chipcommon *cc)
484{
485 struct ssb_bus *bus = cc->dev->bus;
486 u32 pmucap;
487
488 if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
489 return;
490
491 pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
492 cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
493
494 ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
495 cc->pmu.rev, pmucap);
496
497 if (cc->pmu.rev >= 1) {
498 if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
499 chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
500 ~SSB_CHIPCO_PMU_CTL_NOILPONW);
501 } else {
502 chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
503 SSB_CHIPCO_PMU_CTL_NOILPONW);
504 }
505 }
506 ssb_pmu_pll_init(cc);
507 ssb_pmu_resources_init(cc);
508}
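
In both ssb_pmu0_pllinit_r0() and ssb_pmu1_pllinit_r0() the PMU control register is rebuilt from two fields: the crystal-frequency code e->xf and an ILP divider computed as (crystalfreq + 127) / 128 - 1, i.e. the ceiling of crystalfreq/128 minus one. A stand-alone check of that arithmetic, with crystal values taken from the tables above:

#include <stdio.h>

/* Same integer expression as in the PLL init routines, evaluated in userspace. */
static unsigned int ilp_div(unsigned int khz)
{
	return (khz + 127) / 128 - 1;
}

int main(void)
{
	/* 20 MHz PMU0 default crystal and the 25 MHz crystal of the 5354. */
	printf("20000 kHz -> ILP divider field %u\n", ilp_div(20000));	/* prints 156 */
	printf("25000 kHz -> ILP divider field %u\n", ilp_div(25000));	/* prints 195 */
	return 0;
}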
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index d5cde051806..c958ac16423 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -467,6 +467,51 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
467 /* TODO - get remaining rev 4 stuff needed */ 467 /* TODO - get remaining rev 4 stuff needed */
468} 468}
469 469
470static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
471{
472 int i;
473 u16 v;
474
475 /* extract the MAC address */
476 for (i = 0; i < 3; i++) {
477 v = in[SPOFF(SSB_SPROM1_IL0MAC) + i];
478 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
479 }
480 SPEX(country_code, SSB_SPROM8_CCODE, 0xFFFF, 0);
481 SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0);
482 SPEX(boardflags_hi, SSB_SPROM8_BFLHI, 0xFFFF, 0);
483 SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A,
484 SSB_SPROM8_ANTAVAIL_A_SHIFT);
485 SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG,
486 SSB_SPROM8_ANTAVAIL_BG_SHIFT);
487 SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0);
488 SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG,
489 SSB_SPROM8_ITSSI_BG_SHIFT);
490 SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0);
491 SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A,
492 SSB_SPROM8_ITSSI_A_SHIFT);
493 SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0);
494 SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1,
495 SSB_SPROM8_GPIOA_P1_SHIFT);
496 SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0);
497 SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3,
498 SSB_SPROM8_GPIOB_P3_SHIFT);
499
500 /* Extract the antenna gain values. */
501 SPEX(antenna_gain.ghz24.a0, SSB_SPROM8_AGAIN01,
502 SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
503 SPEX(antenna_gain.ghz24.a1, SSB_SPROM8_AGAIN01,
504 SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
505 SPEX(antenna_gain.ghz24.a2, SSB_SPROM8_AGAIN23,
506 SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
507 SPEX(antenna_gain.ghz24.a3, SSB_SPROM8_AGAIN23,
508 SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
509 memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
510 sizeof(out->antenna_gain.ghz5));
511
512 /* TODO - get remaining rev 8 stuff needed */
513}
514
470static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, 515static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
471 const u16 *in, u16 size) 516 const u16 *in, u16 size)
472{ 517{
@@ -487,15 +532,25 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
487 out->revision = 4; 532 out->revision = 4;
488 sprom_extract_r45(out, in); 533 sprom_extract_r45(out, in);
489 } else { 534 } else {
490 if (out->revision == 0) 535 switch (out->revision) {
491 goto unsupported; 536 case 1:
492 if (out->revision >= 1 && out->revision <= 3) { 537 case 2:
538 case 3:
493 sprom_extract_r123(out, in); 539 sprom_extract_r123(out, in);
494 } 540 break;
495 if (out->revision == 4 || out->revision == 5) 541 case 4:
542 case 5:
496 sprom_extract_r45(out, in); 543 sprom_extract_r45(out, in);
497 if (out->revision > 5) 544 break;
498 goto unsupported; 545 case 8:
546 sprom_extract_r8(out, in);
547 break;
548 default:
549 ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
550 " revision %d detected. Will extract"
551 " v1\n", out->revision);
552 sprom_extract_r123(out, in);
553 }
499 } 554 }
500 555
501 if (out->boardflags_lo == 0xFFFF) 556 if (out->boardflags_lo == 0xFFFF)
@@ -504,11 +559,6 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
504 out->boardflags_hi = 0; /* per specs */ 559 out->boardflags_hi = 0; /* per specs */
505 560
506 return 0; 561 return 0;
507unsupported:
508 ssb_printk(KERN_WARNING PFX "Unsupported SPROM revision %d "
509 "detected. Will extract v1\n", out->revision);
510 sprom_extract_r123(out, in);
511 return 0;
512} 562}
513 563
514static int ssb_pci_sprom_get(struct ssb_bus *bus, 564static int ssb_pci_sprom_get(struct ssb_bus *bus,
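
sprom_extract_r8() relies on the SPEX() helper that the rest of drivers/ssb/pci.c already uses for the older revisions: it takes one 16-bit word out of the SPROM image, masks it, shifts it and stores it into the named field of struct ssb_sprom. The real definition lives earlier in the file; a hedged reconstruction of its shape (SPOFF() converts a byte offset into an index into the u16 image):

#define SPEX(_outvar, _offset, _mask, _shift) \
	(out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift)))

The reworked switch also changes the failure mode: an unknown revision no longer jumps to a separate "unsupported" label but falls through to the default case, which warns and extracts the fields as if the SPROM were revision 1.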
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 8c26f5ea2b8..d2860a82368 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -63,7 +63,7 @@ MODULE_PARM_DESC (rndis_debug, "enable debugging");
63static rndis_params rndis_per_dev_params [RNDIS_MAX_CONFIGS]; 63static rndis_params rndis_per_dev_params [RNDIS_MAX_CONFIGS];
64 64
65/* Driver Version */ 65/* Driver Version */
66static const __le32 rndis_driver_version = __constant_cpu_to_le32 (1); 66static const __le32 rndis_driver_version = cpu_to_le32 (1);
67 67
68/* Function Prototypes */ 68/* Function Prototypes */
69static rndis_resp_t *rndis_add_response (int configNr, u32 length); 69static rndis_resp_t *rndis_add_response (int configNr, u32 length);
@@ -190,7 +190,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
190 190
191 /* response goes here, right after the header */ 191 /* response goes here, right after the header */
192 outbuf = (__le32 *) &resp[1]; 192 outbuf = (__le32 *) &resp[1];
193 resp->InformationBufferOffset = __constant_cpu_to_le32 (16); 193 resp->InformationBufferOffset = cpu_to_le32 (16);
194 194
195 net = rndis_per_dev_params[configNr].dev; 195 net = rndis_per_dev_params[configNr].dev;
196 if (net->get_stats) 196 if (net->get_stats)
@@ -221,7 +221,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
221 * reddite ergo quae sunt Caesaris Caesari 221 * reddite ergo quae sunt Caesaris Caesari
222 * et quae sunt Dei Deo! 222 * et quae sunt Dei Deo!
223 */ 223 */
224 *outbuf = __constant_cpu_to_le32 (0); 224 *outbuf = cpu_to_le32 (0);
225 retval = 0; 225 retval = 0;
226 break; 226 break;
227 227
@@ -256,7 +256,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
256 pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__); 256 pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__);
257 if (rndis_per_dev_params [configNr].media_state 257 if (rndis_per_dev_params [configNr].media_state
258 == NDIS_MEDIA_STATE_DISCONNECTED) 258 == NDIS_MEDIA_STATE_DISCONNECTED)
259 *outbuf = __constant_cpu_to_le32 (0); 259 *outbuf = cpu_to_le32 (0);
260 else 260 else
261 *outbuf = cpu_to_le32 ( 261 *outbuf = cpu_to_le32 (
262 rndis_per_dev_params [configNr].speed); 262 rndis_per_dev_params [configNr].speed);
@@ -317,7 +317,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
317 /* mandatory */ 317 /* mandatory */
318 case OID_GEN_MAXIMUM_TOTAL_SIZE: 318 case OID_GEN_MAXIMUM_TOTAL_SIZE:
319 pr_debug("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__); 319 pr_debug("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
320 *outbuf = __constant_cpu_to_le32(RNDIS_MAX_TOTAL_SIZE); 320 *outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
321 retval = 0; 321 retval = 0;
322 break; 322 break;
323 323
@@ -332,7 +332,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
332 332
333 case OID_GEN_PHYSICAL_MEDIUM: 333 case OID_GEN_PHYSICAL_MEDIUM:
334 pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__); 334 pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__);
335 *outbuf = __constant_cpu_to_le32 (0); 335 *outbuf = cpu_to_le32 (0);
336 retval = 0; 336 retval = 0;
337 break; 337 break;
338 338
@@ -342,7 +342,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
342 */ 342 */
343 case OID_GEN_MAC_OPTIONS: /* from WinME */ 343 case OID_GEN_MAC_OPTIONS: /* from WinME */
344 pr_debug("%s: OID_GEN_MAC_OPTIONS\n", __func__); 344 pr_debug("%s: OID_GEN_MAC_OPTIONS\n", __func__);
345 *outbuf = __constant_cpu_to_le32( 345 *outbuf = cpu_to_le32(
346 NDIS_MAC_OPTION_RECEIVE_SERIALIZED 346 NDIS_MAC_OPTION_RECEIVE_SERIALIZED
347 | NDIS_MAC_OPTION_FULL_DUPLEX); 347 | NDIS_MAC_OPTION_FULL_DUPLEX);
348 retval = 0; 348 retval = 0;
@@ -431,7 +431,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
431 case OID_802_3_MULTICAST_LIST: 431 case OID_802_3_MULTICAST_LIST:
432 pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__); 432 pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__);
433 /* Multicast base address only */ 433 /* Multicast base address only */
434 *outbuf = __constant_cpu_to_le32 (0xE0000000); 434 *outbuf = cpu_to_le32 (0xE0000000);
435 retval = 0; 435 retval = 0;
436 break; 436 break;
437 437
@@ -439,7 +439,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
439 case OID_802_3_MAXIMUM_LIST_SIZE: 439 case OID_802_3_MAXIMUM_LIST_SIZE:
440 pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__); 440 pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
441 /* Multicast base address only */ 441 /* Multicast base address only */
442 *outbuf = __constant_cpu_to_le32 (1); 442 *outbuf = cpu_to_le32 (1);
443 retval = 0; 443 retval = 0;
444 break; 444 break;
445 445
@@ -461,14 +461,14 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
461 /* mandatory */ 461 /* mandatory */
462 case OID_802_3_XMIT_ONE_COLLISION: 462 case OID_802_3_XMIT_ONE_COLLISION:
463 pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__); 463 pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__);
464 *outbuf = __constant_cpu_to_le32 (0); 464 *outbuf = cpu_to_le32 (0);
465 retval = 0; 465 retval = 0;
466 break; 466 break;
467 467
468 /* mandatory */ 468 /* mandatory */
469 case OID_802_3_XMIT_MORE_COLLISIONS: 469 case OID_802_3_XMIT_MORE_COLLISIONS:
470 pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__); 470 pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
471 *outbuf = __constant_cpu_to_le32 (0); 471 *outbuf = cpu_to_le32 (0);
472 retval = 0; 472 retval = 0;
473 break; 473 break;
474 474
@@ -572,24 +572,24 @@ static int rndis_init_response (int configNr, rndis_init_msg_type *buf)
572 return -ENOMEM; 572 return -ENOMEM;
573 resp = (rndis_init_cmplt_type *) r->buf; 573 resp = (rndis_init_cmplt_type *) r->buf;
574 574
575 resp->MessageType = __constant_cpu_to_le32 ( 575 resp->MessageType = cpu_to_le32 (
576 REMOTE_NDIS_INITIALIZE_CMPLT); 576 REMOTE_NDIS_INITIALIZE_CMPLT);
577 resp->MessageLength = __constant_cpu_to_le32 (52); 577 resp->MessageLength = cpu_to_le32 (52);
578 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 578 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
579 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS); 579 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS);
580 resp->MajorVersion = __constant_cpu_to_le32 (RNDIS_MAJOR_VERSION); 580 resp->MajorVersion = cpu_to_le32 (RNDIS_MAJOR_VERSION);
581 resp->MinorVersion = __constant_cpu_to_le32 (RNDIS_MINOR_VERSION); 581 resp->MinorVersion = cpu_to_le32 (RNDIS_MINOR_VERSION);
582 resp->DeviceFlags = __constant_cpu_to_le32 (RNDIS_DF_CONNECTIONLESS); 582 resp->DeviceFlags = cpu_to_le32 (RNDIS_DF_CONNECTIONLESS);
583 resp->Medium = __constant_cpu_to_le32 (RNDIS_MEDIUM_802_3); 583 resp->Medium = cpu_to_le32 (RNDIS_MEDIUM_802_3);
584 resp->MaxPacketsPerTransfer = __constant_cpu_to_le32 (1); 584 resp->MaxPacketsPerTransfer = cpu_to_le32 (1);
585 resp->MaxTransferSize = cpu_to_le32 ( 585 resp->MaxTransferSize = cpu_to_le32 (
586 params->dev->mtu 586 params->dev->mtu
587 + sizeof (struct ethhdr) 587 + sizeof (struct ethhdr)
588 + sizeof (struct rndis_packet_msg_type) 588 + sizeof (struct rndis_packet_msg_type)
589 + 22); 589 + 22);
590 resp->PacketAlignmentFactor = __constant_cpu_to_le32 (0); 590 resp->PacketAlignmentFactor = cpu_to_le32 (0);
591 resp->AFListOffset = __constant_cpu_to_le32 (0); 591 resp->AFListOffset = cpu_to_le32 (0);
592 resp->AFListSize = __constant_cpu_to_le32 (0); 592 resp->AFListSize = cpu_to_le32 (0);
593 593
594 params->resp_avail(params->v); 594 params->resp_avail(params->v);
595 return 0; 595 return 0;
@@ -617,7 +617,7 @@ static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
617 return -ENOMEM; 617 return -ENOMEM;
618 resp = (rndis_query_cmplt_type *) r->buf; 618 resp = (rndis_query_cmplt_type *) r->buf;
619 619
620 resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_QUERY_CMPLT); 620 resp->MessageType = cpu_to_le32 (REMOTE_NDIS_QUERY_CMPLT);
621 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 621 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
622 622
623 if (gen_ndis_query_resp (configNr, le32_to_cpu (buf->OID), 623 if (gen_ndis_query_resp (configNr, le32_to_cpu (buf->OID),
@@ -626,13 +626,13 @@ static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
626 le32_to_cpu(buf->InformationBufferLength), 626 le32_to_cpu(buf->InformationBufferLength),
627 r)) { 627 r)) {
628 /* OID not supported */ 628 /* OID not supported */
629 resp->Status = __constant_cpu_to_le32 ( 629 resp->Status = cpu_to_le32 (
630 RNDIS_STATUS_NOT_SUPPORTED); 630 RNDIS_STATUS_NOT_SUPPORTED);
631 resp->MessageLength = __constant_cpu_to_le32 (sizeof *resp); 631 resp->MessageLength = cpu_to_le32 (sizeof *resp);
632 resp->InformationBufferLength = __constant_cpu_to_le32 (0); 632 resp->InformationBufferLength = cpu_to_le32 (0);
633 resp->InformationBufferOffset = __constant_cpu_to_le32 (0); 633 resp->InformationBufferOffset = cpu_to_le32 (0);
634 } else 634 } else
635 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS); 635 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS);
636 636
637 params->resp_avail(params->v); 637 params->resp_avail(params->v);
638 return 0; 638 return 0;
@@ -665,14 +665,14 @@ static int rndis_set_response (int configNr, rndis_set_msg_type *buf)
665 pr_debug("\n"); 665 pr_debug("\n");
666#endif 666#endif
667 667
668 resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_SET_CMPLT); 668 resp->MessageType = cpu_to_le32 (REMOTE_NDIS_SET_CMPLT);
669 resp->MessageLength = __constant_cpu_to_le32 (16); 669 resp->MessageLength = cpu_to_le32 (16);
670 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 670 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
671 if (gen_ndis_set_resp (configNr, le32_to_cpu (buf->OID), 671 if (gen_ndis_set_resp (configNr, le32_to_cpu (buf->OID),
672 ((u8 *) buf) + 8 + BufOffset, BufLength, r)) 672 ((u8 *) buf) + 8 + BufOffset, BufLength, r))
673 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_NOT_SUPPORTED); 673 resp->Status = cpu_to_le32 (RNDIS_STATUS_NOT_SUPPORTED);
674 else 674 else
675 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS); 675 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS);
676 676
677 params->resp_avail(params->v); 677 params->resp_avail(params->v);
678 return 0; 678 return 0;
@@ -689,11 +689,11 @@ static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf)
689 return -ENOMEM; 689 return -ENOMEM;
690 resp = (rndis_reset_cmplt_type *) r->buf; 690 resp = (rndis_reset_cmplt_type *) r->buf;
691 691
692 resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_RESET_CMPLT); 692 resp->MessageType = cpu_to_le32 (REMOTE_NDIS_RESET_CMPLT);
693 resp->MessageLength = __constant_cpu_to_le32 (16); 693 resp->MessageLength = cpu_to_le32 (16);
694 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS); 694 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS);
695 /* resent information */ 695 /* resent information */
696 resp->AddressingReset = __constant_cpu_to_le32 (1); 696 resp->AddressingReset = cpu_to_le32 (1);
697 697
698 params->resp_avail(params->v); 698 params->resp_avail(params->v);
699 return 0; 699 return 0;
@@ -713,11 +713,11 @@ static int rndis_keepalive_response (int configNr,
713 return -ENOMEM; 713 return -ENOMEM;
714 resp = (rndis_keepalive_cmplt_type *) r->buf; 714 resp = (rndis_keepalive_cmplt_type *) r->buf;
715 715
716 resp->MessageType = __constant_cpu_to_le32 ( 716 resp->MessageType = cpu_to_le32 (
717 REMOTE_NDIS_KEEPALIVE_CMPLT); 717 REMOTE_NDIS_KEEPALIVE_CMPLT);
718 resp->MessageLength = __constant_cpu_to_le32 (16); 718 resp->MessageLength = cpu_to_le32 (16);
719 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 719 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
720 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS); 720 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS);
721 721
722 params->resp_avail(params->v); 722 params->resp_avail(params->v);
723 return 0; 723 return 0;
@@ -742,12 +742,12 @@ static int rndis_indicate_status_msg (int configNr, u32 status)
742 return -ENOMEM; 742 return -ENOMEM;
743 resp = (rndis_indicate_status_msg_type *) r->buf; 743 resp = (rndis_indicate_status_msg_type *) r->buf;
744 744
745 resp->MessageType = __constant_cpu_to_le32 ( 745 resp->MessageType = cpu_to_le32 (
746 REMOTE_NDIS_INDICATE_STATUS_MSG); 746 REMOTE_NDIS_INDICATE_STATUS_MSG);
747 resp->MessageLength = __constant_cpu_to_le32 (20); 747 resp->MessageLength = cpu_to_le32 (20);
748 resp->Status = cpu_to_le32 (status); 748 resp->Status = cpu_to_le32 (status);
749 resp->StatusBufferLength = __constant_cpu_to_le32 (0); 749 resp->StatusBufferLength = cpu_to_le32 (0);
750 resp->StatusBufferOffset = __constant_cpu_to_le32 (0); 750 resp->StatusBufferOffset = cpu_to_le32 (0);
751 751
752 params->resp_avail(params->v); 752 params->resp_avail(params->v);
753 return 0; 753 return 0;
@@ -963,9 +963,9 @@ void rndis_add_hdr (struct sk_buff *skb)
963 return; 963 return;
964 header = (void *) skb_push (skb, sizeof *header); 964 header = (void *) skb_push (skb, sizeof *header);
965 memset (header, 0, sizeof *header); 965 memset (header, 0, sizeof *header);
966 header->MessageType = __constant_cpu_to_le32(REMOTE_NDIS_PACKET_MSG); 966 header->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG);
967 header->MessageLength = cpu_to_le32(skb->len); 967 header->MessageLength = cpu_to_le32(skb->len);
968 header->DataOffset = __constant_cpu_to_le32 (36); 968 header->DataOffset = cpu_to_le32 (36);
969 header->DataLength = cpu_to_le32(skb->len - sizeof *header); 969 header->DataLength = cpu_to_le32(skb->len - sizeof *header);
970} 970}
971 971
@@ -1029,7 +1029,7 @@ int rndis_rm_hdr(struct sk_buff *skb)
1029 __le32 *tmp = (void *) skb->data; 1029 __le32 *tmp = (void *) skb->data;
1030 1030
1031 /* MessageType, MessageLength */ 1031 /* MessageType, MessageLength */
1032 if (__constant_cpu_to_le32(REMOTE_NDIS_PACKET_MSG) 1032 if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
1033 != get_unaligned(tmp++)) 1033 != get_unaligned(tmp++))
1034 return -EINVAL; 1034 return -EINVAL;
1035 tmp++; 1035 tmp++;
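
Every __constant_cpu_to_le32() in rndis.c becomes a plain cpu_to_le32(). With a compile-time constant argument the generic macro folds to the same constant anyway (as the file-scope rndis_driver_version initializer above demonstrates), so the __constant_ spelling adds noise without adding value. In short:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Both initializers produce the identical little-endian constant;
 * the second form is the one the tree now prefers. */
static const __le32 old_style = __constant_cpu_to_le32(16);
static const __le32 new_style = cpu_to_le32(16);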
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 6c2d37fdd3b..74ae7589900 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -204,8 +204,11 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
204 } else { 204 } else {
205 v86d_started = 1; 205 v86d_started = 1;
206 err = cn_netlink_send(m, 0, gfp_any()); 206 err = cn_netlink_send(m, 0, gfp_any());
207 if (err == -ENOBUFS)
208 err = 0;
207 } 209 }
208 } 210 } else if (err == -ENOBUFS)
211 err = 0;
209 212
210 if (!err && !(task->t.flags & TF_EXIT)) 213 if (!err && !(task->t.flags & TF_EXIT))
211 err = !wait_for_completion_timeout(task->done, 214 err = !wait_for_completion_timeout(task->done,
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 9c6d815dd19..64f406593c0 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1988,6 +1988,8 @@ COMPATIBLE_IOCTL(TUNSETGROUP)
1988COMPATIBLE_IOCTL(TUNGETFEATURES) 1988COMPATIBLE_IOCTL(TUNGETFEATURES)
1989COMPATIBLE_IOCTL(TUNSETOFFLOAD) 1989COMPATIBLE_IOCTL(TUNSETOFFLOAD)
1990COMPATIBLE_IOCTL(TUNSETTXFILTER) 1990COMPATIBLE_IOCTL(TUNSETTXFILTER)
1991COMPATIBLE_IOCTL(TUNGETSNDBUF)
1992COMPATIBLE_IOCTL(TUNSETSNDBUF)
1991/* Big V */ 1993/* Big V */
1992COMPATIBLE_IOCTL(VT_SETMODE) 1994COMPATIBLE_IOCTL(VT_SETMODE)
1993COMPATIBLE_IOCTL(VT_GETMODE) 1995COMPATIBLE_IOCTL(VT_GETMODE)
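
TUNGETSNDBUF and TUNSETSNDBUF both pass a plain int, so 32-bit userspace on a 64-bit kernel needs no translation handler - listing them as COMPATIBLE_IOCTL() is enough. A hedged userspace sketch of the two calls (device path and buffer size are only examples):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>

int main(void)
{
	int fd = open("/dev/net/tun", O_RDWR);
	int sndbuf = 65536;

	if (fd < 0)
		return 1;
	if (ioctl(fd, TUNSETSNDBUF, &sndbuf) < 0)	/* set the per-fd send buffer */
		perror("TUNSETSNDBUF");
	if (ioctl(fd, TUNGETSNDBUF, &sndbuf) == 0)	/* read it back */
		printf("sndbuf = %d\n", sndbuf);
	return 0;
}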
diff --git a/fs/dquot.c b/fs/dquot.c
index bca3cac4bee..d6add0bf5ad 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -1057,10 +1057,7 @@ static void send_warning(const struct dquot *dquot, const char warntype)
1057 goto attr_err_out; 1057 goto attr_err_out;
1058 genlmsg_end(skb, msg_head); 1058 genlmsg_end(skb, msg_head);
1059 1059
1060 ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); 1060 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
1061 if (ret < 0 && ret != -ESRCH)
1062 printk(KERN_ERR
1063 "VFS: Failed to send notification message: %d\n", ret);
1064 return; 1061 return;
1065attr_err_out: 1062attr_err_out:
1066 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); 1063 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h
index a1916078fd0..cd4bcb6989c 100644
--- a/include/linux/arcdevice.h
+++ b/include/linux/arcdevice.h
@@ -235,8 +235,6 @@ struct Outgoing {
235 235
236 236
237struct arcnet_local { 237struct arcnet_local {
238 struct net_device_stats stats;
239
240 uint8_t config, /* current value of CONFIG register */ 238 uint8_t config, /* current value of CONFIG register */
241 timeout, /* Extended timeout for COM20020 */ 239 timeout, /* Extended timeout for COM20020 */
242 backplane, /* Backplane flag for COM20020 */ 240 backplane, /* Backplane flag for COM20020 */
@@ -335,7 +333,12 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
335 333
336void arcnet_unregister_proto(struct ArcProto *proto); 334void arcnet_unregister_proto(struct ArcProto *proto);
337irqreturn_t arcnet_interrupt(int irq, void *dev_id); 335irqreturn_t arcnet_interrupt(int irq, void *dev_id);
338struct net_device *alloc_arcdev(char *name); 336struct net_device *alloc_arcdev(const char *name);
337
338int arcnet_open(struct net_device *dev);
339int arcnet_close(struct net_device *dev);
340int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
341void arcnet_timeout(struct net_device *dev);
339 342
340#endif /* __KERNEL__ */ 343#endif /* __KERNEL__ */
341#endif /* _LINUX_ARCDEVICE_H */ 344#endif /* _LINUX_ARCDEVICE_H */
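
Both arcdevice.h changes prepare the ARCnet drivers for net_device_ops: the private stats copy goes away in favour of the stats structure already embedded in struct net_device, and the common open/close/xmit/timeout handlers get external linkage so each chip driver can reference them from its own const ops table (com20020.c exports a ready-made one, see the com20020.h hunk below). A sketch of such a table, with the name invented:

#include <linux/netdevice.h>
#include <linux/arcdevice.h>

/* Hypothetical per-chip ops table built from the newly exported handlers. */
static const struct net_device_ops example_arcnet_ops = {
	.ndo_open	= arcnet_open,
	.ndo_stop	= arcnet_close,
	.ndo_start_xmit	= arcnet_send_packet,
	.ndo_tx_timeout	= arcnet_timeout,
};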
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
new file mode 100644
index 00000000000..b847fc7b93f
--- /dev/null
+++ b/include/linux/ath9k_platform.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef _LINUX_ATH9K_PLATFORM_H
20#define _LINUX_ATH9K_PLATFORM_H
21
22#define ATH9K_PLAT_EEP_MAX_WORDS 2048
23
24struct ath9k_platform_data {
25 u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
26};
27
28#endif /* _LINUX_ATH9K_PLATFORM_H */
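
The new header gives boards whose wireless chip has no usable EEPROM a way to hand ath9k a pre-extracted calibration image through platform data. A hedged sketch of how board-support code might use it (the registration step is elided and the names are made up; only struct ath9k_platform_data comes from this header):

#include <linux/types.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ath9k_platform.h>

static struct ath9k_platform_data board_wmac_data;

static void __init board_setup_wmac(const u16 *eeprom_image)
{
	memcpy(board_wmac_data.eeprom_data, eeprom_image,
	       sizeof(board_wmac_data.eeprom_data));
	/* ...then attach &board_wmac_data as the wifi device's platform_data. */
}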
diff --git a/include/linux/com20020.h b/include/linux/com20020.h
index ac6d9a43e08..5dcfb944b6c 100644
--- a/include/linux/com20020.h
+++ b/include/linux/com20020.h
@@ -29,6 +29,7 @@
29 29
30int com20020_check(struct net_device *dev); 30int com20020_check(struct net_device *dev);
31int com20020_found(struct net_device *dev, int shared); 31int com20020_found(struct net_device *dev, int shared);
32extern const struct net_device_ops com20020_netdev_ops;
32 33
33/* The number of low I/O ports used by the card. */ 34/* The number of low I/O ports used by the card. */
34#define ARCNET_TOTAL_SIZE 8 35#define ARCNET_TOTAL_SIZE 8
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 34f2789d9b9..fc65d219d88 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -109,6 +109,12 @@ struct cn_queue_dev {
109 unsigned char name[CN_CBQ_NAMELEN]; 109 unsigned char name[CN_CBQ_NAMELEN];
110 110
111 struct workqueue_struct *cn_queue; 111 struct workqueue_struct *cn_queue;
112 /* Sent to kevent to create cn_queue only when needed */
113 struct work_struct wq_creation;
114 /* Tell if the wq_creation job is pending/completed */
115 atomic_t wq_requested;
116 /* Wait for cn_queue to be created */
117 wait_queue_head_t wq_created;
112 118
113 struct list_head queue_list; 119 struct list_head queue_list;
114 spinlock_t queue_lock; 120 spinlock_t queue_lock;
@@ -164,6 +170,8 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
164int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); 170int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
165void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); 171void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
166 172
173int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
174
167struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); 175struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
168void cn_queue_free_dev(struct cn_queue_dev *dev); 176void cn_queue_free_dev(struct cn_queue_dev *dev);
169 177
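
The new cn_queue_dev fields implement lazy creation of the connector workqueue: wq_creation is a work item (run from the generic kevent queue) that actually creates cn_queue, wq_requested records whether that request has already been issued, and wq_created lets callers wait until the queue exists; queue_cn_work() wraps all of this for the message-dispatch path. A rough sketch of the intended flow - the real implementation is in drivers/connector/cn_queue.c, which is not part of this excerpt:

#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/wait.h>

int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
	struct cn_queue_dev *pdev = cbq->pdev;

	if (unlikely(!pdev->cn_queue)) {
		/* Only the first caller schedules the creation work... */
		if (atomic_inc_return(&pdev->wq_requested) == 1)
			schedule_work(&pdev->wq_creation);
		else
			atomic_dec(&pdev->wq_requested);
		/* ...and everyone sleeps until the workqueue shows up. */
		wait_event(pdev->wq_created, pdev->cn_queue != NULL);
	}
	return queue_work(pdev->cn_queue, work);
}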
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61734e27abb..7434a8353e2 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -355,46 +355,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
355 return __dccp_hdr_len(dccp_hdr(skb)); 355 return __dccp_hdr_len(dccp_hdr(skb));
356} 356}
357 357
358
359/* initial values for each feature */
360#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
361#define DCCPF_INITIAL_ACK_RATIO 2
362#define DCCPF_INITIAL_CCID DCCPC_CCID2
363/* FIXME: for now we're default to 1 but it should really be 0 */
364#define DCCPF_INITIAL_SEND_NDP_COUNT 1
365
366/**
367 * struct dccp_minisock - Minimal DCCP connection representation
368 *
369 * Will be used to pass the state from dccp_request_sock to dccp_sock.
370 *
371 * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
372 * @dccpms_pending - List of features being negotiated
373 * @dccpms_conf -
374 */
375struct dccp_minisock {
376 __u64 dccpms_sequence_window;
377 struct list_head dccpms_pending;
378 struct list_head dccpms_conf;
379};
380
381struct dccp_opt_conf {
382 __u8 *dccpoc_val;
383 __u8 dccpoc_len;
384};
385
386struct dccp_opt_pend {
387 struct list_head dccpop_node;
388 __u8 dccpop_type;
389 __u8 dccpop_feat;
390 __u8 *dccpop_val;
391 __u8 dccpop_len;
392 int dccpop_conf;
393 struct dccp_opt_conf *dccpop_sc;
394};
395
396extern void dccp_minisock_init(struct dccp_minisock *dmsk);
397
398/** 358/**
399 * struct dccp_request_sock - represent DCCP-specific connection request 359 * struct dccp_request_sock - represent DCCP-specific connection request
400 * @dreq_inet_rsk: structure inherited from 360 * @dreq_inet_rsk: structure inherited from
@@ -483,13 +443,14 @@ struct dccp_ackvec;
483 * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo 443 * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
484 * @dccps_l_ack_ratio - feature-local Ack Ratio 444 * @dccps_l_ack_ratio - feature-local Ack Ratio
485 * @dccps_r_ack_ratio - feature-remote Ack Ratio 445 * @dccps_r_ack_ratio - feature-remote Ack Ratio
446 * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
447 * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
486 * @dccps_pcslen - sender partial checksum coverage (via sockopt) 448 * @dccps_pcslen - sender partial checksum coverage (via sockopt)
487 * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) 449 * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
488 * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) 450 * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
489 * @dccps_ndp_count - number of Non Data Packets since last data packet 451 * @dccps_ndp_count - number of Non Data Packets since last data packet
490 * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) 452 * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
491 * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) 453 * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
492 * @dccps_minisock - associated minisock (accessed via dccp_msk)
493 * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) 454 * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
494 * @dccps_hc_rx_ackvec - rx half connection ack vector 455 * @dccps_hc_rx_ackvec - rx half connection ack vector
495 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) 456 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
@@ -523,12 +484,13 @@ struct dccp_sock {
523 __u32 dccps_timestamp_time; 484 __u32 dccps_timestamp_time;
524 __u16 dccps_l_ack_ratio; 485 __u16 dccps_l_ack_ratio;
525 __u16 dccps_r_ack_ratio; 486 __u16 dccps_r_ack_ratio;
487 __u64 dccps_l_seq_win:48;
488 __u64 dccps_r_seq_win:48;
526 __u8 dccps_pcslen:4; 489 __u8 dccps_pcslen:4;
527 __u8 dccps_pcrlen:4; 490 __u8 dccps_pcrlen:4;
528 __u8 dccps_send_ndp_count:1; 491 __u8 dccps_send_ndp_count:1;
529 __u64 dccps_ndp_count:48; 492 __u64 dccps_ndp_count:48;
530 unsigned long dccps_rate_last; 493 unsigned long dccps_rate_last;
531 struct dccp_minisock dccps_minisock;
532 struct list_head dccps_featneg; 494 struct list_head dccps_featneg;
533 struct dccp_ackvec *dccps_hc_rx_ackvec; 495 struct dccp_ackvec *dccps_hc_rx_ackvec;
534 struct ccid *dccps_hc_rx_ccid; 496 struct ccid *dccps_hc_rx_ccid;
@@ -546,11 +508,6 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk)
546 return (struct dccp_sock *)sk; 508 return (struct dccp_sock *)sk;
547} 509}
548 510
549static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
550{
551 return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
552}
553
554static inline const char *dccp_role(const struct sock *sk) 511static inline const char *dccp_role(const struct sock *sk)
555{ 512{
556 switch (dccp_sk(sk)->dccps_role) { 513 switch (dccp_sk(sk)->dccps_role) {
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 1cb0f0b9092..a1f17abba7d 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -184,4 +184,25 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
184} 184}
185#endif /* __KERNEL__ */ 185#endif /* __KERNEL__ */
186 186
187/**
188 * compare_ether_header - Compare two Ethernet headers
189 * @a: Pointer to Ethernet header
190 * @b: Pointer to Ethernet header
191 *
192 * Compare two ethernet headers, returns 0 if equal.
193 * This assumes that the network header (i.e., IP header) is 4-byte
194 * aligned OR the platform can handle unaligned access. This is the
195 * case for all packets coming into netif_receive_skb or similar
196 * entry points.
197 */
198
199static inline int compare_ether_header(const void *a, const void *b)
200{
201 u32 *a32 = (u32 *)((u8 *)a + 2);
202 u32 *b32 = (u32 *)((u8 *)b + 2);
203
204 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
205 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
206}
207
187#endif /* _LINUX_ETHERDEVICE_H */ 208#endif /* _LINUX_ETHERDEVICE_H */
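The compare_ether_header() helper added to etherdevice.h above compares two 14-byte Ethernet headers with one 16-bit and three 32-bit XORs instead of a byte-wise memcmp(), relying on the usual 2-byte receive offset so that the 32-bit loads end up 4-byte aligned. Below is a minimal user-space sketch of the same XOR-compare idea; the buffers and sample values are made up for illustration, the type-punned loads simply mirror the kernel helper's alignment assumption (build with -fno-strict-aliasing if the compiler objects), and none of it is code from this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns 0 when the two 14-byte Ethernet headers (dst, src, ethertype)
 * are identical, non-zero otherwise.  As in the kernel helper, the header
 * is expected to start 2 bytes into a 4-byte-aligned buffer so that the
 * 32-bit loads below are aligned. */
static int hdr_equal(const void *a, const void *b)
{
	const uint32_t *a32 = (const uint32_t *)((const uint8_t *)a + 2);
	const uint32_t *b32 = (const uint32_t *)((const uint8_t *)b + 2);

	return (*(const uint16_t *)a ^ *(const uint16_t *)b) |
	       (a32[0] ^ b32[0]) | (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
}

int main(void)
{
	/* Place each header at offset 2 of an aligned buffer (NET_IP_ALIGN style). */
	uint8_t buf1[16] __attribute__((aligned(4))) = { 0 };
	uint8_t buf2[16] __attribute__((aligned(4))) = { 0 };
	const uint8_t hdr[14] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* dst  */
				  0x00, 0x11, 0x22, 0x33, 0x44, 0x55,	/* src  */
				  0x08, 0x00 };				/* IPv4 */

	memcpy(buf1 + 2, hdr, sizeof(hdr));
	memcpy(buf2 + 2, hdr, sizeof(hdr));
	printf("same headers:      %s\n", hdr_equal(buf1 + 2, buf2 + 2) ? "no" : "yes");
	buf2[15] = 0x06;	/* flip the ethertype from IPv4 to ARP */
	printf("different headers: %s\n", hdr_equal(buf1 + 2, buf2 + 2) ? "no" : "yes");
	return 0;
}
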
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index fd47a151665..6a6e701f163 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -38,6 +38,7 @@ struct hdlc_proto {
38 int (*ioctl)(struct net_device *dev, struct ifreq *ifr); 38 int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
39 __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); 39 __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
40 int (*netif_rx)(struct sk_buff *skb); 40 int (*netif_rx)(struct sk_buff *skb);
41 int (*xmit)(struct sk_buff *skb, struct net_device *dev);
41 struct module *module; 42 struct module *module;
42 struct hdlc_proto *next; /* next protocol in the list */ 43 struct hdlc_proto *next; /* next protocol in the list */
43}; 44};
@@ -102,6 +103,10 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
102int hdlc_open(struct net_device *dev); 103int hdlc_open(struct net_device *dev);
103/* Must be called by hardware driver when HDLC device is being closed */ 104/* Must be called by hardware driver when HDLC device is being closed */
104void hdlc_close(struct net_device *dev); 105void hdlc_close(struct net_device *dev);
106/* May be used by hardware driver */
107int hdlc_change_mtu(struct net_device *dev, int new_mtu);
108/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */
109int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
105 110
106int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, 111int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
107 size_t size); 112 size_t size);
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index bf6302f6b5f..0821bac62b8 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -241,7 +241,6 @@ struct hdlcdrv_state {
241 struct hdlcdrv_bitbuffer bitbuf_hdlc; 241 struct hdlcdrv_bitbuffer bitbuf_hdlc;
242#endif /* HDLCDRV_DEBUG */ 242#endif /* HDLCDRV_DEBUG */
243 243
244 struct net_device_stats stats;
245 int ptt_keyed; 244 int ptt_keyed;
246 245
247 /* queued skb for transmission */ 246 /* queued skb for transmission */
diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h
index 1c7a0dd5536..06695b74d40 100644
--- a/include/linux/ibmtr.h
+++ b/include/linux/ibmtr.h
@@ -207,7 +207,7 @@ struct tok_info {
207 unsigned short exsap_station_id; 207 unsigned short exsap_station_id;
208 unsigned short global_int_enable; 208 unsigned short global_int_enable;
209 struct sk_buff *current_skb; 209 struct sk_buff *current_skb;
210 struct net_device_stats tr_stats; 210
211 unsigned char auto_speedsave; 211 unsigned char auto_speedsave;
212 open_state open_status, sap_status; 212 open_state open_status, sap_status;
213 enum {MANUAL, AUTOMATIC} open_mode; 213 enum {MANUAL, AUTOMATIC} open_mode;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c4e6ca1a630..b1bb817d142 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -527,6 +527,8 @@ struct ieee80211_tim_ie {
527 u8 virtual_map[0]; 527 u8 virtual_map[0];
528} __attribute__ ((packed)); 528} __attribute__ ((packed));
529 529
530#define WLAN_SA_QUERY_TR_ID_LEN 16
531
530struct ieee80211_mgmt { 532struct ieee80211_mgmt {
531 __le16 frame_control; 533 __le16 frame_control;
532 __le16 duration; 534 __le16 duration;
@@ -646,6 +648,10 @@ struct ieee80211_mgmt {
646 u8 action_code; 648 u8 action_code;
647 u8 variable[0]; 649 u8 variable[0];
648 } __attribute__((packed)) mesh_action; 650 } __attribute__((packed)) mesh_action;
651 struct {
652 u8 action;
653 u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN];
654 } __attribute__ ((packed)) sa_query;
649 } u; 655 } u;
650 } __attribute__ ((packed)) action; 656 } __attribute__ ((packed)) action;
651 } u; 657 } u;
@@ -655,6 +661,15 @@ struct ieee80211_mgmt {
655#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) 661#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
656 662
657 663
664/* Management MIC information element (IEEE 802.11w) */
665struct ieee80211_mmie {
666 u8 element_id;
667 u8 length;
668 __le16 key_id;
669 u8 sequence_number[6];
670 u8 mic[8];
671} __attribute__ ((packed));
672
658/* Control frames */ 673/* Control frames */
659struct ieee80211_rts { 674struct ieee80211_rts {
660 __le16 frame_control; 675 __le16 frame_control;
@@ -899,6 +914,9 @@ enum ieee80211_statuscode {
899 /* 802.11g */ 914 /* 802.11g */
900 WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, 915 WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
901 WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, 916 WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
917 /* 802.11w */
918 WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30,
919 WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31,
902 /* 802.11i */ 920 /* 802.11i */
903 WLAN_STATUS_INVALID_IE = 40, 921 WLAN_STATUS_INVALID_IE = 40,
904 WLAN_STATUS_INVALID_GROUP_CIPHER = 41, 922 WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
@@ -1018,6 +1036,8 @@ enum ieee80211_eid {
1018 WLAN_EID_HT_INFORMATION = 61, 1036 WLAN_EID_HT_INFORMATION = 61,
1019 /* 802.11i */ 1037 /* 802.11i */
1020 WLAN_EID_RSN = 48, 1038 WLAN_EID_RSN = 48,
1039 WLAN_EID_TIMEOUT_INTERVAL = 56,
1040 WLAN_EID_MMIE = 76 /* 802.11w */,
1021 WLAN_EID_WPA = 221, 1041 WLAN_EID_WPA = 221,
1022 WLAN_EID_GENERIC = 221, 1042 WLAN_EID_GENERIC = 221,
1023 WLAN_EID_VENDOR_SPECIFIC = 221, 1043 WLAN_EID_VENDOR_SPECIFIC = 221,
@@ -1030,6 +1050,8 @@ enum ieee80211_category {
1030 WLAN_CATEGORY_QOS = 1, 1050 WLAN_CATEGORY_QOS = 1,
1031 WLAN_CATEGORY_DLS = 2, 1051 WLAN_CATEGORY_DLS = 2,
1032 WLAN_CATEGORY_BACK = 3, 1052 WLAN_CATEGORY_BACK = 3,
1053 WLAN_CATEGORY_PUBLIC = 4,
1054 WLAN_CATEGORY_SA_QUERY = 8,
1033 WLAN_CATEGORY_WMM = 17, 1055 WLAN_CATEGORY_WMM = 17,
1034}; 1056};
1035 1057
@@ -1104,6 +1126,12 @@ struct ieee80211_country_ie_triplet {
1104 }; 1126 };
1105} __attribute__ ((packed)); 1127} __attribute__ ((packed));
1106 1128
1129enum ieee80211_timeout_interval_type {
1130 WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */,
1131 WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */,
1132 WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */,
1133};
1134
1107/* BACK action code */ 1135/* BACK action code */
1108enum ieee80211_back_actioncode { 1136enum ieee80211_back_actioncode {
1109 WLAN_ACTION_ADDBA_REQ = 0, 1137 WLAN_ACTION_ADDBA_REQ = 0,
@@ -1118,6 +1146,13 @@ enum ieee80211_back_parties {
1118 WLAN_BACK_TIMER = 2, 1146 WLAN_BACK_TIMER = 2,
1119}; 1147};
1120 1148
1149/* SA Query action */
1150enum ieee80211_sa_query_action {
1151 WLAN_ACTION_SA_QUERY_REQUEST = 0,
1152 WLAN_ACTION_SA_QUERY_RESPONSE = 1,
1153};
1154
1155
1121/* A-MSDU 802.11n */ 1156/* A-MSDU 802.11n */
1122#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080 1157#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080
1123 1158
@@ -1128,6 +1163,7 @@ enum ieee80211_back_parties {
1128/* reserved: 0x000FAC03 */ 1163/* reserved: 0x000FAC03 */
1129#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 1164#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
1130#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 1165#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
1166#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
1131 1167
1132#define WLAN_MAX_KEY_LEN 32 1168#define WLAN_MAX_KEY_LEN 32
1133 1169
@@ -1185,4 +1221,149 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
1185 return hdr->addr1; 1221 return hdr->addr1;
1186} 1222}
1187 1223
1224/**
1225 * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
1226 * @hdr: the frame (buffer must include at least the first octet of payload)
1227 */
1228static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
1229{
1230 if (ieee80211_is_disassoc(hdr->frame_control) ||
1231 ieee80211_is_deauth(hdr->frame_control))
1232 return true;
1233
1234 if (ieee80211_is_action(hdr->frame_control)) {
1235 u8 *category;
1236
1237 /*
1238 * Action frames, excluding Public Action frames, are Robust
1239 * Management Frames. However, if we are looking at a Protected
1240 * frame, skip the check since the data may be encrypted and
1241 * the frame has already been found to be a Robust Management
1242 * Frame (by the other end).
1243 */
1244 if (ieee80211_has_protected(hdr->frame_control))
1245 return true;
1246 category = ((u8 *) hdr) + 24;
1247 return *category != WLAN_CATEGORY_PUBLIC;
1248 }
1249
1250 return false;
1251}
1252
1253/**
1254 * ieee80211_fhss_chan_to_freq - get channel frequency
1255 * @channel: the FHSS channel
1256 *
1257 * Convert IEEE802.11 FHSS channel to frequency (MHz)
1258 * Ref IEEE 802.11-2007 section 14.6
1259 */
1260static inline int ieee80211_fhss_chan_to_freq(int channel)
1261{
1262 if ((channel > 1) && (channel < 96))
1263 return channel + 2400;
1264 else
1265 return -1;
1266}
1267
1268/**
1269 * ieee80211_freq_to_fhss_chan - get channel
1270 * @freq: the channels frequency
1271 *
1272 * Convert frequency (MHz) to IEEE802.11 FHSS channel
1273 * Ref IEEE 802.11-2007 section 14.6
1274 */
1275static inline int ieee80211_freq_to_fhss_chan(int freq)
1276{
1277 if ((freq > 2401) && (freq < 2496))
1278 return freq - 2400;
1279 else
1280 return -1;
1281}
1282
1283/**
1284 * ieee80211_dsss_chan_to_freq - get channel center frequency
1285 * @channel: the DSSS channel
1286 *
1287 * Convert IEEE802.11 DSSS channel to the center frequency (MHz).
1288 * Ref IEEE 802.11-2007 section 15.6
1289 */
1290static inline int ieee80211_dsss_chan_to_freq(int channel)
1291{
1292 if ((channel > 0) && (channel < 14))
1293 return 2407 + (channel * 5);
1294 else if (channel == 14)
1295 return 2484;
1296 else
1297 return -1;
1298}
1299
1300/**
1301 * ieee80211_freq_to_dsss_chan - get channel
1302 * @freq: the frequency
1303 *
1304 * Convert frequency (MHz) to IEEE802.11 DSSS channel
1305 * Ref IEEE 802.11-2007 section 15.6
1306 *
1307 * This routine selects the channel with the closest center frequency.
1308 */
1309static inline int ieee80211_freq_to_dsss_chan(int freq)
1310{
1311 if ((freq >= 2410) && (freq < 2475))
1312 return (freq - 2405) / 5;
1313 else if ((freq >= 2482) && (freq < 2487))
1314 return 14;
1315 else
1316 return -1;
1317}
1318
1319/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back
1320 * Ref IEEE 802.11-2007 section 18.4.6.2
1321 *
1322 * The channels and frequencies are the same as those defined for DSSS
1323 */
1324#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan)
1325#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq)
1326
1327/* Convert IEEE802.11 ERP channel to frequency (MHz) and back
1328 * Ref IEEE 802.11-2007 section 19.4.2
1329 */
1330#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan)
1331#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq)
1332
1333/**
1334 * ieee80211_ofdm_chan_to_freq - get channel center frequency
1335 * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
1336 * @channel: the OFDM channel
1337 *
1338 * Convert IEEE802.11 OFDM channel to center frequency (MHz)
1339 * Ref IEEE 802.11-2007 section 17.3.8.3.2
1340 */
1341static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel)
1342{
1343 if ((channel > 0) && (channel <= 200) &&
1344 (s_freq >= 4000))
1345 return s_freq + (channel * 5);
1346 else
1347 return -1;
1348}
1349
1350/**
1351 * ieee80211_freq_to_ofdm_channel - get channel
1352 * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
1353 * @freq: the frequency
1354 *
1355 * Convert frequency (MHz) to IEEE802.11 OFDM channel
1356 * Ref IEEE 802.11-2007 section 17.3.8.3.2
1357 *
1358 * This routine selects the channel with the closest center frequency.
1359 */
1360static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq)
1361{
1362 if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) &&
1363 (s_freq >= 4000))
1364 return (freq + 2 - s_freq) / 5;
1365 else
1366 return -1;
1367}
1368
1188#endif /* LINUX_IEEE80211_H */ 1369#endif /* LINUX_IEEE80211_H */
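The DSSS helpers added to ieee80211.h above map 2.4 GHz channel numbers to center frequencies (channels 1-13 at 2407 + 5 * channel MHz, channel 14 at 2484 MHz, per IEEE 802.11-2007 clause 15.6) and back, returning -1 for out-of-range input. A stand-alone user-space sketch of the same arithmetic, with a few spot checks, is shown below; it restates the logic of ieee80211_dsss_chan_to_freq()/ieee80211_freq_to_dsss_chan() rather than reusing the kernel code.

#include <stdio.h>

/* Same arithmetic as ieee80211_dsss_chan_to_freq(): -1 on bad input. */
static int dsss_chan_to_freq(int channel)
{
	if (channel > 0 && channel < 14)
		return 2407 + channel * 5;
	if (channel == 14)
		return 2484;
	return -1;
}

/* Same arithmetic as ieee80211_freq_to_dsss_chan(): picks the channel
 * with the closest center frequency, -1 on bad input. */
static int freq_to_dsss_chan(int freq)
{
	if (freq >= 2410 && freq < 2475)
		return (freq - 2405) / 5;
	if (freq >= 2482 && freq < 2487)
		return 14;
	return -1;
}

int main(void)
{
	printf("chan 1  -> %d MHz\n", dsss_chan_to_freq(1));	/* 2412 */
	printf("chan 6  -> %d MHz\n", dsss_chan_to_freq(6));	/* 2437 */
	printf("chan 14 -> %d MHz\n", dsss_chan_to_freq(14));	/* 2484 */
	printf("2437 MHz -> chan %d\n", freq_to_dsss_chan(2437));	/* 6  */
	printf("2484 MHz -> chan %d\n", freq_to_dsss_chan(2484));	/* 14 */
	return 0;
}
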
diff --git a/include/linux/if.h b/include/linux/if.h
index 2a6e29620a9..1108f3e099e 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -66,6 +66,7 @@
66#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ 66#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
67#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ 67#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */
68#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ 68#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */
69#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */
69 70
70#define IF_GET_IFACE 0x0001 /* for querying only */ 71#define IF_GET_IFACE 0x0001 /* for querying only */
71#define IF_GET_PROTO 0x0002 72#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 7f3c735f422..0216e1bdbc5 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -17,7 +17,7 @@
17 * as published by the Free Software Foundation; either version 17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version. 18 * 2 of the License, or (at your option) any later version.
19 */ 19 */
20 20
21#ifndef _LINUX_IF_ETHER_H 21#ifndef _LINUX_IF_ETHER_H
22#define _LINUX_IF_ETHER_H 22#define _LINUX_IF_ETHER_H
23 23
@@ -25,7 +25,7 @@
25 25
26/* 26/*
27 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble 27 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
28 * and FCS/CRC (frame check sequence). 28 * and FCS/CRC (frame check sequence).
29 */ 29 */
30 30
31#define ETH_ALEN 6 /* Octets in one ethernet addr */ 31#define ETH_ALEN 6 /* Octets in one ethernet addr */
@@ -83,7 +83,7 @@
83/* 83/*
84 * Non DIX types. Won't clash for 1500 types. 84 * Non DIX types. Won't clash for 1500 types.
85 */ 85 */
86 86
87#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */ 87#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
88#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */ 88#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
89#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */ 89#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
@@ -109,7 +109,7 @@
109/* 109/*
110 * This is an Ethernet frame header. 110 * This is an Ethernet frame header.
111 */ 111 */
112 112
113struct ethhdr { 113struct ethhdr {
114 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 114 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
115 unsigned char h_source[ETH_ALEN]; /* source ether addr */ 115 unsigned char h_source[ETH_ALEN]; /* source ether addr */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 30c88b2245f..90b5fae5d71 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -95,16 +95,16 @@ struct pppoe_tag {
95} __attribute ((packed)); 95} __attribute ((packed));
96 96
97/* Tag identifiers */ 97/* Tag identifiers */
98#define PTT_EOL __constant_htons(0x0000) 98#define PTT_EOL __cpu_to_be16(0x0000)
99#define PTT_SRV_NAME __constant_htons(0x0101) 99#define PTT_SRV_NAME __cpu_to_be16(0x0101)
100#define PTT_AC_NAME __constant_htons(0x0102) 100#define PTT_AC_NAME __cpu_to_be16(0x0102)
101#define PTT_HOST_UNIQ __constant_htons(0x0103) 101#define PTT_HOST_UNIQ __cpu_to_be16(0x0103)
102#define PTT_AC_COOKIE __constant_htons(0x0104) 102#define PTT_AC_COOKIE __cpu_to_be16(0x0104)
103#define PTT_VENDOR __constant_htons(0x0105) 103#define PTT_VENDOR __cpu_to_be16(0x0105)
104#define PTT_RELAY_SID __constant_htons(0x0110) 104#define PTT_RELAY_SID __cpu_to_be16(0x0110)
105#define PTT_SRV_ERR __constant_htons(0x0201) 105#define PTT_SRV_ERR __cpu_to_be16(0x0201)
106#define PTT_SYS_ERR __constant_htons(0x0202) 106#define PTT_SYS_ERR __cpu_to_be16(0x0202)
107#define PTT_GEN_ERR __constant_htons(0x0203) 107#define PTT_GEN_ERR __cpu_to_be16(0x0203)
108 108
109struct pppoe_hdr { 109struct pppoe_hdr {
110#if defined(__LITTLE_ENDIAN_BITFIELD) 110#if defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 8529f57ba26..049d6c9428d 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -46,6 +46,8 @@
46#define TUNSETOFFLOAD _IOW('T', 208, unsigned int) 46#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
47#define TUNSETTXFILTER _IOW('T', 209, unsigned int) 47#define TUNSETTXFILTER _IOW('T', 209, unsigned int)
48#define TUNGETIFF _IOR('T', 210, unsigned int) 48#define TUNGETIFF _IOR('T', 210, unsigned int)
49#define TUNGETSNDBUF _IOR('T', 211, int)
50#define TUNSETSNDBUF _IOW('T', 212, int)
49 51
50/* TUNSETIFF ifr flags */ 52/* TUNSETIFF ifr flags */
51#define IFF_TUN 0x0001 53#define IFF_TUN 0x0001
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 82c43624c06..5a9aae4adb4 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -16,14 +16,14 @@
16#define SIOCDELPRL (SIOCDEVPRIVATE + 6) 16#define SIOCDELPRL (SIOCDEVPRIVATE + 6)
17#define SIOCCHGPRL (SIOCDEVPRIVATE + 7) 17#define SIOCCHGPRL (SIOCDEVPRIVATE + 7)
18 18
19#define GRE_CSUM __constant_htons(0x8000) 19#define GRE_CSUM __cpu_to_be16(0x8000)
20#define GRE_ROUTING __constant_htons(0x4000) 20#define GRE_ROUTING __cpu_to_be16(0x4000)
21#define GRE_KEY __constant_htons(0x2000) 21#define GRE_KEY __cpu_to_be16(0x2000)
22#define GRE_SEQ __constant_htons(0x1000) 22#define GRE_SEQ __cpu_to_be16(0x1000)
23#define GRE_STRICT __constant_htons(0x0800) 23#define GRE_STRICT __cpu_to_be16(0x0800)
24#define GRE_REC __constant_htons(0x0700) 24#define GRE_REC __cpu_to_be16(0x0700)
25#define GRE_FLAGS __constant_htons(0x00F8) 25#define GRE_FLAGS __cpu_to_be16(0x00F8)
26#define GRE_VERSION __constant_htons(0x0007) 26#define GRE_VERSION __cpu_to_be16(0x0007)
27 27
28struct ip_tunnel_parm 28struct ip_tunnel_parm
29{ 29{
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 06fcdb45106..acef2a770b6 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -108,6 +108,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
108#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) 108#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
109#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) 109#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
110#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) 110#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
111#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
111 112
112struct in_ifaddr 113struct in_ifaddr
113{ 114{
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 8a455694d68..0d45b4e8d36 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -193,6 +193,9 @@ struct vif_device
193struct mfc_cache 193struct mfc_cache
194{ 194{
195 struct mfc_cache *next; /* Next entry on cache line */ 195 struct mfc_cache *next; /* Next entry on cache line */
196#ifdef CONFIG_NET_NS
197 struct net *mfc_net;
198#endif
196 __be32 mfc_mcastgrp; /* Group the entry belongs to */ 199 __be32 mfc_mcastgrp; /* Group the entry belongs to */
197 __be32 mfc_origin; /* Source of packet */ 200 __be32 mfc_origin; /* Source of packet */
198 vifi_t mfc_parent; /* Source interface */ 201 vifi_t mfc_parent; /* Source interface */
@@ -215,6 +218,18 @@ struct mfc_cache
215 } mfc_un; 218 } mfc_un;
216}; 219};
217 220
221static inline
222struct net *mfc_net(const struct mfc_cache *mfc)
223{
224 return read_pnet(&mfc->mfc_net);
225}
226
227static inline
228void mfc_net_set(struct mfc_cache *mfc, struct net *net)
229{
230 write_pnet(&mfc->mfc_net, hold_net(net));
231}
232
218#define MFC_STATIC 1 233#define MFC_STATIC 1
219#define MFC_NOTIFY 2 234#define MFC_NOTIFY 2
220 235
@@ -241,7 +256,8 @@ struct mfc_cache
241 256
242#ifdef __KERNEL__ 257#ifdef __KERNEL__
243struct rtmsg; 258struct rtmsg;
244extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); 259extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
260 struct rtmsg *rtm, int nowait);
245#endif 261#endif
246 262
247#endif 263#endif
diff --git a/include/linux/ncp_no.h b/include/linux/ncp_no.h
index f56a696a7cc..cddaa48fb18 100644
--- a/include/linux/ncp_no.h
+++ b/include/linux/ncp_no.h
@@ -2,18 +2,18 @@
2#define _NCP_NO 2#define _NCP_NO
3 3
4/* these define the attribute byte as seen by NCP */ 4/* these define the attribute byte as seen by NCP */
5#define aRONLY (__constant_cpu_to_le32(1)) 5#define aRONLY (__cpu_to_le32(1))
6#define aHIDDEN (__constant_cpu_to_le32(2)) 6#define aHIDDEN (__cpu_to_le32(2))
7#define aSYSTEM (__constant_cpu_to_le32(4)) 7#define aSYSTEM (__cpu_to_le32(4))
8#define aEXECUTE (__constant_cpu_to_le32(8)) 8#define aEXECUTE (__cpu_to_le32(8))
9#define aDIR (__constant_cpu_to_le32(0x10)) 9#define aDIR (__cpu_to_le32(0x10))
10#define aARCH (__constant_cpu_to_le32(0x20)) 10#define aARCH (__cpu_to_le32(0x20))
11#define aSHARED (__constant_cpu_to_le32(0x80)) 11#define aSHARED (__cpu_to_le32(0x80))
12#define aDONTSUBALLOCATE (__constant_cpu_to_le32(1L<<11)) 12#define aDONTSUBALLOCATE (__cpu_to_le32(1L<<11))
13#define aTRANSACTIONAL (__constant_cpu_to_le32(1L<<12)) 13#define aTRANSACTIONAL (__cpu_to_le32(1L<<12))
14#define aPURGE (__constant_cpu_to_le32(1L<<16)) 14#define aPURGE (__cpu_to_le32(1L<<16))
15#define aRENAMEINHIBIT (__constant_cpu_to_le32(1L<<17)) 15#define aRENAMEINHIBIT (__cpu_to_le32(1L<<17))
16#define aDELETEINHIBIT (__constant_cpu_to_le32(1L<<18)) 16#define aDELETEINHIBIT (__cpu_to_le32(1L<<18))
17#define aDONTCOMPRESS (__constant_cpu_to_le32(1L<<27)) 17#define aDONTCOMPRESS (__cpu_to_le32(1L<<27))
18 18
19#endif /* _NCP_NO */ 19#endif /* _NCP_NO */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ec54785d34f..bd8b4ca85a2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -96,7 +96,7 @@ struct wireless_dev;
96 * Compute the worst case header length according to the protocols 96 * Compute the worst case header length according to the protocols
97 * used. 97 * used.
98 */ 98 */
99 99
100#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 100#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
101# if defined(CONFIG_MAC80211_MESH) 101# if defined(CONFIG_MAC80211_MESH)
102# define LL_MAX_HEADER 128 102# define LL_MAX_HEADER 128
@@ -124,7 +124,7 @@ struct wireless_dev;
124 * Network device statistics. Akin to the 2.0 ether stats but 124 * Network device statistics. Akin to the 2.0 ether stats but
125 * with byte counters. 125 * with byte counters.
126 */ 126 */
127 127
128struct net_device_stats 128struct net_device_stats
129{ 129{
130 unsigned long rx_packets; /* total packets received */ 130 unsigned long rx_packets; /* total packets received */
@@ -285,7 +285,7 @@ enum netdev_state_t
285 285
286/* 286/*
287 * This structure holds at boot time configured netdevice settings. They 287 * This structure holds at boot time configured netdevice settings. They
288 * are then used in the device probing. 288 * are then used in the device probing.
289 */ 289 */
290struct netdev_boot_setup { 290struct netdev_boot_setup {
291 char name[IFNAMSIZ]; 291 char name[IFNAMSIZ];
@@ -314,6 +314,9 @@ struct napi_struct {
314 spinlock_t poll_lock; 314 spinlock_t poll_lock;
315 int poll_owner; 315 int poll_owner;
316#endif 316#endif
317
318 unsigned int gro_count;
319
317 struct net_device *dev; 320 struct net_device *dev;
318 struct list_head dev_list; 321 struct list_head dev_list;
319 struct sk_buff *gro_list; 322 struct sk_buff *gro_list;
@@ -740,7 +743,7 @@ struct net_device
740 void *dsa_ptr; /* dsa specific data */ 743 void *dsa_ptr; /* dsa specific data */
741#endif 744#endif
742 void *atalk_ptr; /* AppleTalk link */ 745 void *atalk_ptr; /* AppleTalk link */
743 void *ip_ptr; /* IPv4 specific data */ 746 void *ip_ptr; /* IPv4 specific data */
744 void *dn_ptr; /* DECnet specific data */ 747 void *dn_ptr; /* DECnet specific data */
745 void *ip6_ptr; /* IPv6 specific data */ 748 void *ip6_ptr; /* IPv6 specific data */
746 void *ec_ptr; /* Econet specific data */ 749 void *ec_ptr; /* Econet specific data */
@@ -753,7 +756,7 @@ struct net_device
753 */ 756 */
754 unsigned long last_rx; /* Time of last Rx */ 757 unsigned long last_rx; /* Time of last Rx */
755 /* Interface address info used in eth_type_trans() */ 758 /* Interface address info used in eth_type_trans() */
756 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast 759 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
757 because most packets are unicast) */ 760 because most packets are unicast) */
758 761
759 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 762 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
@@ -984,6 +987,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
984void netif_napi_del(struct napi_struct *napi); 987void netif_napi_del(struct napi_struct *napi);
985 988
986struct napi_gro_cb { 989struct napi_gro_cb {
990 /* This indicates where we are processing relative to skb->data. */
991 int data_offset;
992
987 /* This is non-zero if the packet may be of the same flow. */ 993 /* This is non-zero if the packet may be of the same flow. */
988 int same_flow; 994 int same_flow;
989 995
@@ -1087,6 +1093,36 @@ extern int dev_restart(struct net_device *dev);
1087#ifdef CONFIG_NETPOLL_TRAP 1093#ifdef CONFIG_NETPOLL_TRAP
1088extern int netpoll_trap(void); 1094extern int netpoll_trap(void);
1089#endif 1095#endif
1096extern void *skb_gro_header(struct sk_buff *skb, unsigned int hlen);
1097extern int skb_gro_receive(struct sk_buff **head,
1098 struct sk_buff *skb);
1099
1100static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1101{
1102 return NAPI_GRO_CB(skb)->data_offset;
1103}
1104
1105static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1106{
1107 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1108}
1109
1110static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1111{
1112 NAPI_GRO_CB(skb)->data_offset += len;
1113}
1114
1115static inline void skb_gro_reset_offset(struct sk_buff *skb)
1116{
1117 NAPI_GRO_CB(skb)->data_offset = 0;
1118}
1119
1120static inline void *skb_gro_mac_header(struct sk_buff *skb)
1121{
1122 return skb_mac_header(skb) < skb->data ? skb_mac_header(skb) :
1123 page_address(skb_shinfo(skb)->frags[0].page) +
1124 skb_shinfo(skb)->frags[0].page_offset;
1125}
1090 1126
1091static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 1127static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1092 unsigned short type, 1128 unsigned short type,
@@ -1375,12 +1411,15 @@ extern int netif_receive_skb(struct sk_buff *skb);
1375extern void napi_gro_flush(struct napi_struct *napi); 1411extern void napi_gro_flush(struct napi_struct *napi);
1376extern int dev_gro_receive(struct napi_struct *napi, 1412extern int dev_gro_receive(struct napi_struct *napi,
1377 struct sk_buff *skb); 1413 struct sk_buff *skb);
1414extern int napi_skb_finish(int ret, struct sk_buff *skb);
1378extern int napi_gro_receive(struct napi_struct *napi, 1415extern int napi_gro_receive(struct napi_struct *napi,
1379 struct sk_buff *skb); 1416 struct sk_buff *skb);
1380extern void napi_reuse_skb(struct napi_struct *napi, 1417extern void napi_reuse_skb(struct napi_struct *napi,
1381 struct sk_buff *skb); 1418 struct sk_buff *skb);
1382extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, 1419extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi,
1383 struct napi_gro_fraginfo *info); 1420 struct napi_gro_fraginfo *info);
1421extern int napi_frags_finish(struct napi_struct *napi,
1422 struct sk_buff *skb, int ret);
1384extern int napi_gro_frags(struct napi_struct *napi, 1423extern int napi_gro_frags(struct napi_struct *napi,
1385 struct napi_gro_fraginfo *info); 1424 struct napi_gro_fraginfo *info);
1386extern void netif_nit_deliver(struct sk_buff *skb); 1425extern void netif_nit_deliver(struct sk_buff *skb);
@@ -1574,56 +1613,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
1574 return (1 << debug_value) - 1; 1613 return (1 << debug_value) - 1;
1575} 1614}
1576 1615
1577/* Test if receive needs to be scheduled but only if up */
1578static inline int netif_rx_schedule_prep(struct napi_struct *napi)
1579{
1580 return napi_schedule_prep(napi);
1581}
1582
1583/* Add interface to tail of rx poll list. This assumes that _prep has
1584 * already been called and returned 1.
1585 */
1586static inline void __netif_rx_schedule(struct napi_struct *napi)
1587{
1588 __napi_schedule(napi);
1589}
1590
1591/* Try to reschedule poll. Called by irq handler. */
1592
1593static inline void netif_rx_schedule(struct napi_struct *napi)
1594{
1595 if (netif_rx_schedule_prep(napi))
1596 __netif_rx_schedule(napi);
1597}
1598
1599/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
1600static inline int netif_rx_reschedule(struct napi_struct *napi)
1601{
1602 if (napi_schedule_prep(napi)) {
1603 __netif_rx_schedule(napi);
1604 return 1;
1605 }
1606 return 0;
1607}
1608
1609/* same as netif_rx_complete, except that local_irq_save(flags)
1610 * has already been issued
1611 */
1612static inline void __netif_rx_complete(struct napi_struct *napi)
1613{
1614 __napi_complete(napi);
1615}
1616
1617/* Remove interface from poll list: it must be in the poll list
1618 * on current cpu. This primitive is called by dev->poll(), when
1619 * it completes the work. The device cannot be out of poll list at this
1620 * moment, it is BUG().
1621 */
1622static inline void netif_rx_complete(struct napi_struct *napi)
1623{
1624 napi_complete(napi);
1625}
1626
1627static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 1616static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
1628{ 1617{
1629 spin_lock(&txq->_xmit_lock); 1618 spin_lock(&txq->_xmit_lock);
@@ -1874,7 +1863,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
1874 1863
1875 if (dev->priv_flags & IFF_SLAVE_INACTIVE) { 1864 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
1876 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && 1865 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
1877 skb->protocol == __constant_htons(ETH_P_ARP)) 1866 skb->protocol == __cpu_to_be16(ETH_P_ARP))
1878 return 0; 1867 return 0;
1879 1868
1880 if (master->priv_flags & IFF_MASTER_ALB) { 1869 if (master->priv_flags & IFF_MASTER_ALB) {
@@ -1883,7 +1872,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
1883 return 0; 1872 return 0;
1884 } 1873 }
1885 if (master->priv_flags & IFF_MASTER_8023AD && 1874 if (master->priv_flags & IFF_MASTER_8023AD &&
1886 skb->protocol == __constant_htons(ETH_P_SLOW)) 1875 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
1887 return 0; 1876 return 0;
1888 1877
1889 return 1; 1878 return 1;
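The netdevice.h hunks above add a data_offset field to napi_gro_cb and the skb_gro_offset()/skb_gro_len()/skb_gro_pull() accessors, so GRO header parsing can advance a cursor through the packet without moving skb->data. The following is a user-space analogue of just that bookkeeping, with a made-up pkt structure standing in for the sk_buff; it illustrates the offset/length arithmetic only and is not kernel code.

#include <stdio.h>

/* Toy stand-in for the GRO cursor: a total length plus an offset that
 * advances as each header layer is "pulled", mirroring skb_gro_pull(). */
struct pkt {
	unsigned int len;		/* total bytes in the packet        */
	unsigned int data_offset;	/* how far header parsing has gone  */
};

static unsigned int gro_offset(const struct pkt *p) { return p->data_offset; }
static unsigned int gro_len(const struct pkt *p)    { return p->len - p->data_offset; }
static void gro_pull(struct pkt *p, unsigned int n) { p->data_offset += n; }

int main(void)
{
	/* data_offset starts at 0, as skb_gro_reset_offset() would set it. */
	struct pkt p = { .len = 1514, .data_offset = 0 };

	gro_pull(&p, 14);	/* Ethernet header */
	gro_pull(&p, 20);	/* IPv4 header     */
	printf("offset=%u remaining=%u\n", gro_offset(&p), gro_len(&p));	/* 34, 1480 */
	return 0;
}
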
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 499aa937590..f8105e54716 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -59,9 +59,9 @@ static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
59static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) 59static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
60{ 60{
61 switch (skb->protocol) { 61 switch (skb->protocol) {
62 case __constant_htons(ETH_P_8021Q): 62 case __cpu_to_be16(ETH_P_8021Q):
63 return VLAN_HLEN; 63 return VLAN_HLEN;
64 case __constant_htons(ETH_P_PPP_SES): 64 case __cpu_to_be16(ETH_P_PPP_SES):
65 return PPPOE_SES_HLEN; 65 return PPPOE_SES_HLEN;
66 default: 66 default:
67 return 0; 67 return 0;
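nf_bridge_encap_header_len() above switches on skb->protocol, a big-endian value, so its case labels must be compile-time big-endian constants; the patch swaps __constant_htons() for __cpu_to_be16(), which is likewise an integer constant expression (the same substitution appears in the if_tunnel.h, if_pppox.h, pim.h and sctp.h hunks). A user-space sketch of the pattern follows; CPU_TO_BE16 is a stand-in macro that assumes a little-endian host, and everything except the ETH_P_* values and header lengths is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __cpu_to_be16(): an integer constant expression, so it is
 * legal in a case label.  Little-endian host assumed for the byte swap. */
#define CPU_TO_BE16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))

#define ETH_P_8021Q	0x8100	/* 802.1Q VLAN    */
#define ETH_P_PPP_SES	0x8864	/* PPPoE session  */
#define VLAN_HLEN	4
#define PPPOE_SES_HLEN	8

/* Same shape as nf_bridge_encap_header_len(): proto_be is already in
 * network byte order, as skb->protocol is. */
static unsigned int encap_header_len(uint16_t proto_be)
{
	switch (proto_be) {
	case CPU_TO_BE16(ETH_P_8021Q):
		return VLAN_HLEN;
	case CPU_TO_BE16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}

int main(void)
{
	printf("%u\n", encap_header_len(CPU_TO_BE16(ETH_P_8021Q)));	/* 4 */
	printf("%u\n", encap_header_len(CPU_TO_BE16(0x0800)));		/* 0 */
	return 0;
}
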
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index e86ed59f9ad..8802d1bda38 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -47,7 +47,7 @@
47 * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or 47 * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or
48 * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME, 48 * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME,
49 * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or 49 * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or
50 * %NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET. 50 * %NL80211_ATTR_WIPHY_CHANNEL_TYPE.
51 * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request 51 * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request
52 * or rename notification. Has attributes %NL80211_ATTR_WIPHY and 52 * or rename notification. Has attributes %NL80211_ATTR_WIPHY and
53 * %NL80211_ATTR_WIPHY_NAME. 53 * %NL80211_ATTR_WIPHY_NAME.
@@ -72,8 +72,8 @@
72 * 72 *
73 * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified 73 * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified
74 * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. 74 * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC.
75 * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT or 75 * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT,
76 * %NL80211_ATTR_KEY_THRESHOLD. 76 * %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD.
77 * @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA, 77 * @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA,
78 * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC and %NL80211_ATTR_KEY_CIPHER 78 * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC and %NL80211_ATTR_KEY_CIPHER
79 * attributes. 79 * attributes.
@@ -84,7 +84,7 @@
84 * %NL80222_CMD_NEW_BEACON message) 84 * %NL80222_CMD_NEW_BEACON message)
85 * @NL80211_CMD_SET_BEACON: set the beacon on an access point interface 85 * @NL80211_CMD_SET_BEACON: set the beacon on an access point interface
86 * using the %NL80211_ATTR_BEACON_INTERVAL, %NL80211_ATTR_DTIM_PERIOD, 86 * using the %NL80211_ATTR_BEACON_INTERVAL, %NL80211_ATTR_DTIM_PERIOD,
87 * %NL80211_BEACON_HEAD and %NL80211_BEACON_TAIL attributes. 87 * %NL80211_ATTR_BEACON_HEAD and %NL80211_ATTR_BEACON_TAIL attributes.
88 * @NL80211_CMD_NEW_BEACON: add a new beacon to an access point interface, 88 * @NL80211_CMD_NEW_BEACON: add a new beacon to an access point interface,
89 * parameters are like for %NL80211_CMD_SET_BEACON. 89 * parameters are like for %NL80211_CMD_SET_BEACON.
90 * @NL80211_CMD_DEL_BEACON: remove the beacon, stop sending it 90 * @NL80211_CMD_DEL_BEACON: remove the beacon, stop sending it
@@ -113,6 +113,8 @@
113 * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by 113 * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by
114 * %NL80211_ATTR_IFINDEX. 114 * %NL80211_ATTR_IFINDEX.
115 * 115 *
116 * @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set
117 * regulatory domain.
116 * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command 118 * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
117 * after being queried by the kernel. CRDA replies by sending a regulatory 119 * after being queried by the kernel. CRDA replies by sending a regulatory
118 * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our 120 * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our
@@ -133,6 +135,21 @@
133 * @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the 135 * @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the
134 * interface identified by %NL80211_ATTR_IFINDEX 136 * interface identified by %NL80211_ATTR_IFINDEX
135 * 137 *
138 * @NL80211_CMD_SET_MGMT_EXTRA_IE: Set extra IEs for management frames. The
139 * interface is identified with %NL80211_ATTR_IFINDEX and the management
140 * frame subtype with %NL80211_ATTR_MGMT_SUBTYPE. The extra IE data to be
141 * added to the end of the specified management frame is specified with
142 * %NL80211_ATTR_IE. If the command succeeds, the requested data will be
143 * added to all specified management frames generated by
144 * kernel/firmware/driver.
145 *
146 * @NL80211_CMD_GET_SCAN: get scan results
147 * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
148 * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
149 * NL80211_CMD_GET_SCAN and on the "scan" multicast group)
150 * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
151 * partial scan results may be available
152 *
136 * @NL80211_CMD_MAX: highest used command number 153 * @NL80211_CMD_MAX: highest used command number
137 * @__NL80211_CMD_AFTER_LAST: internal use 154 * @__NL80211_CMD_AFTER_LAST: internal use
138 */ 155 */
@@ -178,6 +195,15 @@ enum nl80211_commands {
178 NL80211_CMD_GET_MESH_PARAMS, 195 NL80211_CMD_GET_MESH_PARAMS,
179 NL80211_CMD_SET_MESH_PARAMS, 196 NL80211_CMD_SET_MESH_PARAMS,
180 197
198 NL80211_CMD_SET_MGMT_EXTRA_IE,
199
200 NL80211_CMD_GET_REG,
201
202 NL80211_CMD_GET_SCAN,
203 NL80211_CMD_TRIGGER_SCAN,
204 NL80211_CMD_NEW_SCAN_RESULTS,
205 NL80211_CMD_SCAN_ABORTED,
206
181 /* add new commands above here */ 207 /* add new commands above here */
182 208
183 /* used to define NL80211_CMD_MAX below */ 209 /* used to define NL80211_CMD_MAX below */
@@ -190,6 +216,7 @@ enum nl80211_commands {
190 * here 216 * here
191 */ 217 */
192#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS 218#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
219#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE
193 220
194/** 221/**
195 * enum nl80211_attrs - nl80211 netlink attributes 222 * enum nl80211_attrs - nl80211 netlink attributes
@@ -284,6 +311,24 @@ enum nl80211_commands {
284 * supported interface types, each a flag attribute with the number 311 * supported interface types, each a flag attribute with the number
285 * of the interface mode. 312 * of the interface mode.
286 * 313 *
314 * @NL80211_ATTR_MGMT_SUBTYPE: Management frame subtype for
315 * %NL80211_CMD_SET_MGMT_EXTRA_IE.
316 *
317 * @NL80211_ATTR_IE: Information element(s) data (used, e.g., with
318 * %NL80211_CMD_SET_MGMT_EXTRA_IE).
319 *
320 * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with
321 * a single scan request, a wiphy attribute.
322 *
323 * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz)
324 * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive
325 * scanning and include a zero-length SSID (wildcard) for wildcard scan
326 * @NL80211_ATTR_SCAN_GENERATION: the scan generation increases whenever the
327 * scan result list changes (BSS expired or added) so that applications
328 * can verify that they got a single, consistent snapshot (when all dump
329 * messages carried the same generation number)
330 * @NL80211_ATTR_BSS: scan result BSS
331 *
287 * @NL80211_ATTR_MAX: highest attribute number currently defined 332 * @NL80211_ATTR_MAX: highest attribute number currently defined
288 * @__NL80211_ATTR_AFTER_LAST: internal use 333 * @__NL80211_ATTR_AFTER_LAST: internal use
289 */ 334 */
@@ -346,6 +391,18 @@ enum nl80211_attrs {
346 NL80211_ATTR_WIPHY_FREQ, 391 NL80211_ATTR_WIPHY_FREQ,
347 NL80211_ATTR_WIPHY_CHANNEL_TYPE, 392 NL80211_ATTR_WIPHY_CHANNEL_TYPE,
348 393
394 NL80211_ATTR_KEY_DEFAULT_MGMT,
395
396 NL80211_ATTR_MGMT_SUBTYPE,
397 NL80211_ATTR_IE,
398
399 NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
400
401 NL80211_ATTR_SCAN_FREQUENCIES,
402 NL80211_ATTR_SCAN_SSIDS,
403 NL80211_ATTR_SCAN_GENERATION,
404 NL80211_ATTR_BSS,
405
349 /* add attributes here, update the policy in nl80211.c */ 406 /* add attributes here, update the policy in nl80211.c */
350 407
351 __NL80211_ATTR_AFTER_LAST, 408 __NL80211_ATTR_AFTER_LAST,
@@ -360,7 +417,9 @@ enum nl80211_attrs {
360#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES 417#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES
361#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS 418#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS
362#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ 419#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ
363#define NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET 420#define NL80211_ATTR_WIPHY_CHANNEL_TYPE NL80211_ATTR_WIPHY_CHANNEL_TYPE
421#define NL80211_ATTR_MGMT_SUBTYPE NL80211_ATTR_MGMT_SUBTYPE
422#define NL80211_ATTR_IE NL80211_ATTR_IE
364 423
365#define NL80211_MAX_SUPP_RATES 32 424#define NL80211_MAX_SUPP_RATES 32
366#define NL80211_MAX_SUPP_REG_RULES 32 425#define NL80211_MAX_SUPP_REG_RULES 32
@@ -412,12 +471,14 @@ enum nl80211_iftype {
412 * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames 471 * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames
413 * with short barker preamble 472 * with short barker preamble
414 * @NL80211_STA_FLAG_WME: station is WME/QoS capable 473 * @NL80211_STA_FLAG_WME: station is WME/QoS capable
474 * @NL80211_STA_FLAG_MFP: station uses management frame protection
415 */ 475 */
416enum nl80211_sta_flags { 476enum nl80211_sta_flags {
417 __NL80211_STA_FLAG_INVALID, 477 __NL80211_STA_FLAG_INVALID,
418 NL80211_STA_FLAG_AUTHORIZED, 478 NL80211_STA_FLAG_AUTHORIZED,
419 NL80211_STA_FLAG_SHORT_PREAMBLE, 479 NL80211_STA_FLAG_SHORT_PREAMBLE,
420 NL80211_STA_FLAG_WME, 480 NL80211_STA_FLAG_WME,
481 NL80211_STA_FLAG_MFP,
421 482
422 /* keep last */ 483 /* keep last */
423 __NL80211_STA_FLAG_AFTER_LAST, 484 __NL80211_STA_FLAG_AFTER_LAST,
@@ -811,4 +872,38 @@ enum nl80211_channel_type {
811 NL80211_CHAN_HT40MINUS, 872 NL80211_CHAN_HT40MINUS,
812 NL80211_CHAN_HT40PLUS 873 NL80211_CHAN_HT40PLUS
813}; 874};
875
876/**
877 * enum nl80211_bss - netlink attributes for a BSS
878 *
879 * @__NL80211_BSS_INVALID: invalid
880 * @NL80211_BSS_FREQUENCY: frequency in MHz (u32)
881 * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64)
882 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
883 * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16)
884 * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the
885 * raw information elements from the probe response/beacon (bin)
886 * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon
887 * in mBm (100 * dBm) (s32)
888 * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon
889 * in unspecified units, scaled to 0..100 (u8)
890 * @__NL80211_BSS_AFTER_LAST: internal
891 * @NL80211_BSS_MAX: highest BSS attribute
892 */
893enum nl80211_bss {
894 __NL80211_BSS_INVALID,
895 NL80211_BSS_BSSID,
896 NL80211_BSS_FREQUENCY,
897 NL80211_BSS_TSF,
898 NL80211_BSS_BEACON_INTERVAL,
899 NL80211_BSS_CAPABILITY,
900 NL80211_BSS_INFORMATION_ELEMENTS,
901 NL80211_BSS_SIGNAL_MBM,
902 NL80211_BSS_SIGNAL_UNSPEC,
903
904 /* keep last */
905 __NL80211_BSS_AFTER_LAST,
906 NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1
907};
908
814#endif /* __LINUX_NL80211_H */ 909#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 52a9fe08451..966e0233299 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -834,6 +834,8 @@
834#define PCI_DEVICE_ID_PROMISE_20276 0x5275 834#define PCI_DEVICE_ID_PROMISE_20276 0x5275
835#define PCI_DEVICE_ID_PROMISE_20277 0x7275 835#define PCI_DEVICE_ID_PROMISE_20277 0x7275
836 836
837#define PCI_VENDOR_ID_FOXCONN 0x105b
838
837#define PCI_VENDOR_ID_UMC 0x1060 839#define PCI_VENDOR_ID_UMC 0x1060
838#define PCI_DEVICE_ID_UMC_UM8673F 0x0101 840#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
839#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a 841#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
@@ -1969,6 +1971,8 @@
1969 1971
1970#define PCI_VENDOR_ID_SAMSUNG 0x144d 1972#define PCI_VENDOR_ID_SAMSUNG 0x144d
1971 1973
1974#define PCI_VENDOR_ID_AMBIT 0x1468
1975
1972#define PCI_VENDOR_ID_MYRICOM 0x14c1 1976#define PCI_VENDOR_ID_MYRICOM 0x14c1
1973 1977
1974#define PCI_VENDOR_ID_TITAN 0x14D2 1978#define PCI_VENDOR_ID_TITAN 0x14D2
@@ -2213,6 +2217,8 @@
2213 2217
2214#define PCI_VENDOR_ID_TOPSPIN 0x1867 2218#define PCI_VENDOR_ID_TOPSPIN 0x1867
2215 2219
2220#define PCI_VENDOR_ID_SILAN 0x1904
2221
2216#define PCI_VENDOR_ID_TDI 0x192E 2222#define PCI_VENDOR_ID_TDI 0x192E
2217#define PCI_DEVICE_ID_TDI_EHCI 0x0101 2223#define PCI_DEVICE_ID_TDI_EHCI 0x0101
2218 2224
diff --git a/include/linux/pim.h b/include/linux/pim.h
index 1ba0661561a..252bf6644c5 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -4,14 +4,14 @@
4#include <asm/byteorder.h> 4#include <asm/byteorder.h>
5 5
6/* Message types - V1 */ 6/* Message types - V1 */
7#define PIM_V1_VERSION __constant_htonl(0x10000000) 7#define PIM_V1_VERSION cpu_to_be32(0x10000000)
8#define PIM_V1_REGISTER 1 8#define PIM_V1_REGISTER 1
9 9
10/* Message types - V2 */ 10/* Message types - V2 */
11#define PIM_VERSION 2 11#define PIM_VERSION 2
12#define PIM_REGISTER 1 12#define PIM_REGISTER 1
13 13
14#define PIM_NULL_REGISTER __constant_htonl(0x40000000) 14#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
15 15
16/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ 16/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
17struct pimreghdr 17struct pimreghdr
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index a942892d6df..9d64bdf1477 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -22,6 +22,7 @@
22#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/poll.h> 24#include <linux/poll.h>
25#include <net/net_namespace.h>
25 26
26struct ppp_channel; 27struct ppp_channel;
27 28
@@ -56,6 +57,9 @@ extern void ppp_input(struct ppp_channel *, struct sk_buff *);
56 that we may have missed a packet. */ 57 that we may have missed a packet. */
57extern void ppp_input_error(struct ppp_channel *, int code); 58extern void ppp_input_error(struct ppp_channel *, int code);
58 59
60/* Attach a channel to a given PPP unit in specified net. */
61extern int ppp_register_net_channel(struct net *, struct ppp_channel *);
62
59/* Attach a channel to a given PPP unit. */ 63/* Attach a channel to a given PPP unit. */
60extern int ppp_register_channel(struct ppp_channel *); 64extern int ppp_register_channel(struct ppp_channel *);
61 65
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 8ba1c320f97..bd50b371ffa 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -172,35 +172,35 @@ typedef struct sctp_paramhdr {
172typedef enum { 172typedef enum {
173 173
174 /* RFC 2960 Section 3.3.5 */ 174 /* RFC 2960 Section 3.3.5 */
175 SCTP_PARAM_HEARTBEAT_INFO = __constant_htons(1), 175 SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1),
176 /* RFC 2960 Section 3.3.2.1 */ 176 /* RFC 2960 Section 3.3.2.1 */
177 SCTP_PARAM_IPV4_ADDRESS = __constant_htons(5), 177 SCTP_PARAM_IPV4_ADDRESS = cpu_to_be16(5),
178 SCTP_PARAM_IPV6_ADDRESS = __constant_htons(6), 178 SCTP_PARAM_IPV6_ADDRESS = cpu_to_be16(6),
179 SCTP_PARAM_STATE_COOKIE = __constant_htons(7), 179 SCTP_PARAM_STATE_COOKIE = cpu_to_be16(7),
180 SCTP_PARAM_UNRECOGNIZED_PARAMETERS = __constant_htons(8), 180 SCTP_PARAM_UNRECOGNIZED_PARAMETERS = cpu_to_be16(8),
181 SCTP_PARAM_COOKIE_PRESERVATIVE = __constant_htons(9), 181 SCTP_PARAM_COOKIE_PRESERVATIVE = cpu_to_be16(9),
182 SCTP_PARAM_HOST_NAME_ADDRESS = __constant_htons(11), 182 SCTP_PARAM_HOST_NAME_ADDRESS = cpu_to_be16(11),
183 SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = __constant_htons(12), 183 SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = cpu_to_be16(12),
184 SCTP_PARAM_ECN_CAPABLE = __constant_htons(0x8000), 184 SCTP_PARAM_ECN_CAPABLE = cpu_to_be16(0x8000),
185 185
186 /* AUTH Extension Section 3 */ 186 /* AUTH Extension Section 3 */
187 SCTP_PARAM_RANDOM = __constant_htons(0x8002), 187 SCTP_PARAM_RANDOM = cpu_to_be16(0x8002),
188 SCTP_PARAM_CHUNKS = __constant_htons(0x8003), 188 SCTP_PARAM_CHUNKS = cpu_to_be16(0x8003),
189 SCTP_PARAM_HMAC_ALGO = __constant_htons(0x8004), 189 SCTP_PARAM_HMAC_ALGO = cpu_to_be16(0x8004),
190 190
191 /* Add-IP: Supported Extensions, Section 4.2 */ 191 /* Add-IP: Supported Extensions, Section 4.2 */
192 SCTP_PARAM_SUPPORTED_EXT = __constant_htons(0x8008), 192 SCTP_PARAM_SUPPORTED_EXT = cpu_to_be16(0x8008),
193 193
194 /* PR-SCTP Sec 3.1 */ 194 /* PR-SCTP Sec 3.1 */
195 SCTP_PARAM_FWD_TSN_SUPPORT = __constant_htons(0xc000), 195 SCTP_PARAM_FWD_TSN_SUPPORT = cpu_to_be16(0xc000),
196 196
197 /* Add-IP Extension. Section 3.2 */ 197 /* Add-IP Extension. Section 3.2 */
198 SCTP_PARAM_ADD_IP = __constant_htons(0xc001), 198 SCTP_PARAM_ADD_IP = cpu_to_be16(0xc001),
199 SCTP_PARAM_DEL_IP = __constant_htons(0xc002), 199 SCTP_PARAM_DEL_IP = cpu_to_be16(0xc002),
200 SCTP_PARAM_ERR_CAUSE = __constant_htons(0xc003), 200 SCTP_PARAM_ERR_CAUSE = cpu_to_be16(0xc003),
201 SCTP_PARAM_SET_PRIMARY = __constant_htons(0xc004), 201 SCTP_PARAM_SET_PRIMARY = cpu_to_be16(0xc004),
202 SCTP_PARAM_SUCCESS_REPORT = __constant_htons(0xc005), 202 SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005),
203 SCTP_PARAM_ADAPTATION_LAYER_IND = __constant_htons(0xc006), 203 SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
204 204
205} sctp_param_t; /* enum */ 205} sctp_param_t; /* enum */
206 206
@@ -212,13 +212,13 @@ typedef enum {
212 * 212 *
213 */ 213 */
214typedef enum { 214typedef enum {
215 SCTP_PARAM_ACTION_DISCARD = __constant_htons(0x0000), 215 SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000),
216 SCTP_PARAM_ACTION_DISCARD_ERR = __constant_htons(0x4000), 216 SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000),
217 SCTP_PARAM_ACTION_SKIP = __constant_htons(0x8000), 217 SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000),
218 SCTP_PARAM_ACTION_SKIP_ERR = __constant_htons(0xc000), 218 SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000),
219} sctp_param_action_t; 219} sctp_param_action_t;
220 220
221enum { SCTP_PARAM_ACTION_MASK = __constant_htons(0xc000), }; 221enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), };
222 222
223/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */ 223/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */
224 224
@@ -457,17 +457,17 @@ typedef struct sctp_operr_chunk {
457 */ 457 */
458typedef enum { 458typedef enum {
459 459
460 SCTP_ERROR_NO_ERROR = __constant_htons(0x00), 460 SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00),
461 SCTP_ERROR_INV_STRM = __constant_htons(0x01), 461 SCTP_ERROR_INV_STRM = cpu_to_be16(0x01),
462 SCTP_ERROR_MISS_PARAM = __constant_htons(0x02), 462 SCTP_ERROR_MISS_PARAM = cpu_to_be16(0x02),
463 SCTP_ERROR_STALE_COOKIE = __constant_htons(0x03), 463 SCTP_ERROR_STALE_COOKIE = cpu_to_be16(0x03),
464 SCTP_ERROR_NO_RESOURCE = __constant_htons(0x04), 464 SCTP_ERROR_NO_RESOURCE = cpu_to_be16(0x04),
465 SCTP_ERROR_DNS_FAILED = __constant_htons(0x05), 465 SCTP_ERROR_DNS_FAILED = cpu_to_be16(0x05),
466 SCTP_ERROR_UNKNOWN_CHUNK = __constant_htons(0x06), 466 SCTP_ERROR_UNKNOWN_CHUNK = cpu_to_be16(0x06),
467 SCTP_ERROR_INV_PARAM = __constant_htons(0x07), 467 SCTP_ERROR_INV_PARAM = cpu_to_be16(0x07),
468 SCTP_ERROR_UNKNOWN_PARAM = __constant_htons(0x08), 468 SCTP_ERROR_UNKNOWN_PARAM = cpu_to_be16(0x08),
469 SCTP_ERROR_NO_DATA = __constant_htons(0x09), 469 SCTP_ERROR_NO_DATA = cpu_to_be16(0x09),
470 SCTP_ERROR_COOKIE_IN_SHUTDOWN = __constant_htons(0x0a), 470 SCTP_ERROR_COOKIE_IN_SHUTDOWN = cpu_to_be16(0x0a),
471 471
472 472
473 /* SCTP Implementation Guide: 473 /* SCTP Implementation Guide:
@@ -476,9 +476,9 @@ typedef enum {
476 * 13 Protocol Violation 476 * 13 Protocol Violation
477 */ 477 */
478 478
479 SCTP_ERROR_RESTART = __constant_htons(0x0b), 479 SCTP_ERROR_RESTART = cpu_to_be16(0x0b),
480 SCTP_ERROR_USER_ABORT = __constant_htons(0x0c), 480 SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c),
481 SCTP_ERROR_PROTO_VIOLATION = __constant_htons(0x0d), 481 SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
482 482
483 /* ADDIP Section 3.3 New Error Causes 483 /* ADDIP Section 3.3 New Error Causes
484 * 484 *
@@ -493,11 +493,11 @@ typedef enum {
493 * 0x0103 Association Aborted due to illegal ASCONF-ACK 493 * 0x0103 Association Aborted due to illegal ASCONF-ACK
494 * 0x0104 Request refused - no authorization. 494 * 0x0104 Request refused - no authorization.
495 */ 495 */
496 SCTP_ERROR_DEL_LAST_IP = __constant_htons(0x0100), 496 SCTP_ERROR_DEL_LAST_IP = cpu_to_be16(0x0100),
497 SCTP_ERROR_RSRC_LOW = __constant_htons(0x0101), 497 SCTP_ERROR_RSRC_LOW = cpu_to_be16(0x0101),
498 SCTP_ERROR_DEL_SRC_IP = __constant_htons(0x0102), 498 SCTP_ERROR_DEL_SRC_IP = cpu_to_be16(0x0102),
499 SCTP_ERROR_ASCONF_ACK = __constant_htons(0x0103), 499 SCTP_ERROR_ASCONF_ACK = cpu_to_be16(0x0103),
500 SCTP_ERROR_REQ_REFUSED = __constant_htons(0x0104), 500 SCTP_ERROR_REQ_REFUSED = cpu_to_be16(0x0104),
501 501
502 /* AUTH Section 4. New Error Cause 502 /* AUTH Section 4. New Error Cause
503 * 503 *
@@ -509,7 +509,7 @@ typedef enum {
509 * -------------------------------------------------------------- 509 * --------------------------------------------------------------
510 * 0x0105 Unsupported HMAC Identifier 510 * 0x0105 Unsupported HMAC Identifier
511 */ 511 */
512 SCTP_ERROR_UNSUP_HMAC = __constant_htons(0x0105) 512 SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105)
513} sctp_error_t; 513} sctp_error_t;
514 514
515 515
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cf2cb50f77d..92470084458 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -29,9 +29,6 @@
29#include <linux/dmaengine.h> 29#include <linux/dmaengine.h>
30#include <linux/hrtimer.h> 30#include <linux/hrtimer.h>
31 31
32#define HAVE_ALLOC_SKB /* For the drivers to know */
33#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
34
35/* Don't change this without changing skb_csum_unnecessary! */ 32/* Don't change this without changing skb_csum_unnecessary! */
36#define CHECKSUM_NONE 0 33#define CHECKSUM_NONE 0
37#define CHECKSUM_UNNECESSARY 1 34#define CHECKSUM_UNNECESSARY 1
@@ -1287,7 +1284,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
1287 * The networking layer reserves some headroom in skb data (via 1284 * The networking layer reserves some headroom in skb data (via
1288 * dev_alloc_skb). This is used to avoid having to reallocate skb data when 1285 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1289 * the header has to grow. In the default case, if the header has to grow 1286 * the header has to grow. In the default case, if the header has to grow
1290 * 16 bytes or less we avoid the reallocation. 1287 * 32 bytes or less we avoid the reallocation.
1291 * 1288 *
1292 * Unfortunately this headroom changes the DMA alignment of the resulting 1289 * Unfortunately this headroom changes the DMA alignment of the resulting
1293 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive 1290 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
@@ -1295,11 +1292,11 @@ static inline int skb_network_offset(const struct sk_buff *skb)
1295 * perhaps setting it to a cacheline in size (since that will maintain 1292 * perhaps setting it to a cacheline in size (since that will maintain
1296 * cacheline alignment of the DMA). It must be a power of 2. 1293 * cacheline alignment of the DMA). It must be a power of 2.
1297 * 1294 *
1298 * Various parts of the networking layer expect at least 16 bytes of 1295 * Various parts of the networking layer expect at least 32 bytes of
1299 * headroom, you should not reduce this. 1296 * headroom, you should not reduce this.
1300 */ 1297 */
1301#ifndef NET_SKB_PAD 1298#ifndef NET_SKB_PAD
1302#define NET_SKB_PAD 16 1299#define NET_SKB_PAD 32
1303#endif 1300#endif
1304 1301
1305extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1302extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
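
The updated comment keeps the rationale for NET_SKB_PAD: netdev_alloc_skb() reserves that many bytes of headroom so a later header push of up to NET_SKB_PAD bytes avoids a reallocation, and an architecture may raise the value (to a power of two, ideally a cache line) before <linux/skbuff.h> is pulled in. A small sketch of an RX allocation that relies on this; example_rx_alloc() is hypothetical:

/*
 * Sketch: netdev_alloc_skb() already reserves NET_SKB_PAD (now 32) bytes
 * of headroom internally; the driver only adds NET_IP_ALIGN on top to
 * align the IP header.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}
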
@@ -1687,8 +1684,6 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
1687 int shiftlen); 1684 int shiftlen);
1688 1685
1689extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); 1686extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1690extern int skb_gro_receive(struct sk_buff **head,
1691 struct sk_buff *skb);
1692 1687
1693static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 1688static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1694 int len, void *buffer) 1689 int len, void *buffer)
@@ -1904,6 +1899,21 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu
1904 to->queue_mapping = from->queue_mapping; 1899 to->queue_mapping = from->queue_mapping;
1905} 1900}
1906 1901
1902static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
1903{
1904 skb->queue_mapping = rx_queue + 1;
1905}
1906
1907static inline u16 skb_get_rx_queue(struct sk_buff *skb)
1908{
1909 return skb->queue_mapping - 1;
1910}
1911
1912static inline bool skb_rx_queue_recorded(struct sk_buff *skb)
1913{
1914 return (skb->queue_mapping != 0);
1915}
1916
1907#ifdef CONFIG_XFRM 1917#ifdef CONFIG_XFRM
1908static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 1918static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
1909{ 1919{
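
The new helpers store the hardware RX queue in queue_mapping offset by one, so zero keeps meaning "not recorded". A sketch of how a multiqueue driver's RX path might use them; example_rx() and its arguments are hypothetical:

/*
 * Sketch only: tag each received skb with the hardware queue it arrived
 * on, so a later queue selection can keep the flow on a matching TX queue.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>

static void example_rx(struct sk_buff *skb, u16 hw_queue)
{
	skb_record_rx_queue(skb, hw_queue);	/* stored as hw_queue + 1 */

	if (skb_rx_queue_recorded(skb))
		pr_debug("packet came in on RX queue %u\n",
			 skb_get_rx_queue(skb));
}
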
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h
index 1cbf0313add..b32725075d7 100644
--- a/include/linux/smsc911x.h
+++ b/include/linux/smsc911x.h
@@ -43,5 +43,8 @@ struct smsc911x_platform_config {
43/* Constants for flags */ 43/* Constants for flags */
44#define SMSC911X_USE_16BIT (BIT(0)) 44#define SMSC911X_USE_16BIT (BIT(0))
45#define SMSC911X_USE_32BIT (BIT(1)) 45#define SMSC911X_USE_32BIT (BIT(1))
46#define SMSC911X_FORCE_INTERNAL_PHY (BIT(2))
47#define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3))
48#define SMSC911X_SAVE_MAC_ADDRESS (BIT(4))
46 49
47#endif /* __LINUX_SMSC911X_H__ */ 50#endif /* __LINUX_SMSC911X_H__ */
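
Assuming the platform config structure carries a flags field (suggested by the "Constants for flags" comment, but not shown in this hunk), a board file could combine the new bits roughly like this hypothetical fragment:

/*
 * Hypothetical board-file fragment; the .flags field name is an assumption.
 */
#include <linux/smsc911x.h>

static struct smsc911x_platform_config example_smsc911x_config = {
	.flags = SMSC911X_USE_32BIT |
		 SMSC911X_FORCE_INTERNAL_PHY |
		 SMSC911X_SAVE_MAC_ADDRESS,
};
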
diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h
new file mode 100644
index 00000000000..79506f5f9e6
--- /dev/null
+++ b/include/linux/spi/libertas_spi.h
@@ -0,0 +1,32 @@
1/*
2 * board-specific data for the libertas_spi driver.
3 *
4 * Copyright 2008 Analog Devices Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11#ifndef _LIBERTAS_SPI_H_
12#define _LIBERTAS_SPI_H_
13
14struct spi_device;
15
16struct libertas_spi_platform_data {
17 /* There are two ways to read data from the WLAN module's SPI
18 * interface. Setting 0 or 1 here controls which one is used.
19 *
20 * Usually you want to set use_dummy_writes = 1.
21 * However, if that doesn't work or if you are using a slow SPI clock
22 * speed, you may want to use 0 here. */
23 u16 use_dummy_writes;
24
25 /* GPIO number to use as chip select */
26 u16 gpio_cs;
27
28 /* Board specific setup/teardown */
29 int (*setup)(struct spi_device *spi);
30 int (*teardown)(struct spi_device *spi);
31};
32#endif
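
A hypothetical board-file fragment registering the WLAN module with this platform data; the bus number, chip-select values, clock speed and the "libertas_spi" modalias are example assumptions, not taken from the patch:

/*
 * Sketch of board setup: hand the new platform data to the SPI core.
 * All numeric values are made-up examples.
 */
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/libertas_spi.h>

static struct libertas_spi_platform_data example_libertas_pdata = {
	.use_dummy_writes = 1,	/* the usual, faster mode */
	.gpio_cs          = 4,	/* example chip-select GPIO */
};

static struct spi_board_info example_board_spi_devices[] __initdata = {
	{
		.modalias       = "libertas_spi",
		.platform_data  = &example_libertas_pdata,
		.max_speed_hz   = 13000000,
		.bus_num        = 1,
		.chip_select    = 0,
	},
};

/* in the board init code, something like:
 * spi_register_board_info(example_board_spi_devices,
 *                         ARRAY_SIZE(example_board_spi_devices));
 */
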
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 7d7e03dcf77..d3b1d18922f 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -181,6 +181,16 @@
181#define SSB_CHIPCO_PROG_WAITCNT 0x0124 181#define SSB_CHIPCO_PROG_WAITCNT 0x0124
182#define SSB_CHIPCO_FLASH_CFG 0x0128 182#define SSB_CHIPCO_FLASH_CFG 0x0128
183#define SSB_CHIPCO_FLASH_WAITCNT 0x012C 183#define SSB_CHIPCO_FLASH_WAITCNT 0x012C
184#define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */
185#define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */
186#define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */
187#define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */
188#define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */
189#define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */
190#define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */
191#define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00010000 /* HT available */
 192#define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00020000 /* ALP available */

193#define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
184#define SSB_CHIPCO_UART0_DATA 0x0300 194#define SSB_CHIPCO_UART0_DATA 0x0300
185#define SSB_CHIPCO_UART0_IMR 0x0304 195#define SSB_CHIPCO_UART0_IMR 0x0304
186#define SSB_CHIPCO_UART0_FCR 0x0308 196#define SSB_CHIPCO_UART0_FCR 0x0308
@@ -197,6 +207,196 @@
197#define SSB_CHIPCO_UART1_LSR 0x0414 207#define SSB_CHIPCO_UART1_LSR 0x0414
198#define SSB_CHIPCO_UART1_MSR 0x0418 208#define SSB_CHIPCO_UART1_MSR 0x0418
199#define SSB_CHIPCO_UART1_SCRATCH 0x041C 209#define SSB_CHIPCO_UART1_SCRATCH 0x041C
210/* PMU registers (rev >= 20) */
211#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */
212#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
213#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16
214#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
215#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
216#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
217#define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */
218#define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2
219#define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */
220#define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */
221#define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */
222#define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */
223#define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */
224#define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */
225#define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */
226#define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */
227#define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */
228#define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */
229#define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */
230#define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */
231#define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */
232#define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */
233#define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */
234#define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */
235#define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */
236#define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */
237#define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */
238#define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */
239#define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */
240#define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */
241#define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */
242#define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */
243#define SSB_CHIPCO_CHIPCTL_ADDR 0x0650
244#define SSB_CHIPCO_CHIPCTL_DATA 0x0654
245#define SSB_CHIPCO_REGCTL_ADDR 0x0658
246#define SSB_CHIPCO_REGCTL_DATA 0x065C
247#define SSB_CHIPCO_PLLCTL_ADDR 0x0660
248#define SSB_CHIPCO_PLLCTL_DATA 0x0664
249
250
251
252/** PMU PLL registers */
253
254/* PMU rev 0 PLL registers */
255#define SSB_PMU0_PLLCTL0 0
256#define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001
257#define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */
258#define SSB_PMU0_PLLCTL1 1
259#define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */
260#define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28
261#define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */
262#define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8
263#define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */
264#define SSB_PMU0_PLLCTL2 2
265#define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */
266#define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0
267
268/* PMU rev 1 PLL registers */
269#define SSB_PMU1_PLLCTL0 0
270#define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */
271#define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20
272#define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */
273#define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24
274#define SSB_PMU1_PLLCTL1 1
275#define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */
276#define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0
277#define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */
278#define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 8
279#define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */
280#define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16
281#define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */
282#define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24
283#define SSB_PMU1_PLLCTL2 2
284#define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */
285#define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0
286#define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */
287#define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8
288#define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */
289#define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17
290#define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* NDIV int */
291#define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20
292#define SSB_PMU1_PLLCTL3 3
293#define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */
294#define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0
295#define SSB_PMU1_PLLCTL4 4
296#define SSB_PMU1_PLLCTL5 5
297#define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */
298#define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8
299
300/* BCM4312 PLL resource numbers. */
301#define SSB_PMURES_4312_SWITCHER_BURST 0
302#define SSB_PMURES_4312_SWITCHER_PWM 1
303#define SSB_PMURES_4312_PA_REF_LDO 2
304#define SSB_PMURES_4312_CORE_LDO_BURST 3
305#define SSB_PMURES_4312_CORE_LDO_PWM 4
306#define SSB_PMURES_4312_RADIO_LDO 5
307#define SSB_PMURES_4312_ILP_REQUEST 6
308#define SSB_PMURES_4312_BG_FILTBYP 7
309#define SSB_PMURES_4312_TX_FILTBYP 8
310#define SSB_PMURES_4312_RX_FILTBYP 9
311#define SSB_PMURES_4312_XTAL_PU 10
312#define SSB_PMURES_4312_ALP_AVAIL 11
313#define SSB_PMURES_4312_BB_PLL_FILTBYP 12
314#define SSB_PMURES_4312_RF_PLL_FILTBYP 13
315#define SSB_PMURES_4312_HT_AVAIL 14
316
317/* BCM4325 PLL resource numbers. */
318#define SSB_PMURES_4325_BUCK_BOOST_BURST 0
319#define SSB_PMURES_4325_CBUCK_BURST 1
320#define SSB_PMURES_4325_CBUCK_PWM 2
321#define SSB_PMURES_4325_CLDO_CBUCK_BURST 3
322#define SSB_PMURES_4325_CLDO_CBUCK_PWM 4
323#define SSB_PMURES_4325_BUCK_BOOST_PWM 5
324#define SSB_PMURES_4325_ILP_REQUEST 6
325#define SSB_PMURES_4325_ABUCK_BURST 7
326#define SSB_PMURES_4325_ABUCK_PWM 8
327#define SSB_PMURES_4325_LNLDO1_PU 9
328#define SSB_PMURES_4325_LNLDO2_PU 10
329#define SSB_PMURES_4325_LNLDO3_PU 11
330#define SSB_PMURES_4325_LNLDO4_PU 12
331#define SSB_PMURES_4325_XTAL_PU 13
332#define SSB_PMURES_4325_ALP_AVAIL 14
333#define SSB_PMURES_4325_RX_PWRSW_PU 15
334#define SSB_PMURES_4325_TX_PWRSW_PU 16
335#define SSB_PMURES_4325_RFPLL_PWRSW_PU 17
336#define SSB_PMURES_4325_LOGEN_PWRSW_PU 18
337#define SSB_PMURES_4325_AFE_PWRSW_PU 19
338#define SSB_PMURES_4325_BBPLL_PWRSW_PU 20
339#define SSB_PMURES_4325_HT_AVAIL 21
340
341/* BCM4328 PLL resource numbers. */
342#define SSB_PMURES_4328_EXT_SWITCHER_PWM 0
343#define SSB_PMURES_4328_BB_SWITCHER_PWM 1
344#define SSB_PMURES_4328_BB_SWITCHER_BURST 2
345#define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3
346#define SSB_PMURES_4328_ILP_REQUEST 4
347#define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5
348#define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6
349#define SSB_PMURES_4328_ROM_SWITCH 7
350#define SSB_PMURES_4328_PA_REF_LDO 8
351#define SSB_PMURES_4328_RADIO_LDO 9
352#define SSB_PMURES_4328_AFE_LDO 10
353#define SSB_PMURES_4328_PLL_LDO 11
354#define SSB_PMURES_4328_BG_FILTBYP 12
355#define SSB_PMURES_4328_TX_FILTBYP 13
356#define SSB_PMURES_4328_RX_FILTBYP 14
357#define SSB_PMURES_4328_XTAL_PU 15
358#define SSB_PMURES_4328_XTAL_EN 16
359#define SSB_PMURES_4328_BB_PLL_FILTBYP 17
360#define SSB_PMURES_4328_RF_PLL_FILTBYP 18
361#define SSB_PMURES_4328_BB_PLL_PU 19
362
363/* BCM5354 PLL resource numbers. */
364#define SSB_PMURES_5354_EXT_SWITCHER_PWM 0
365#define SSB_PMURES_5354_BB_SWITCHER_PWM 1
366#define SSB_PMURES_5354_BB_SWITCHER_BURST 2
367#define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3
368#define SSB_PMURES_5354_ILP_REQUEST 4
369#define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5
370#define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6
371#define SSB_PMURES_5354_ROM_SWITCH 7
372#define SSB_PMURES_5354_PA_REF_LDO 8
373#define SSB_PMURES_5354_RADIO_LDO 9
374#define SSB_PMURES_5354_AFE_LDO 10
375#define SSB_PMURES_5354_PLL_LDO 11
376#define SSB_PMURES_5354_BG_FILTBYP 12
377#define SSB_PMURES_5354_TX_FILTBYP 13
378#define SSB_PMURES_5354_RX_FILTBYP 14
379#define SSB_PMURES_5354_XTAL_PU 15
380#define SSB_PMURES_5354_XTAL_EN 16
381#define SSB_PMURES_5354_BB_PLL_FILTBYP 17
382#define SSB_PMURES_5354_RF_PLL_FILTBYP 18
383#define SSB_PMURES_5354_BB_PLL_PU 19
384
385
386
387/** Chip specific Chip-Status register contents. */
388#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003
389#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
390#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
391#define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */
392#define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
393#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004
394#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2
395#define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008
396#define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3
397#define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0
398#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4
 399#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for 2a */
200 400
201 401
202 402
@@ -353,11 +553,20 @@
353struct ssb_device; 553struct ssb_device;
354struct ssb_serial_port; 554struct ssb_serial_port;
355 555
556/* Data for the PMU, if available.
557 * Check availability with ((struct ssb_chipcommon)->capabilities & SSB_CHIPCO_CAP_PMU)
558 */
559struct ssb_chipcommon_pmu {
560 u8 rev; /* PMU revision */
561 u32 crystalfreq; /* The active crystal frequency (in kHz) */
562};
563
356struct ssb_chipcommon { 564struct ssb_chipcommon {
357 struct ssb_device *dev; 565 struct ssb_device *dev;
358 u32 capabilities; 566 u32 capabilities;
359 /* Fast Powerup Delay constant */ 567 /* Fast Powerup Delay constant */
360 u16 fast_pwrup_delay; 568 u16 fast_pwrup_delay;
569 struct ssb_chipcommon_pmu pmu;
361}; 570};
362 571
363static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) 572static inline bool ssb_chipco_available(struct ssb_chipcommon *cc)
@@ -365,6 +574,17 @@ static inline bool ssb_chipco_available(struct ssb_chipcommon *cc)
365 return (cc->dev != NULL); 574 return (cc->dev != NULL);
366} 575}
367 576
577/* Register access */
578#define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset)
579#define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val)
580
581#define chipco_mask32(cc, offset, mask) \
582 chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask))
583#define chipco_set32(cc, offset, set) \
584 chipco_write32(cc, offset, chipco_read32(cc, offset) | (set))
585#define chipco_maskset32(cc, offset, mask, set) \
586 chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set))
587
368extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); 588extern void ssb_chipcommon_init(struct ssb_chipcommon *cc);
369 589
370extern void ssb_chipco_suspend(struct ssb_chipcommon *cc); 590extern void ssb_chipco_suspend(struct ssb_chipcommon *cc);
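
The new chipco_*32() accessors simply wrap ssb_read32()/ssb_write32() on the ChipCommon core. As a sketch (not from the patch), they could be combined with the new CLKCTLST bits to force an ALP clock request and poll for it; example_request_alp() is a made-up helper with no timeout handling:

/*
 * Sketch only: request the ALP clock via the new accessors and wait for
 * the HAVEALP status bit.  Real code would bound the loop.
 */
#include <linux/delay.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>

static void example_request_alp(struct ssb_chipcommon *cc)
{
	chipco_set32(cc, SSB_CHIPCO_CLKCTLST, SSB_CHIPCO_CLKCTLST_FORCEALP);

	while (!(chipco_read32(cc, SSB_CHIPCO_CLKCTLST) &
		 SSB_CHIPCO_CLKCTLST_HAVEALP))
		udelay(10);
}
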
@@ -406,4 +626,8 @@ extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
406 struct ssb_serial_port *ports); 626 struct ssb_serial_port *ports);
407#endif /* CONFIG_SSB_SERIAL */ 627#endif /* CONFIG_SSB_SERIAL */
408 628
629/* PMU support */
630extern void ssb_pmu_init(struct ssb_chipcommon *cc);
631
632
409#endif /* LINUX_SSB_CHIPCO_H_ */ 633#endif /* LINUX_SSB_CHIPCO_H_ */
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 99a0f991e85..a01b982b578 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -326,6 +326,42 @@
326#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ 326#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */
327#define SSB_SPROM5_GPIOB_P3_SHIFT 8 327#define SSB_SPROM5_GPIOB_P3_SHIFT 8
328 328
329/* SPROM Revision 8 */
330#define SSB_SPROM8_BFLLO 0x1084 /* Boardflags (low 16 bits) */
331#define SSB_SPROM8_BFLHI 0x1086 /* Boardflags Hi */
332#define SSB_SPROM8_IL0MAC 0x108C /* 6 byte MAC address */
333#define SSB_SPROM8_CCODE 0x1092 /* 2 byte country code */
334#define SSB_SPROM8_ANTAVAIL 0x109C /* Antenna available bitfields*/
335#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
336#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
337#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */
338#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0
339#define SSB_SPROM8_AGAIN01 0x109E /* Antenna Gain (in dBm Q5.2) */
340#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */
341#define SSB_SPROM8_AGAIN0_SHIFT 0
342#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */
343#define SSB_SPROM8_AGAIN1_SHIFT 8
344#define SSB_SPROM8_AGAIN23 0x10A0
345#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */
346#define SSB_SPROM8_AGAIN2_SHIFT 0
347#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */
348#define SSB_SPROM8_AGAIN3_SHIFT 8
349#define SSB_SPROM8_GPIOA 0x1096 /*Gen. Purpose IO # 0 and 1 */
350#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */
351#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */
352#define SSB_SPROM8_GPIOA_P1_SHIFT 8
353#define SSB_SPROM8_GPIOB 0x1098 /* Gen. Purpose IO # 2 and 3 */
354#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
355#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
356#define SSB_SPROM8_GPIOB_P3_SHIFT 8
357#define SSB_SPROM8_MAXP_BG 0x10C0 /* Max Power BG in path 1 */
358#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */
359#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
360#define SSB_SPROM8_ITSSI_BG_SHIFT 8
361#define SSB_SPROM8_MAXP_A 0x10C8 /* Max Power A in path 1 */
362#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power A */
363#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
364#define SSB_SPROM8_ITSSI_A_SHIFT 8
329 365
330/* Values for SSB_SPROM1_BINF_CCODE */ 366/* Values for SSB_SPROM1_BINF_CCODE */
331enum { 367enum {
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 39d471d1163..e76d3b22a46 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -490,6 +490,7 @@ enum
490 NET_IPV4_CONF_ARP_IGNORE=19, 490 NET_IPV4_CONF_ARP_IGNORE=19,
491 NET_IPV4_CONF_PROMOTE_SECONDARIES=20, 491 NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
492 NET_IPV4_CONF_ARP_ACCEPT=21, 492 NET_IPV4_CONF_ARP_ACCEPT=21,
493 NET_IPV4_CONF_ARP_NOTIFY=22,
493 __NET_IPV4_CONF_MAX 494 __NET_IPV4_CONF_MAX
494}; 495};
495 496
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fe77e1499ab..0cd99e6baca 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -69,16 +69,16 @@ union tcp_word_hdr {
69#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 69#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
70 70
71enum { 71enum {
72 TCP_FLAG_CWR = __constant_htonl(0x00800000), 72 TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
73 TCP_FLAG_ECE = __constant_htonl(0x00400000), 73 TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
74 TCP_FLAG_URG = __constant_htonl(0x00200000), 74 TCP_FLAG_URG = __cpu_to_be32(0x00200000),
75 TCP_FLAG_ACK = __constant_htonl(0x00100000), 75 TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
76 TCP_FLAG_PSH = __constant_htonl(0x00080000), 76 TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
77 TCP_FLAG_RST = __constant_htonl(0x00040000), 77 TCP_FLAG_RST = __cpu_to_be32(0x00040000),
78 TCP_FLAG_SYN = __constant_htonl(0x00020000), 78 TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
79 TCP_FLAG_FIN = __constant_htonl(0x00010000), 79 TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
80 TCP_RESERVED_BITS = __constant_htonl(0x0F000000), 80 TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
81 TCP_DATA_OFFSET = __constant_htonl(0xF0000000) 81 TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
82}; 82};
83 83
84/* TCP socket options */ 84/* TCP socket options */
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index 0a6e6d4b929..37836b937d9 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -49,48 +49,45 @@ struct rndis_msg_hdr {
49 */ 49 */
50#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000) 50#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000)
51 51
52 52#define RNDIS_MSG_COMPLETION cpu_to_le32(0x80000000)
53#define ccpu2 __constant_cpu_to_le32
54
55#define RNDIS_MSG_COMPLETION ccpu2(0x80000000)
56 53
57/* codes for "msg_type" field of rndis messages; 54/* codes for "msg_type" field of rndis messages;
58 * only the data channel uses packet messages (maybe batched); 55 * only the data channel uses packet messages (maybe batched);
59 * everything else goes on the control channel. 56 * everything else goes on the control channel.
60 */ 57 */
61#define RNDIS_MSG_PACKET ccpu2(0x00000001) /* 1-N packets */ 58#define RNDIS_MSG_PACKET cpu_to_le32(0x00000001) /* 1-N packets */
62#define RNDIS_MSG_INIT ccpu2(0x00000002) 59#define RNDIS_MSG_INIT cpu_to_le32(0x00000002)
63#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION) 60#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION)
64#define RNDIS_MSG_HALT ccpu2(0x00000003) 61#define RNDIS_MSG_HALT cpu_to_le32(0x00000003)
65#define RNDIS_MSG_QUERY ccpu2(0x00000004) 62#define RNDIS_MSG_QUERY cpu_to_le32(0x00000004)
66#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION) 63#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION)
67#define RNDIS_MSG_SET ccpu2(0x00000005) 64#define RNDIS_MSG_SET cpu_to_le32(0x00000005)
68#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION) 65#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION)
69#define RNDIS_MSG_RESET ccpu2(0x00000006) 66#define RNDIS_MSG_RESET cpu_to_le32(0x00000006)
70#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION) 67#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION)
71#define RNDIS_MSG_INDICATE ccpu2(0x00000007) 68#define RNDIS_MSG_INDICATE cpu_to_le32(0x00000007)
72#define RNDIS_MSG_KEEPALIVE ccpu2(0x00000008) 69#define RNDIS_MSG_KEEPALIVE cpu_to_le32(0x00000008)
73#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION) 70#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION)
74 71
75/* codes for "status" field of completion messages */ 72/* codes for "status" field of completion messages */
76#define RNDIS_STATUS_SUCCESS ccpu2(0x00000000) 73#define RNDIS_STATUS_SUCCESS cpu_to_le32(0x00000000)
77#define RNDIS_STATUS_FAILURE ccpu2(0xc0000001) 74#define RNDIS_STATUS_FAILURE cpu_to_le32(0xc0000001)
78#define RNDIS_STATUS_INVALID_DATA ccpu2(0xc0010015) 75#define RNDIS_STATUS_INVALID_DATA cpu_to_le32(0xc0010015)
79#define RNDIS_STATUS_NOT_SUPPORTED ccpu2(0xc00000bb) 76#define RNDIS_STATUS_NOT_SUPPORTED cpu_to_le32(0xc00000bb)
80#define RNDIS_STATUS_MEDIA_CONNECT ccpu2(0x4001000b) 77#define RNDIS_STATUS_MEDIA_CONNECT cpu_to_le32(0x4001000b)
81#define RNDIS_STATUS_MEDIA_DISCONNECT ccpu2(0x4001000c) 78#define RNDIS_STATUS_MEDIA_DISCONNECT cpu_to_le32(0x4001000c)
82 79
83/* codes for OID_GEN_PHYSICAL_MEDIUM */ 80/* codes for OID_GEN_PHYSICAL_MEDIUM */
84#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED ccpu2(0x00000000) 81#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED cpu_to_le32(0x00000000)
85#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN ccpu2(0x00000001) 82#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN cpu_to_le32(0x00000001)
86#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM ccpu2(0x00000002) 83#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM cpu_to_le32(0x00000002)
87#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE ccpu2(0x00000003) 84#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE cpu_to_le32(0x00000003)
88#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE ccpu2(0x00000004) 85#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE cpu_to_le32(0x00000004)
89#define RNDIS_PHYSICAL_MEDIUM_DSL ccpu2(0x00000005) 86#define RNDIS_PHYSICAL_MEDIUM_DSL cpu_to_le32(0x00000005)
90#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL ccpu2(0x00000006) 87#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL cpu_to_le32(0x00000006)
91#define RNDIS_PHYSICAL_MEDIUM_1394 ccpu2(0x00000007) 88#define RNDIS_PHYSICAL_MEDIUM_1394 cpu_to_le32(0x00000007)
92#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN ccpu2(0x00000008) 89#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN cpu_to_le32(0x00000008)
93#define RNDIS_PHYSICAL_MEDIUM_MAX ccpu2(0x00000009) 90#define RNDIS_PHYSICAL_MEDIUM_MAX cpu_to_le32(0x00000009)
94 91
95struct rndis_data_hdr { 92struct rndis_data_hdr {
96 __le32 msg_type; /* RNDIS_MSG_PACKET */ 93 __le32 msg_type; /* RNDIS_MSG_PACKET */
@@ -228,24 +225,24 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */
228 * there are gobs more that may optionally be supported. We'll avoid as much 225 * there are gobs more that may optionally be supported. We'll avoid as much
229 * of that mess as possible. 226 * of that mess as possible.
230 */ 227 */
231#define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101) 228#define OID_802_3_PERMANENT_ADDRESS cpu_to_le32(0x01010101)
232#define OID_GEN_MAXIMUM_FRAME_SIZE ccpu2(0x00010106) 229#define OID_GEN_MAXIMUM_FRAME_SIZE cpu_to_le32(0x00010106)
233#define OID_GEN_CURRENT_PACKET_FILTER ccpu2(0x0001010e) 230#define OID_GEN_CURRENT_PACKET_FILTER cpu_to_le32(0x0001010e)
234#define OID_GEN_PHYSICAL_MEDIUM ccpu2(0x00010202) 231#define OID_GEN_PHYSICAL_MEDIUM cpu_to_le32(0x00010202)
235 232
236/* packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */ 233/* packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */
237#define RNDIS_PACKET_TYPE_DIRECTED ccpu2(0x00000001) 234#define RNDIS_PACKET_TYPE_DIRECTED cpu_to_le32(0x00000001)
238#define RNDIS_PACKET_TYPE_MULTICAST ccpu2(0x00000002) 235#define RNDIS_PACKET_TYPE_MULTICAST cpu_to_le32(0x00000002)
239#define RNDIS_PACKET_TYPE_ALL_MULTICAST ccpu2(0x00000004) 236#define RNDIS_PACKET_TYPE_ALL_MULTICAST cpu_to_le32(0x00000004)
240#define RNDIS_PACKET_TYPE_BROADCAST ccpu2(0x00000008) 237#define RNDIS_PACKET_TYPE_BROADCAST cpu_to_le32(0x00000008)
241#define RNDIS_PACKET_TYPE_SOURCE_ROUTING ccpu2(0x00000010) 238#define RNDIS_PACKET_TYPE_SOURCE_ROUTING cpu_to_le32(0x00000010)
242#define RNDIS_PACKET_TYPE_PROMISCUOUS ccpu2(0x00000020) 239#define RNDIS_PACKET_TYPE_PROMISCUOUS cpu_to_le32(0x00000020)
243#define RNDIS_PACKET_TYPE_SMT ccpu2(0x00000040) 240#define RNDIS_PACKET_TYPE_SMT cpu_to_le32(0x00000040)
244#define RNDIS_PACKET_TYPE_ALL_LOCAL ccpu2(0x00000080) 241#define RNDIS_PACKET_TYPE_ALL_LOCAL cpu_to_le32(0x00000080)
245#define RNDIS_PACKET_TYPE_GROUP ccpu2(0x00001000) 242#define RNDIS_PACKET_TYPE_GROUP cpu_to_le32(0x00001000)
246#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL ccpu2(0x00002000) 243#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL cpu_to_le32(0x00002000)
247#define RNDIS_PACKET_TYPE_FUNCTIONAL ccpu2(0x00004000) 244#define RNDIS_PACKET_TYPE_FUNCTIONAL cpu_to_le32(0x00004000)
248#define RNDIS_PACKET_TYPE_MAC_FRAME ccpu2(0x00008000) 245#define RNDIS_PACKET_TYPE_MAC_FRAME cpu_to_le32(0x00008000)
249 246
250/* default filter used with RNDIS devices */ 247/* default filter used with RNDIS devices */
251#define RNDIS_DEFAULT_FILTER ( \ 248#define RNDIS_DEFAULT_FILTER ( \
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 3efa86c3ecb..242348bb376 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -22,11 +22,19 @@
22#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ 22#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
23#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ 23#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
24#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ 24#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
25#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
26#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
27#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
28#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
29
30#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
25 31
26struct virtio_net_config 32struct virtio_net_config
27{ 33{
28 /* The config defining mac address (if VIRTIO_NET_F_MAC) */ 34 /* The config defining mac address (if VIRTIO_NET_F_MAC) */
29 __u8 mac[6]; 35 __u8 mac[6];
36 /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
37 __u16 status;
30} __attribute__((packed)); 38} __attribute__((packed));
31 39
32/* This is the first element of the scatter-gather list. If you don't 40/* This is the first element of the scatter-gather list. If you don't
@@ -54,4 +62,67 @@ struct virtio_net_hdr_mrg_rxbuf {
54 __u16 num_buffers; /* Number of merged rx buffers */ 62 __u16 num_buffers; /* Number of merged rx buffers */
55}; 63};
56 64
65/*
66 * Control virtqueue data structures
67 *
68 * The control virtqueue expects a header in the first sg entry
69 * and an ack/status response in the last entry. Data for the
70 * command goes in between.
71 */
72struct virtio_net_ctrl_hdr {
73 __u8 class;
74 __u8 cmd;
75} __attribute__((packed));
76
77typedef __u8 virtio_net_ctrl_ack;
78
79#define VIRTIO_NET_OK 0
80#define VIRTIO_NET_ERR 1
81
82/*
 83 * Control the RX mode, i.e. promiscuous and allmulti. PROMISC and
84 * ALLMULTI commands require an "out" sg entry containing a 1 byte
85 * state value, zero = disable, non-zero = enable. These commands
86 * are supported with the VIRTIO_NET_F_CTRL_RX feature.
87 */
88#define VIRTIO_NET_CTRL_RX 0
89 #define VIRTIO_NET_CTRL_RX_PROMISC 0
90 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
91
92/*
93 * Control the MAC filter table.
94 *
 95 * The MAC filter table is managed by the hypervisor; the guest should
 96 * assume the size is infinite. Filtering should be considered
 97 * non-perfect, i.e. based on hypervisor resources, the guest may
 98 * receive packets from sources not specified in the filter list.
99 *
100 * In addition to the class/cmd header, the TABLE_SET command requires
101 * two out scatterlists. Each contains a 4 byte count of entries followed
102 * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
103 * first sg list contains unicast addresses, the second is for multicast.
104 * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
105 * is available.
106 */
107struct virtio_net_ctrl_mac {
108 __u32 entries;
109 __u8 macs[][ETH_ALEN];
110} __attribute__((packed));
111
112#define VIRTIO_NET_CTRL_MAC 1
113 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
114
115/*
116 * Control VLAN filtering
117 *
118 * The VLAN filter table is controlled via a simple ADD/DEL interface.
 119 * The VLAN filter table is controlled via a simple ADD/DEL interface.
 120 * VLAN IDs not added may be filtered by the hypervisor. Del is the
 121 * opposite of add. Both commands expect an out entry containing a 2
122 * VIRTIO_NET_F_CTRL_VLAN feature bit.
123 */
124#define VIRTIO_NET_CTRL_VLAN 2
125 #define VIRTIO_NET_CTRL_VLAN_ADD 0
126 #define VIRTIO_NET_CTRL_VLAN_DEL 1
127
57#endif /* _LINUX_VIRTIO_NET_H */ 128#endif /* _LINUX_VIRTIO_NET_H */
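
Per the comment block, every control command is a header sg entry, optional command data, and a one-byte ack filled in by the device. A sketch of the buffer layout a guest driver would post to enable promiscuous RX mode; posting the scatterlist to the control virtqueue itself is driver-specific and omitted, and the function name is hypothetical:

/*
 * Sketch only: lay out header ("out"), 1-byte state ("out") and
 * 1-byte ack ("in") as described in the comments above.
 */
#include <linux/scatterlist.h>
#include <linux/virtio_net.h>

static void example_build_promisc_cmd(struct scatterlist sg[3],
				      struct virtio_net_ctrl_hdr *hdr,
				      u8 *on, virtio_net_ctrl_ack *ack)
{
	hdr->class = VIRTIO_NET_CTRL_RX;
	hdr->cmd   = VIRTIO_NET_CTRL_RX_PROMISC;
	*on  = 1;			/* non-zero = enable */
	*ack = VIRTIO_NET_ERR;		/* device overwrites with VIRTIO_NET_OK */

	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], hdr, sizeof(*hdr));	/* "out": command header   */
	sg_set_buf(&sg[1], on,  sizeof(*on));	/* "out": 1-byte state     */
	sg_set_buf(&sg[2], ack, sizeof(*ack));	/* "in":  1-byte ack/status */
}
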
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index d7958f9b52c..cb24204851f 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -577,18 +577,22 @@
577#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8 577#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8
578#define IW_AUTH_ROAMING_CONTROL 9 578#define IW_AUTH_ROAMING_CONTROL 9
579#define IW_AUTH_PRIVACY_INVOKED 10 579#define IW_AUTH_PRIVACY_INVOKED 10
580#define IW_AUTH_CIPHER_GROUP_MGMT 11
581#define IW_AUTH_MFP 12
580 582
581/* IW_AUTH_WPA_VERSION values (bit field) */ 583/* IW_AUTH_WPA_VERSION values (bit field) */
582#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001 584#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001
583#define IW_AUTH_WPA_VERSION_WPA 0x00000002 585#define IW_AUTH_WPA_VERSION_WPA 0x00000002
584#define IW_AUTH_WPA_VERSION_WPA2 0x00000004 586#define IW_AUTH_WPA_VERSION_WPA2 0x00000004
585 587
586/* IW_AUTH_PAIRWISE_CIPHER and IW_AUTH_GROUP_CIPHER values (bit field) */ 588/* IW_AUTH_PAIRWISE_CIPHER, IW_AUTH_GROUP_CIPHER, and IW_AUTH_CIPHER_GROUP_MGMT
589 * values (bit field) */
587#define IW_AUTH_CIPHER_NONE 0x00000001 590#define IW_AUTH_CIPHER_NONE 0x00000001
588#define IW_AUTH_CIPHER_WEP40 0x00000002 591#define IW_AUTH_CIPHER_WEP40 0x00000002
589#define IW_AUTH_CIPHER_TKIP 0x00000004 592#define IW_AUTH_CIPHER_TKIP 0x00000004
590#define IW_AUTH_CIPHER_CCMP 0x00000008 593#define IW_AUTH_CIPHER_CCMP 0x00000008
591#define IW_AUTH_CIPHER_WEP104 0x00000010 594#define IW_AUTH_CIPHER_WEP104 0x00000010
595#define IW_AUTH_CIPHER_AES_CMAC 0x00000020
592 596
593/* IW_AUTH_KEY_MGMT values (bit field) */ 597/* IW_AUTH_KEY_MGMT values (bit field) */
594#define IW_AUTH_KEY_MGMT_802_1X 1 598#define IW_AUTH_KEY_MGMT_802_1X 1
@@ -604,6 +608,11 @@
604#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming 608#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming
605 * control */ 609 * control */
606 610
611/* IW_AUTH_MFP (management frame protection) values */
612#define IW_AUTH_MFP_DISABLED 0 /* MFP disabled */
613#define IW_AUTH_MFP_OPTIONAL 1 /* MFP optional */
614#define IW_AUTH_MFP_REQUIRED 2 /* MFP required */
615
607/* SIOCSIWENCODEEXT definitions */ 616/* SIOCSIWENCODEEXT definitions */
608#define IW_ENCODE_SEQ_MAX_SIZE 8 617#define IW_ENCODE_SEQ_MAX_SIZE 8
609/* struct iw_encode_ext ->alg */ 618/* struct iw_encode_ext ->alg */
@@ -612,6 +621,7 @@
612#define IW_ENCODE_ALG_TKIP 2 621#define IW_ENCODE_ALG_TKIP 2
613#define IW_ENCODE_ALG_CCMP 3 622#define IW_ENCODE_ALG_CCMP 3
614#define IW_ENCODE_ALG_PMK 4 623#define IW_ENCODE_ALG_PMK 4
624#define IW_ENCODE_ALG_AES_CMAC 5
615/* struct iw_encode_ext ->ext_flags */ 625/* struct iw_encode_ext ->ext_flags */
616#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001 626#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001
617#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002 627#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index b5a51a7bb36..467c531b8a7 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -50,7 +50,6 @@ struct atmarp_entry {
50struct clip_priv { 50struct clip_priv {
51 int number; /* for convenience ... */ 51 int number; /* for convenience ... */
52 spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ 52 spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */
53 struct net_device_stats stats;
54 struct net_device *next; /* next CLIP interface */ 53 struct net_device *next; /* next CLIP interface */
55}; 54};
56 55
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 23c0ab74ded..c0d1f5b708c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4,6 +4,10 @@
4#include <linux/netlink.h> 4#include <linux/netlink.h>
5#include <linux/skbuff.h> 5#include <linux/skbuff.h>
6#include <linux/nl80211.h> 6#include <linux/nl80211.h>
7#include <linux/if_ether.h>
8#include <linux/ieee80211.h>
9#include <linux/wireless.h>
10#include <net/iw_handler.h>
7#include <net/genetlink.h> 11#include <net/genetlink.h>
8/* remove once we remove the wext stuff */ 12/* remove once we remove the wext stuff */
9#include <net/iw_handler.h> 13#include <net/iw_handler.h>
@@ -112,12 +116,14 @@ struct beacon_parameters {
112 * @STATION_FLAG_SHORT_PREAMBLE: station is capable of receiving frames 116 * @STATION_FLAG_SHORT_PREAMBLE: station is capable of receiving frames
113 * with short preambles 117 * with short preambles
114 * @STATION_FLAG_WME: station is WME/QoS capable 118 * @STATION_FLAG_WME: station is WME/QoS capable
119 * @STATION_FLAG_MFP: station uses management frame protection
115 */ 120 */
116enum station_flags { 121enum station_flags {
117 STATION_FLAG_CHANGED = 1<<0, 122 STATION_FLAG_CHANGED = 1<<0,
118 STATION_FLAG_AUTHORIZED = 1<<NL80211_STA_FLAG_AUTHORIZED, 123 STATION_FLAG_AUTHORIZED = 1<<NL80211_STA_FLAG_AUTHORIZED,
119 STATION_FLAG_SHORT_PREAMBLE = 1<<NL80211_STA_FLAG_SHORT_PREAMBLE, 124 STATION_FLAG_SHORT_PREAMBLE = 1<<NL80211_STA_FLAG_SHORT_PREAMBLE,
120 STATION_FLAG_WME = 1<<NL80211_STA_FLAG_WME, 125 STATION_FLAG_WME = 1<<NL80211_STA_FLAG_WME,
126 STATION_FLAG_MFP = 1<<NL80211_STA_FLAG_MFP,
121}; 127};
122 128
123/** 129/**
@@ -355,6 +361,51 @@ enum reg_set_by {
355 REGDOM_SET_BY_COUNTRY_IE, 361 REGDOM_SET_BY_COUNTRY_IE,
356}; 362};
357 363
364/**
365 * enum environment_cap - Environment parsed from country IE
366 * @ENVIRON_ANY: indicates country IE applies to both indoor and
367 * outdoor operation.
368 * @ENVIRON_INDOOR: indicates country IE applies only to indoor operation
369 * @ENVIRON_OUTDOOR: indicates country IE applies only to outdoor operation
370 */
371enum environment_cap {
372 ENVIRON_ANY,
373 ENVIRON_INDOOR,
374 ENVIRON_OUTDOOR,
375};
376
377/**
378 * struct regulatory_request - receipt of last regulatory request
379 *
380 * @wiphy: this is set if this request's initiator is
381 * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This
382 * can be used by the wireless core to deal with conflicts
383 * and potentially inform users of which devices specifically
 384 * caused the conflicts.
 385 * @initiator: indicates who sent this request, could be any
 386 * of those set in reg_set_by, %REGDOM_SET_BY_*
387 * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested
388 * regulatory domain. We have a few special codes:
389 * 00 - World regulatory domain
390 * 99 - built by driver but a specific alpha2 cannot be determined
391 * 98 - result of an intersection between two regulatory domains
392 * @intersect: indicates whether the wireless core should intersect
393 * the requested regulatory domain with the presently set regulatory
394 * domain.
395 * @country_ie_checksum: checksum of the last processed and accepted
396 * country IE
397 * @country_ie_env: lets us know if the AP is telling us we are outdoor,
398 * indoor, or if it doesn't matter
399 */
400struct regulatory_request {
401 struct wiphy *wiphy;
402 enum reg_set_by initiator;
403 char alpha2[2];
404 bool intersect;
405 u32 country_ie_checksum;
406 enum environment_cap country_ie_env;
407};
408
358struct ieee80211_freq_range { 409struct ieee80211_freq_range {
359 u32 start_freq_khz; 410 u32 start_freq_khz;
360 u32 end_freq_khz; 411 u32 end_freq_khz;
@@ -431,6 +482,26 @@ struct ieee80211_txq_params {
431 u8 aifs; 482 u8 aifs;
432}; 483};
433 484
485/**
486 * struct mgmt_extra_ie_params - Extra management frame IE parameters
487 *
488 * Used to add extra IE(s) into management frames. If the driver cannot add the
489 * requested data into all management frames of the specified subtype that are
490 * generated in kernel or firmware/hardware, it must reject the configuration
491 * call. The IE data buffer is added to the end of the specified management
492 * frame body after all other IEs. This addition is not applied to frames that
493 * are injected through a monitor interface.
494 *
495 * @subtype: Management frame subtype
496 * @ies: IE data buffer or %NULL to remove previous data
497 * @ies_len: Length of @ies in octets
498 */
499struct mgmt_extra_ie_params {
500 u8 subtype;
501 u8 *ies;
502 int ies_len;
503};
504
434/* from net/wireless.h */ 505/* from net/wireless.h */
435struct wiphy; 506struct wiphy;
436 507
@@ -438,6 +509,85 @@ struct wiphy;
438struct ieee80211_channel; 509struct ieee80211_channel;
439 510
440/** 511/**
512 * struct cfg80211_ssid - SSID description
513 * @ssid: the SSID
514 * @ssid_len: length of the ssid
515 */
516struct cfg80211_ssid {
517 u8 ssid[IEEE80211_MAX_SSID_LEN];
518 u8 ssid_len;
519};
520
521/**
522 * struct cfg80211_scan_request - scan request description
523 *
524 * @ssids: SSIDs to scan for (active scan only)
525 * @n_ssids: number of SSIDs
526 * @channels: channels to scan on.
527 * @n_channels: number of channels for each band
528 * @wiphy: the wiphy this was for
529 * @ifidx: the interface index
530 */
531struct cfg80211_scan_request {
532 struct cfg80211_ssid *ssids;
533 int n_ssids;
534 struct ieee80211_channel **channels;
535 u32 n_channels;
536
537 /* internal */
538 struct wiphy *wiphy;
539 int ifidx;
540};
541
542/**
543 * enum cfg80211_signal_type - signal type
544 *
545 * @CFG80211_SIGNAL_TYPE_NONE: no signal strength information available
546 * @CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm)
547 * @CFG80211_SIGNAL_TYPE_UNSPEC: signal strength, increasing from 0 through 100
548 */
549enum cfg80211_signal_type {
550 CFG80211_SIGNAL_TYPE_NONE,
551 CFG80211_SIGNAL_TYPE_MBM,
552 CFG80211_SIGNAL_TYPE_UNSPEC,
553};
554
555/**
556 * struct cfg80211_bss - BSS description
557 *
558 * This structure describes a BSS (which may also be a mesh network)
559 * for use in scan results and similar.
560 *
561 * @bssid: BSSID of the BSS
562 * @tsf: timestamp of last received update
563 * @beacon_interval: the beacon interval as from the frame
564 * @capability: the capability field in host byte order
565 * @information_elements: the information elements (Note that there
566 * is no guarantee that these are well-formed!)
567 * @len_information_elements: total length of the information elements
568 * @signal: signal strength value
569 * @signal_type: signal type
570 * @free_priv: function pointer to free private data
571 * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
572 */
573struct cfg80211_bss {
574 struct ieee80211_channel *channel;
575
576 u8 bssid[ETH_ALEN];
577 u64 tsf;
578 u16 beacon_interval;
579 u16 capability;
580 u8 *information_elements;
581 size_t len_information_elements;
582
583 s32 signal;
584 enum cfg80211_signal_type signal_type;
585
586 void (*free_priv)(struct cfg80211_bss *bss);
587 u8 priv[0] __attribute__((__aligned__(sizeof(void *))));
588};
589
590/**
441 * struct cfg80211_ops - backend description for wireless configuration 591 * struct cfg80211_ops - backend description for wireless configuration
442 * 592 *
443 * This struct is registered by fullmac card drivers and/or wireless stacks 593 * This struct is registered by fullmac card drivers and/or wireless stacks
@@ -450,6 +600,9 @@ struct ieee80211_channel;
450 * wireless extensions but this is subject to reevaluation as soon as this 600 * wireless extensions but this is subject to reevaluation as soon as this
451 * code is used more widely and we have a first user without wext. 601 * code is used more widely and we have a first user without wext.
452 * 602 *
603 * @suspend: wiphy device needs to be suspended
604 * @resume: wiphy device needs to be resumed
605 *
453 * @add_virtual_intf: create a new virtual interface with the given name, 606 * @add_virtual_intf: create a new virtual interface with the given name,
454 * must set the struct wireless_dev's iftype. 607 * must set the struct wireless_dev's iftype.
455 * 608 *
@@ -471,6 +624,8 @@ struct ieee80211_channel;
471 * 624 *
472 * @set_default_key: set the default key on an interface 625 * @set_default_key: set the default key on an interface
473 * 626 *
627 * @set_default_mgmt_key: set the default management frame key on an interface
628 *
474 * @add_beacon: Add a beacon with given parameters, @head, @interval 629 * @add_beacon: Add a beacon with given parameters, @head, @interval
475 * and @dtim_period will be valid, @tail is optional. 630 * and @dtim_period will be valid, @tail is optional.
476 * @set_beacon: Change the beacon parameters for an access point mode 631 * @set_beacon: Change the beacon parameters for an access point mode
@@ -497,8 +652,18 @@ struct ieee80211_channel;
497 * @set_txq_params: Set TX queue parameters 652 * @set_txq_params: Set TX queue parameters
498 * 653 *
499 * @set_channel: Set channel 654 * @set_channel: Set channel
655 *
656 * @set_mgmt_extra_ie: Set extra IE data for management frames
657 *
 658 * @scan: Request to do a scan. If returning zero, the scan request is given
 659 * to the driver, and will be valid until passed to cfg80211_scan_done().
660 * For scan results, call cfg80211_inform_bss(); you can call this outside
661 * the scan/scan_done bracket too.
500 */ 662 */
501struct cfg80211_ops { 663struct cfg80211_ops {
664 int (*suspend)(struct wiphy *wiphy);
665 int (*resume)(struct wiphy *wiphy);
666
502 int (*add_virtual_intf)(struct wiphy *wiphy, char *name, 667 int (*add_virtual_intf)(struct wiphy *wiphy, char *name,
503 enum nl80211_iftype type, u32 *flags, 668 enum nl80211_iftype type, u32 *flags,
504 struct vif_params *params); 669 struct vif_params *params);
@@ -518,6 +683,9 @@ struct cfg80211_ops {
518 int (*set_default_key)(struct wiphy *wiphy, 683 int (*set_default_key)(struct wiphy *wiphy,
519 struct net_device *netdev, 684 struct net_device *netdev,
520 u8 key_index); 685 u8 key_index);
686 int (*set_default_mgmt_key)(struct wiphy *wiphy,
687 struct net_device *netdev,
688 u8 key_index);
521 689
522 int (*add_beacon)(struct wiphy *wiphy, struct net_device *dev, 690 int (*add_beacon)(struct wiphy *wiphy, struct net_device *dev,
523 struct beacon_parameters *info); 691 struct beacon_parameters *info);
@@ -564,6 +732,13 @@ struct cfg80211_ops {
564 int (*set_channel)(struct wiphy *wiphy, 732 int (*set_channel)(struct wiphy *wiphy,
565 struct ieee80211_channel *chan, 733 struct ieee80211_channel *chan,
566 enum nl80211_channel_type channel_type); 734 enum nl80211_channel_type channel_type);
735
736 int (*set_mgmt_extra_ie)(struct wiphy *wiphy,
737 struct net_device *dev,
738 struct mgmt_extra_ie_params *params);
739
740 int (*scan)(struct wiphy *wiphy, struct net_device *dev,
741 struct cfg80211_scan_request *request);
567}; 742};
568 743
569/* temporary wext handlers */ 744/* temporary wext handlers */
@@ -574,5 +749,68 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
574 u32 *mode, char *extra); 749 u32 *mode, char *extra);
575int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, 750int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
576 u32 *mode, char *extra); 751 u32 *mode, char *extra);
752int cfg80211_wext_siwscan(struct net_device *dev,
753 struct iw_request_info *info,
754 union iwreq_data *wrqu, char *extra);
755int cfg80211_wext_giwscan(struct net_device *dev,
756 struct iw_request_info *info,
757 struct iw_point *data, char *extra);
758
759/**
760 * cfg80211_scan_done - notify that scan finished
761 *
762 * @request: the corresponding scan request
763 * @aborted: set to true if the scan was aborted for any reason,
764 * userspace will be notified of that
765 */
766void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted);
767
768/**
769 * cfg80211_inform_bss - inform cfg80211 of a new BSS
770 *
771 * @wiphy: the wiphy reporting the BSS
772 * @bss: the found BSS
773 * @gfp: context flags
774 *
775 * This informs cfg80211 that BSS information was found and
776 * the BSS should be updated/added.
777 */
778struct cfg80211_bss*
779cfg80211_inform_bss_frame(struct wiphy *wiphy,
780 struct ieee80211_channel *channel,
781 struct ieee80211_mgmt *mgmt, size_t len,
782 s32 signal, enum cfg80211_signal_type sigtype,
783 gfp_t gfp);
784
785struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
786 struct ieee80211_channel *channel,
787 const u8 *bssid,
788 const u8 *ssid, size_t ssid_len,
789 u16 capa_mask, u16 capa_val);
790static inline struct cfg80211_bss *
791cfg80211_get_ibss(struct wiphy *wiphy,
792 struct ieee80211_channel *channel,
793 const u8 *ssid, size_t ssid_len)
794{
795 return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len,
796 WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
797}
798
799struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy,
800 struct ieee80211_channel *channel,
801 const u8 *meshid, size_t meshidlen,
802 const u8 *meshcfg);
803void cfg80211_put_bss(struct cfg80211_bss *bss);
804/**
805 * cfg80211_unlink_bss - unlink BSS from internal data structures
806 * @wiphy: the wiphy
807 * @bss: the bss to remove
808 *
809 * This function removes the given BSS from the internal data structures
810 * thereby making it no longer show up in scan results etc. Use this
811 * function when you detect a BSS is gone. Normally BSSes will also time
812 * out, so it is not necessary to use this function at all.
813 */
814void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
577 815
578#endif /* __NET_CFG80211_H */ 816#endif /* __NET_CFG80211_H */
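
Taken together, the new ->scan() op, cfg80211_inform_bss_frame() and cfg80211_scan_done() form the reporting path for fullmac drivers. A sketch of a driver's scan-completion handler using only the declarations above; the function and its arguments are hypothetical, and the assumption that the returned BSS reference is released with cfg80211_put_bss() is not stated in this hunk:

/*
 * Sketch only: report one received probe response / beacon and then hand
 * the scan request back to cfg80211.
 */
#include <net/cfg80211.h>

static void example_scan_done(struct wiphy *wiphy,
			      struct cfg80211_scan_request *req,
			      struct ieee80211_channel *chan,
			      struct ieee80211_mgmt *probe_resp, size_t len,
			      s32 signal_mbm)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame(wiphy, chan, probe_resp, len,
					signal_mbm, CFG80211_SIGNAL_TYPE_MBM,
					GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(bss);	/* assumed: drop the returned reference */

	cfg80211_scan_done(req, false);	/* false = scan completed, not aborted */
}
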
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 7040a782c65..9b5d08f4f6e 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -113,12 +113,12 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
113static inline int INET_ECN_set_ce(struct sk_buff *skb) 113static inline int INET_ECN_set_ce(struct sk_buff *skb)
114{ 114{
115 switch (skb->protocol) { 115 switch (skb->protocol) {
116 case __constant_htons(ETH_P_IP): 116 case cpu_to_be16(ETH_P_IP):
117 if (skb->network_header + sizeof(struct iphdr) <= skb->tail) 117 if (skb->network_header + sizeof(struct iphdr) <= skb->tail)
118 return IP_ECN_set_ce(ip_hdr(skb)); 118 return IP_ECN_set_ce(ip_hdr(skb));
119 break; 119 break;
120 120
121 case __constant_htons(ETH_P_IPV6): 121 case cpu_to_be16(ETH_P_IPV6):
122 if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail) 122 if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail)
123 return IP6_ECN_set_ce(ipv6_hdr(skb)); 123 return IP6_ECN_set_ce(ipv6_hdr(skb));
124 break; 124 break;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index d0a043153cc..a44e2248b2e 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -82,6 +82,7 @@ struct inet_bind_bucket {
82#endif 82#endif
83 unsigned short port; 83 unsigned short port;
84 signed short fastreuse; 84 signed short fastreuse;
85 int num_owners;
85 struct hlist_node node; 86 struct hlist_node node;
86 struct hlist_head owners; 87 struct hlist_head owners;
87}; 88};
@@ -133,7 +134,7 @@ struct inet_hashinfo {
133 struct inet_bind_hashbucket *bhash; 134 struct inet_bind_hashbucket *bhash;
134 135
135 unsigned int bhash_size; 136 unsigned int bhash_size;
136 /* Note : 4 bytes padding on 64 bit arches */ 137 /* 4 bytes hole on 64 bit */
137 138
138 struct kmem_cache *bind_bucket_cachep; 139 struct kmem_cache *bind_bucket_cachep;
139 140
@@ -150,6 +151,7 @@ struct inet_hashinfo {
150 struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] 151 struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
151 ____cacheline_aligned_in_smp; 152 ____cacheline_aligned_in_smp;
152 153
154 atomic_t bsockets;
153}; 155};
154 156
155static inline struct inet_ehash_bucket *inet_ehash_bucket( 157static inline struct inet_ehash_bucket *inet_ehash_bucket(
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ab9b003ab67..bbae1e87efc 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -184,8 +184,8 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
184/* 184/*
185 * The port number of FTP service (in network order). 185 * The port number of FTP service (in network order).
186 */ 186 */
187#define FTPPORT __constant_htons(21) 187#define FTPPORT cpu_to_be16(21)
188#define FTPDATA __constant_htons(20) 188#define FTPDATA cpu_to_be16(20)
189 189
190/* 190/*
191 * TCP State Values 191 * TCP State Values
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6d5b58a1c74..c1f16fc49ad 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -196,8 +196,8 @@ struct ip6_flowlabel
196 struct net *fl_net; 196 struct net *fl_net;
197}; 197};
198 198
199#define IPV6_FLOWINFO_MASK __constant_htonl(0x0FFFFFFF) 199#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
200#define IPV6_FLOWLABEL_MASK __constant_htonl(0x000FFFFF) 200#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
201 201
202struct ipv6_fl_socklist 202struct ipv6_fl_socklist
203{ 203{
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 4cc0b4eca94..a14121dd193 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -27,7 +27,7 @@ struct ipx_address {
27 27
28struct ipxhdr { 28struct ipxhdr {
29 __be16 ipx_checksum __attribute__ ((packed)); 29 __be16 ipx_checksum __attribute__ ((packed));
30#define IPX_NO_CHECKSUM __constant_htons(0xFFFF) 30#define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF)
31 __be16 ipx_pktsize __attribute__ ((packed)); 31 __be16 ipx_pktsize __attribute__ ((packed));
32 __u8 ipx_tctrl; 32 __u8 ipx_tctrl;
33 __u8 ipx_type; 33 __u8 ipx_type;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 559422fc094..88fa3e03e3e 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -207,7 +207,7 @@ struct ieee80211_bss_conf {
207 u16 beacon_int; 207 u16 beacon_int;
208 u16 assoc_capability; 208 u16 assoc_capability;
209 u64 timestamp; 209 u64 timestamp;
210 u64 basic_rates; 210 u32 basic_rates;
211 struct ieee80211_bss_ht_conf ht; 211 struct ieee80211_bss_ht_conf ht;
212}; 212};
213 213
@@ -262,6 +262,26 @@ enum mac80211_tx_control_flags {
262 IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), 262 IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
263}; 263};
264 264
265/**
266 * enum mac80211_rate_control_flags - per-rate flags set by the
267 * Rate Control algorithm.
268 *
269 * These flags are set by the Rate control algorithm for each rate during tx,
270 * in the @flags member of struct ieee80211_tx_rate.
271 *
272 * @IEEE80211_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate.
273 * @IEEE80211_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required.
274 * This is set if the current BSS requires ERP protection.
275 * @IEEE80211_TX_RC_USE_SHORT_PREAMBLE: Use short preamble.
276 * @IEEE80211_TX_RC_MCS: HT rate.
277 * @IEEE80211_TX_RC_GREEN_FIELD: Indicates whether this rate should be used in
278 * Greenfield mode.
279 * @IEEE80211_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be 40 MHz.
280 * @IEEE80211_TX_RC_DUP_DATA: The frame should be transmitted on both of the
281 * adjacent 20 MHz channels, if the current channel type is
282 * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS.
283 * @IEEE80211_TX_RC_SHORT_GI: Short Guard interval should be used for this rate.
284 */
265enum mac80211_rate_control_flags { 285enum mac80211_rate_control_flags {
266 IEEE80211_TX_RC_USE_RTS_CTS = BIT(0), 286 IEEE80211_TX_RC_USE_RTS_CTS = BIT(0),
267 IEEE80211_TX_RC_USE_CTS_PROTECT = BIT(1), 287 IEEE80211_TX_RC_USE_CTS_PROTECT = BIT(1),
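
The new kerneldoc describes flags a rate-control algorithm sets per rate. As a sketch, a ->get_rate() style hook might fill the first rate entry like this; the ieee80211_tx_rate fields (idx/count/flags) and the control.rates[] array are assumed from the mac80211 API of this period rather than shown in this hunk:

/*
 * Sketch only: pick a rate index, a try count and per-rate flags for the
 * first rate entry of a frame.  All values are arbitrary examples.
 */
#include <net/mac80211.h>

static void example_fill_rate(struct sk_buff *skb, bool in_ht40, bool erp_prot)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];

	rate->idx   = 0;	/* lowest rate as a safe example */
	rate->count = 4;	/* try it up to four times */
	rate->flags = 0;

	if (erp_prot)
		rate->flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
	if (in_ht40)
		rate->flags |= IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_40_MHZ_WIDTH;
}
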
@@ -507,11 +527,6 @@ static inline int __deprecated __IEEE80211_CONF_SHORT_SLOT_TIME(void)
507} 527}
508#define IEEE80211_CONF_SHORT_SLOT_TIME (__IEEE80211_CONF_SHORT_SLOT_TIME()) 528#define IEEE80211_CONF_SHORT_SLOT_TIME (__IEEE80211_CONF_SHORT_SLOT_TIME())
509 529
510struct ieee80211_ht_conf {
511 bool enabled;
512 enum nl80211_channel_type channel_type;
513};
514
515/** 530/**
516 * enum ieee80211_conf_changed - denotes which configuration changed 531 * enum ieee80211_conf_changed - denotes which configuration changed
517 * 532 *
@@ -520,10 +535,10 @@ struct ieee80211_ht_conf {
520 * @IEEE80211_CONF_CHANGE_LISTEN_INTERVAL: the listen interval changed 535 * @IEEE80211_CONF_CHANGE_LISTEN_INTERVAL: the listen interval changed
521 * @IEEE80211_CONF_CHANGE_RADIOTAP: the radiotap flag changed 536 * @IEEE80211_CONF_CHANGE_RADIOTAP: the radiotap flag changed
522 * @IEEE80211_CONF_CHANGE_PS: the PS flag changed 537 * @IEEE80211_CONF_CHANGE_PS: the PS flag changed
538 * @IEEE80211_CONF_CHANGE_DYNPS_TIMEOUT: the dynamic PS timeout changed
523 * @IEEE80211_CONF_CHANGE_POWER: the TX power changed 539 * @IEEE80211_CONF_CHANGE_POWER: the TX power changed
524 * @IEEE80211_CONF_CHANGE_CHANNEL: the channel changed 540 * @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed
525 * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed 541 * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
526 * @IEEE80211_CONF_CHANGE_HT: HT configuration changed
527 */ 542 */
528enum ieee80211_conf_changed { 543enum ieee80211_conf_changed {
529 IEEE80211_CONF_CHANGE_RADIO_ENABLED = BIT(0), 544 IEEE80211_CONF_CHANGE_RADIO_ENABLED = BIT(0),
@@ -531,10 +546,10 @@ enum ieee80211_conf_changed {
531 IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2), 546 IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2),
532 IEEE80211_CONF_CHANGE_RADIOTAP = BIT(3), 547 IEEE80211_CONF_CHANGE_RADIOTAP = BIT(3),
533 IEEE80211_CONF_CHANGE_PS = BIT(4), 548 IEEE80211_CONF_CHANGE_PS = BIT(4),
534 IEEE80211_CONF_CHANGE_POWER = BIT(5), 549 IEEE80211_CONF_CHANGE_DYNPS_TIMEOUT = BIT(5),
535 IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), 550 IEEE80211_CONF_CHANGE_POWER = BIT(6),
536 IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), 551 IEEE80211_CONF_CHANGE_CHANNEL = BIT(7),
537 IEEE80211_CONF_CHANGE_HT = BIT(8), 552 IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(8),
538}; 553};
539 554
540/** 555/**
@@ -547,8 +562,9 @@ enum ieee80211_conf_changed {
547 * @listen_interval: listen interval in units of beacon interval 562 * @listen_interval: listen interval in units of beacon interval
548 * @flags: configuration flags defined above 563 * @flags: configuration flags defined above
549 * @power_level: requested transmit power (in dBm) 564 * @power_level: requested transmit power (in dBm)
565 * @dynamic_ps_timeout: dynamic powersave timeout (in ms)
550 * @channel: the channel to tune to 566 * @channel: the channel to tune to
551 * @ht: the HT configuration for the device 567 * @channel_type: the channel (HT) type
552 * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame 568 * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame
553 * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11, 569 * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
554 * but actually means the number of transmissions not the number of retries 570 * but actually means the number of transmissions not the number of retries
@@ -559,7 +575,7 @@ enum ieee80211_conf_changed {
559struct ieee80211_conf { 575struct ieee80211_conf {
560 int beacon_int; 576 int beacon_int;
561 u32 flags; 577 u32 flags;
562 int power_level; 578 int power_level, dynamic_ps_timeout;
563 579
564 u16 listen_interval; 580 u16 listen_interval;
565 bool radio_enabled; 581 bool radio_enabled;
@@ -567,7 +583,7 @@ struct ieee80211_conf {
567 u8 long_frame_max_tx_count, short_frame_max_tx_count; 583 u8 long_frame_max_tx_count, short_frame_max_tx_count;
568 584
569 struct ieee80211_channel *channel; 585 struct ieee80211_channel *channel;
570 struct ieee80211_ht_conf ht; 586 enum nl80211_channel_type channel_type;
571}; 587};
572 588
573/** 589/**
@@ -630,10 +646,12 @@ struct ieee80211_if_init_conf {
630 * @IEEE80211_IFCC_BSSID: The BSSID changed. 646 * @IEEE80211_IFCC_BSSID: The BSSID changed.
631 * @IEEE80211_IFCC_BEACON: The beacon for this interface changed 647 * @IEEE80211_IFCC_BEACON: The beacon for this interface changed
632 * (currently AP and MESH only), use ieee80211_beacon_get(). 648 * (currently AP and MESH only), use ieee80211_beacon_get().
649 * @IEEE80211_IFCC_BEACON_ENABLED: The enable_beacon value changed.
633 */ 650 */
634enum ieee80211_if_conf_change { 651enum ieee80211_if_conf_change {
635 IEEE80211_IFCC_BSSID = BIT(0), 652 IEEE80211_IFCC_BSSID = BIT(0),
636 IEEE80211_IFCC_BEACON = BIT(1), 653 IEEE80211_IFCC_BEACON = BIT(1),
654 IEEE80211_IFCC_BEACON_ENABLED = BIT(2),
637}; 655};
638 656
639/** 657/**
@@ -641,13 +659,16 @@ enum ieee80211_if_conf_change {
641 * 659 *
642 * @changed: parameters that have changed, see &enum ieee80211_if_conf_change. 660 * @changed: parameters that have changed, see &enum ieee80211_if_conf_change.
643 * @bssid: BSSID of the network we are associated to/creating. 661 * @bssid: BSSID of the network we are associated to/creating.
662 * @enable_beacon: Indicates whether beacons can be sent.
663 * This is valid only for AP/IBSS/MESH modes.
644 * 664 *
645 * This structure is passed to the config_interface() callback of 665 * This structure is passed to the config_interface() callback of
646 * &struct ieee80211_hw. 666 * &struct ieee80211_hw.
647 */ 667 */
648struct ieee80211_if_conf { 668struct ieee80211_if_conf {
649 u32 changed; 669 u32 changed;
650 u8 *bssid; 670 const u8 *bssid;
671 bool enable_beacon;
651}; 672};
652 673
653/** 674/**
@@ -655,11 +676,13 @@ struct ieee80211_if_conf {
655 * @ALG_WEP: WEP40 or WEP104 676 * @ALG_WEP: WEP40 or WEP104
656 * @ALG_TKIP: TKIP 677 * @ALG_TKIP: TKIP
657 * @ALG_CCMP: CCMP (AES) 678 * @ALG_CCMP: CCMP (AES)
679 * @ALG_AES_CMAC: AES-128-CMAC
658 */ 680 */
659enum ieee80211_key_alg { 681enum ieee80211_key_alg {
660 ALG_WEP, 682 ALG_WEP,
661 ALG_TKIP, 683 ALG_TKIP,
662 ALG_CCMP, 684 ALG_CCMP,
685 ALG_AES_CMAC,
663}; 686};
664 687
665/** 688/**
@@ -688,12 +711,16 @@ enum ieee80211_key_len {
688 * generation in software. 711 * generation in software.
689 * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates 712 * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
690 * that the key is pairwise rather than a shared key. 713 * that the key is pairwise rather than a shared key.
714 * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
715 * CCMP key if it requires CCMP encryption of management frames (MFP) to
716 * be done in software.
691 */ 717 */
692enum ieee80211_key_flags { 718enum ieee80211_key_flags {
693 IEEE80211_KEY_FLAG_WMM_STA = 1<<0, 719 IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
694 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, 720 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1,
695 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, 721 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
696 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, 722 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
723 IEEE80211_KEY_FLAG_SW_MGMT = 1<<4,
697}; 724};
698 725
699/** 726/**
@@ -714,8 +741,8 @@ enum ieee80211_key_flags {
714 * - Temporal Encryption Key (128 bits) 741 * - Temporal Encryption Key (128 bits)
715 * - Temporal Authenticator Tx MIC Key (64 bits) 742 * - Temporal Authenticator Tx MIC Key (64 bits)
716 * - Temporal Authenticator Rx MIC Key (64 bits) 743 * - Temporal Authenticator Rx MIC Key (64 bits)
717 * @icv_len: FIXME 744 * @icv_len: The ICV length for this key type
718 * @iv_len: FIXME 745 * @iv_len: The IV length for this key type
719 */ 746 */
720struct ieee80211_key_conf { 747struct ieee80211_key_conf {
721 enum ieee80211_key_alg alg; 748 enum ieee80211_key_alg alg;
@@ -759,7 +786,7 @@ enum set_key_cmd {
759 * sizeof(void *), size is determined in hw information. 786 * sizeof(void *), size is determined in hw information.
760 */ 787 */
761struct ieee80211_sta { 788struct ieee80211_sta {
762 u64 supp_rates[IEEE80211_NUM_BANDS]; 789 u32 supp_rates[IEEE80211_NUM_BANDS];
763 u8 addr[ETH_ALEN]; 790 u8 addr[ETH_ALEN];
764 u16 aid; 791 u16 aid;
765 struct ieee80211_sta_ht_cap ht_cap; 792 struct ieee80211_sta_ht_cap ht_cap;
@@ -833,11 +860,6 @@ enum ieee80211_tkip_key_type {
833 * expect values between 0 and @max_signal. 860 * expect values between 0 and @max_signal.
834 * If possible please provide dB or dBm instead. 861 * If possible please provide dB or dBm instead.
835 * 862 *
836 * @IEEE80211_HW_SIGNAL_DB:
837 * Hardware gives signal values in dB, decibel difference from an
838 * arbitrary, fixed reference. We expect values between 0 and @max_signal.
839 * If possible please provide dBm instead.
840 *
841 * @IEEE80211_HW_SIGNAL_DBM: 863 * @IEEE80211_HW_SIGNAL_DBM:
842 * Hardware gives signal values in dBm, decibel difference from 864 * Hardware gives signal values in dBm, decibel difference from
843 * one milliwatt. This is the preferred method since it is standardized 865 * one milliwatt. This is the preferred method since it is standardized
@@ -854,10 +876,18 @@ enum ieee80211_tkip_key_type {
854 * @IEEE80211_HW_AMPDU_AGGREGATION: 876 * @IEEE80211_HW_AMPDU_AGGREGATION:
855 * Hardware supports 11n A-MPDU aggregation. 877 * Hardware supports 11n A-MPDU aggregation.
856 * 878 *
857 * @IEEE80211_HW_NO_STACK_DYNAMIC_PS: 879 * @IEEE80211_HW_SUPPORTS_PS:
858 * Hardware which has dynamic power save support, meaning 880 * Hardware has power save support (i.e. can go to sleep).
859 * that power save is enabled in idle periods, and don't need support 881 *
860 * from stack. 882 * @IEEE80211_HW_PS_NULLFUNC_STACK:
883 * Hardware requires nullfunc frame handling in stack, implies
884 * stack support for dynamic PS.
885 *
886 * @IEEE80211_HW_SUPPORTS_DYNAMIC_PS:
887 * Hardware has support for dynamic PS.
888 *
889 * @IEEE80211_HW_MFP_CAPABLE:
890 * Hardware supports management frame protection (MFP, IEEE 802.11w).
861 */ 891 */
862enum ieee80211_hw_flags { 892enum ieee80211_hw_flags {
863 IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, 893 IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
@@ -865,12 +895,14 @@ enum ieee80211_hw_flags {
865 IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3, 895 IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3,
866 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, 896 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
867 IEEE80211_HW_SIGNAL_UNSPEC = 1<<5, 897 IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
868 IEEE80211_HW_SIGNAL_DB = 1<<6, 898 IEEE80211_HW_SIGNAL_DBM = 1<<6,
869 IEEE80211_HW_SIGNAL_DBM = 1<<7, 899 IEEE80211_HW_NOISE_DBM = 1<<7,
870 IEEE80211_HW_NOISE_DBM = 1<<8, 900 IEEE80211_HW_SPECTRUM_MGMT = 1<<8,
871 IEEE80211_HW_SPECTRUM_MGMT = 1<<9, 901 IEEE80211_HW_AMPDU_AGGREGATION = 1<<9,
872 IEEE80211_HW_AMPDU_AGGREGATION = 1<<10, 902 IEEE80211_HW_SUPPORTS_PS = 1<<10,
873 IEEE80211_HW_NO_STACK_DYNAMIC_PS = 1<<11, 903 IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
904 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
905 IEEE80211_HW_MFP_CAPABLE = 1<<13,
874}; 906};
875 907
876/** 908/**
@@ -890,9 +922,8 @@ enum ieee80211_hw_flags {
890 * @workqueue: single threaded workqueue available for driver use, 922 * @workqueue: single threaded workqueue available for driver use,
891 * allocated by mac80211 on registration and flushed when an 923 * allocated by mac80211 on registration and flushed when an
892 * interface is removed. 924 * interface is removed.
893 * NOTICE: All work performed on this workqueue should NEVER 925 * NOTICE: All work performed on this workqueue must not
894 * acquire the RTNL lock (i.e. Don't use the function 926 * acquire the RTNL lock.
895 * ieee80211_iterate_active_interfaces())
896 * 927 *
897 * @priv: pointer to private area that was allocated for driver use 928 * @priv: pointer to private area that was allocated for driver use
898 * along with this structure. 929 * along with this structure.
@@ -952,6 +983,19 @@ struct ieee80211_hw {
952}; 983};
953 984
954/** 985/**
986 * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy
987 *
988 * @wiphy: the &struct wiphy which we want to query
989 *
990 * mac80211 drivers can use this to get to their respective
991 * &struct ieee80211_hw. Drivers wishing to get to their own private
992 * structure can then access it via hw->priv. Note that mac80211 drivers should
993 * not use wiphy_priv() to try to get their private driver structure as this
994 * is already used internally by mac80211.
995 */
996struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy);
997
998/**
955 * SET_IEEE80211_DEV - set device for 802.11 hardware 999 * SET_IEEE80211_DEV - set device for 802.11 hardware
956 * 1000 *
957 * @hw: the &struct ieee80211_hw to set the device for 1001 * @hw: the &struct ieee80211_hw to set the device for
@@ -1018,16 +1062,12 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1018 * 1062 *
1019 * The set_key() callback in the &struct ieee80211_ops for a given 1063 * The set_key() callback in the &struct ieee80211_ops for a given
1020 * device is called to enable hardware acceleration of encryption and 1064 * device is called to enable hardware acceleration of encryption and
1021 * decryption. The callback takes an @address parameter that will be 1065 * decryption. The callback takes a @sta parameter that will be NULL
1022 * the broadcast address for default keys, the other station's hardware 1066 * for default keys or keys used for transmission only, or point to
1023 * address for individual keys or the zero address for keys that will 1067 * the station information for the peer for individual keys.
1024 * be used only for transmission.
1025 * Multiple transmission keys with the same key index may be used when 1068 * Multiple transmission keys with the same key index may be used when
1026 * VLANs are configured for an access point. 1069 * VLANs are configured for an access point.
1027 * 1070 *
1028 * The @local_address parameter will always be set to our own address,
1029 * this is only relevant if you support multiple local addresses.
1030 *
1031 * When transmitting, the TX control data will use the @hw_key_idx 1071 * When transmitting, the TX control data will use the @hw_key_idx
1032 * selected by the driver by modifying the &struct ieee80211_key_conf 1072 * selected by the driver by modifying the &struct ieee80211_key_conf
1033 * pointed to by the @key parameter to the set_key() function. 1073 * pointed to by the @key parameter to the set_key() function.
@@ -1061,6 +1101,42 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1061 */ 1101 */
1062 1102
1063/** 1103/**
1104 * DOC: Powersave support
1105 *
1106 * mac80211 has support for various powersave implementations.
1107 *
1108 * First, it can support hardware that handles all powersaving by
1109 * itself, such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS
1110 * hardware flag. In that case, it will be told about the desired
1111 * powersave mode depending on the association status, and the driver
1112 * must take care of sending nullfunc frames when necessary, i.e. when
1113 * entering and leaving powersave mode. The driver is required to look at
1114 * the AID in beacons and signal to the AP that it woke up when it finds
1115 * traffic directed to it. This mode supports dynamic PS by simply
1116 * enabling/disabling PS.
1117 *
1118 * Additionally, such hardware may set the %IEEE80211_HW_SUPPORTS_DYNAMIC_PS
1119 * flag to indicate that it can support dynamic PS mode itself (see below).
1120 *
1121 * Other hardware designs cannot send nullfunc frames by themselves and also
1122 * need software support for parsing the TIM bitmap. This is also supported
1123 * by mac80211 by combining the %IEEE80211_HW_SUPPORTS_PS and
1124 * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still
1125 * required to pass up beacons. Additionally, in this case, mac80211 will
1126 * wake up the hardware when multicast traffic is announced in the beacon.
1127 *
1128 * FIXME: I don't think we can be fast enough in software when we want to
1129 * receive multicast traffic?
1130 *
1131 * Dynamic powersave mode is an extension to normal powersave mode in which
1132 * the hardware stays awake for a user-specified period of time after sending
1133 * a frame so that reply frames need not be buffered and therefore delayed
1134 * to the next wakeup. This can either be supported by hardware, in which case
1135 * the driver needs to look at the @dynamic_ps_timeout hardware configuration
1136 * value, or by the stack if all nullfunc handling is in the stack.
1137 */
1138
1139/**
1064 * DOC: Frame filtering 1140 * DOC: Frame filtering
1065 * 1141 *
1066 * mac80211 requires to see many management frames for proper 1142 * mac80211 requires to see many management frames for proper
@@ -1172,6 +1248,8 @@ enum ieee80211_ampdu_mlme_action {
1172 * configuration in the TX control data. This handler should, 1248 * configuration in the TX control data. This handler should,
1173 * preferably, never fail and stop queues appropriately, more 1249 * preferably, never fail and stop queues appropriately, more
1174 * importantly, however, it must never fail for A-MPDU-queues. 1250 * importantly, however, it must never fail for A-MPDU-queues.
1251 * This function should return NETDEV_TX_OK except in very
1252 * limited cases.
1175 * Must be implemented and atomic. 1253 * Must be implemented and atomic.
1176 * 1254 *
1177 * @start: Called before the first netdevice attached to the hardware 1255 * @start: Called before the first netdevice attached to the hardware
@@ -1212,9 +1290,12 @@ enum ieee80211_ampdu_mlme_action {
1212 * 1290 *
1213 * @config: Handler for configuration requests. IEEE 802.11 code calls this 1291 * @config: Handler for configuration requests. IEEE 802.11 code calls this
1214 * function to change hardware configuration, e.g., channel. 1292 * function to change hardware configuration, e.g., channel.
1293 * This function should never fail but returns a negative error code
1294 * if it does.
1215 * 1295 *
1216 * @config_interface: Handler for configuration requests related to interfaces 1296 * @config_interface: Handler for configuration requests related to interfaces
1217 * (e.g. BSSID changes.) 1297 * (e.g. BSSID changes.)
1298 * Returns a negative error code which will be seen in userspace.
1218 * 1299 *
1219 * @bss_info_changed: Handler for configuration requests related to BSS 1300 * @bss_info_changed: Handler for configuration requests related to BSS
1220 * parameters that may vary during BSS's lifespan, and may affect low 1301 * parameters that may vary during BSS's lifespan, and may affect low
@@ -1232,8 +1313,9 @@ enum ieee80211_ampdu_mlme_action {
1232 * 1313 *
1233 * @set_key: See the section "Hardware crypto acceleration" 1314 * @set_key: See the section "Hardware crypto acceleration"
1234 * This callback can sleep, and is only called between add_interface 1315 * This callback can sleep, and is only called between add_interface
1235 * and remove_interface calls, i.e. while the interface with the 1316 * and remove_interface calls, i.e. while the given virtual interface
1236 * given local_address is enabled. 1317 * is enabled.
1318 * Returns a negative error code if the key can't be added.
1237 * 1319 *
1238 * @update_tkip_key: See the section "Hardware crypto acceleration" 1320 * @update_tkip_key: See the section "Hardware crypto acceleration"
1239 * This callback will be called in the context of Rx. Called for drivers 1321 * This callback will be called in the context of Rx. Called for drivers
@@ -1245,8 +1327,10 @@ enum ieee80211_ampdu_mlme_action {
1245 * bands. When the scan finishes, ieee80211_scan_completed() must be 1327 * bands. When the scan finishes, ieee80211_scan_completed() must be
1246 * called; note that it also must be called when the scan cannot finish 1328 * called; note that it also must be called when the scan cannot finish
1247 * because the hardware is turned off! Anything else is a bug! 1329 * because the hardware is turned off! Anything else is a bug!
1330 * Returns a negative error code which will be seen in userspace.
1248 * 1331 *
1249 * @get_stats: return low-level statistics 1332 * @get_stats: Return low-level statistics.
1333 * Returns zero if statistics are available.
1250 * 1334 *
1251 * @get_tkip_seq: If your device implements TKIP encryption in hardware this 1335 * @get_tkip_seq: If your device implements TKIP encryption in hardware this
1252 * callback should be provided to read the TKIP transmit IVs (both IV32 1336 * callback should be provided to read the TKIP transmit IVs (both IV32
@@ -1260,6 +1344,7 @@ enum ieee80211_ampdu_mlme_action {
1260 * 1344 *
1261 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), 1345 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
1262 * bursting) for a hardware TX queue. 1346 * bursting) for a hardware TX queue.
1347 * Returns a negative error code on failure.
1263 * 1348 *
1264 * @get_tx_stats: Get statistics of the current TX queue status. This is used 1349 * @get_tx_stats: Get statistics of the current TX queue status. This is used
1265 * to get number of currently queued packets (queue length), maximum queue 1350 * to get number of currently queued packets (queue length), maximum queue
@@ -1268,8 +1353,12 @@ enum ieee80211_ampdu_mlme_action {
1268 * hw->ampdu_queues items. 1353 * hw->ampdu_queues items.
1269 * 1354 *
1270 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, 1355 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
1271 * this is only used for IBSS mode debugging and, as such, is not a 1356 * this is only used for IBSS mode BSSID merging and debugging. Is not a
1272 * required function. Must be atomic. 1357 * required function.
1358 *
1359 * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
1360 * Currently, this is only used for IBSS mode debugging. Is not a
1361 * required function.
1273 * 1362 *
1274 * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize 1363 * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
1275 * with other STAs in the IBSS. This is only used in IBSS mode. This 1364 * with other STAs in the IBSS. This is only used in IBSS mode. This
@@ -1279,13 +1368,15 @@ enum ieee80211_ampdu_mlme_action {
1279 * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us. 1368 * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us.
1280 * This is needed only for IBSS mode and the result of this function is 1369 * This is needed only for IBSS mode and the result of this function is
1281 * used to determine whether to reply to Probe Requests. 1370 * used to determine whether to reply to Probe Requests.
1371 * Returns non-zero if this device sent the last beacon.
1282 * 1372 *
1283 * @ampdu_action: Perform a certain A-MPDU action 1373 * @ampdu_action: Perform a certain A-MPDU action
1284 * The RA/TID combination determines the destination and TID we want 1374 * The RA/TID combination determines the destination and TID we want
1285 * the ampdu action to be performed for. The action is defined through 1375 * the ampdu action to be performed for. The action is defined through
1286 * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) 1376 * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
1287 * is the first frame we expect to perform the action on. notice 1377 * is the first frame we expect to perform the action on. Notice
1288 * that TX/RX_STOP can pass NULL for this parameter. 1378 * that TX/RX_STOP can pass NULL for this parameter.
1379 * Returns a negative error code on failure.
1289 */ 1380 */
1290struct ieee80211_ops { 1381struct ieee80211_ops {
1291 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 1382 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -1310,12 +1401,13 @@ struct ieee80211_ops {
1310 int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, 1401 int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
1311 bool set); 1402 bool set);
1312 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1403 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1313 const u8 *local_address, const u8 *address, 1404 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
1314 struct ieee80211_key_conf *key); 1405 struct ieee80211_key_conf *key);
1315 void (*update_tkip_key)(struct ieee80211_hw *hw, 1406 void (*update_tkip_key)(struct ieee80211_hw *hw,
1316 struct ieee80211_key_conf *conf, const u8 *address, 1407 struct ieee80211_key_conf *conf, const u8 *address,
1317 u32 iv32, u16 *phase1key); 1408 u32 iv32, u16 *phase1key);
1318 int (*hw_scan)(struct ieee80211_hw *hw, u8 *ssid, size_t len); 1409 int (*hw_scan)(struct ieee80211_hw *hw,
1410 struct cfg80211_scan_request *req);
1319 int (*get_stats)(struct ieee80211_hw *hw, 1411 int (*get_stats)(struct ieee80211_hw *hw,
1320 struct ieee80211_low_level_stats *stats); 1412 struct ieee80211_low_level_stats *stats);
1321 void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, 1413 void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
@@ -1328,6 +1420,7 @@ struct ieee80211_ops {
1328 int (*get_tx_stats)(struct ieee80211_hw *hw, 1420 int (*get_tx_stats)(struct ieee80211_hw *hw,
1329 struct ieee80211_tx_queue_stats *stats); 1421 struct ieee80211_tx_queue_stats *stats);
1330 u64 (*get_tsf)(struct ieee80211_hw *hw); 1422 u64 (*get_tsf)(struct ieee80211_hw *hw);
1423 void (*set_tsf)(struct ieee80211_hw *hw, u64 tsf);
1331 void (*reset_tsf)(struct ieee80211_hw *hw); 1424 void (*reset_tsf)(struct ieee80211_hw *hw);
1332 int (*tx_last_beacon)(struct ieee80211_hw *hw); 1425 int (*tx_last_beacon)(struct ieee80211_hw *hw);
1333 int (*ampdu_action)(struct ieee80211_hw *hw, 1426 int (*ampdu_action)(struct ieee80211_hw *hw,
@@ -1752,8 +1845,9 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
1752 * mac80211 that the scan finished. 1845 * mac80211 that the scan finished.
1753 * 1846 *
1754 * @hw: the hardware that finished the scan 1847 * @hw: the hardware that finished the scan
1848 * @aborted: set to true if scan was aborted
1755 */ 1849 */
1756void ieee80211_scan_completed(struct ieee80211_hw *hw); 1850void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted);
1757 1851
1758/** 1852/**
1759 * ieee80211_iterate_active_interfaces - iterate active interfaces 1853 * ieee80211_iterate_active_interfaces - iterate active interfaces
@@ -1962,4 +2056,34 @@ rate_lowest_index(struct ieee80211_supported_band *sband,
1962int ieee80211_rate_control_register(struct rate_control_ops *ops); 2056int ieee80211_rate_control_register(struct rate_control_ops *ops);
1963void ieee80211_rate_control_unregister(struct rate_control_ops *ops); 2057void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
1964 2058
2059static inline bool
2060conf_is_ht20(struct ieee80211_conf *conf)
2061{
2062 return conf->channel_type == NL80211_CHAN_HT20;
2063}
2064
2065static inline bool
2066conf_is_ht40_minus(struct ieee80211_conf *conf)
2067{
2068 return conf->channel_type == NL80211_CHAN_HT40MINUS;
2069}
2070
2071static inline bool
2072conf_is_ht40_plus(struct ieee80211_conf *conf)
2073{
2074 return conf->channel_type == NL80211_CHAN_HT40PLUS;
2075}
2076
2077static inline bool
2078conf_is_ht40(struct ieee80211_conf *conf)
2079{
2080 return conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf);
2081}
2082
2083static inline bool
2084conf_is_ht(struct ieee80211_conf *conf)
2085{
2086 return conf->channel_type != NL80211_CHAN_NO_HT;
2087}
2088
1965#endif /* MAC80211_H */ 2089#endif /* MAC80211_H */
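A minimal driver-side sketch of the reworked mac80211 interfaces above: set_key() is now keyed by vif/sta rather than MAC addresses, and channel_type plus the conf_is_ht*() helpers replace struct ieee80211_ht_conf. The mywifi_* names are illustrative, not from the patch, and the config() callback is assumed to use this kernel's (hw, changed) form:

#include <linux/kernel.h>
#include <net/mac80211.h>

static int mywifi_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	/* sta is NULL for default or TX-only keys, non-NULL for pairwise keys */
	if (cmd != SET_KEY && cmd != DISABLE_KEY)
		return -EINVAL;
	return 0;
}

static int mywifi_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ieee80211_conf *conf = &hw->conf;

	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) && conf_is_ht40(conf))
		printk(KERN_DEBUG "mywifi: tuning %u MHz as HT40\n",
		       conf->channel->center_freq);
	return 0;
}

static const struct ieee80211_ops mywifi_ops = {
	.config	 = mywifi_config,
	.set_key = mywifi_set_key,
};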
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 977f482d97a..2eb3814d625 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -54,5 +54,18 @@ struct netns_ipv4 {
54 54
55 struct timer_list rt_secret_timer; 55 struct timer_list rt_secret_timer;
56 atomic_t rt_genid; 56 atomic_t rt_genid;
57
58#ifdef CONFIG_IP_MROUTE
59 struct sock *mroute_sk;
60 struct mfc_cache **mfc_cache_array;
61 struct vif_device *vif_table;
62 int maxvif;
63 atomic_t cache_resolve_queue_len;
64 int mroute_do_assert;
65 int mroute_do_pim;
66#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
67 int mroute_reg_vif_num;
68#endif
69#endif
57}; 70};
58#endif 71#endif
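With the IPv4 multicast routing state folded into struct netns_ipv4, ipmr code reaches it through a struct net pointer instead of file-scope globals. A small sketch under that assumption, with an illustrative helper name:

#include <linux/types.h>
#include <net/net_namespace.h>

#ifdef CONFIG_IP_MROUTE
/* True when a multicast routing daemon holds this namespace's mroute socket. */
static inline bool example_mroute_running(struct net *net)
{
	return net->ipv4.mroute_sk != NULL;
}
#endif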
diff --git a/include/net/netrom.h b/include/net/netrom.h
index f06852bba62..15696b1fd30 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -59,10 +59,6 @@ enum {
59#define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */ 59#define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */
60#define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */ 60#define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */
61 61
62struct nr_private {
63 struct net_device_stats stats;
64};
65
66struct nr_sock { 62struct nr_sock {
67 struct sock sock; 63 struct sock sock;
68 ax25_address user_addr, source_addr, dest_addr; 64 ax25_address user_addr, source_addr, dest_addr;
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index 057b0a8a288..d43f71b5ec0 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -105,7 +105,6 @@ void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
105 105
106int phonet_sysctl_init(void); 106int phonet_sysctl_init(void);
107void phonet_sysctl_exit(void); 107void phonet_sysctl_exit(void);
108void phonet_netlink_register(void);
109int isi_register(void); 108int isi_register(void);
110void isi_unregister(void); 109void isi_unregister(void);
111 110
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index aa1c59a1d33..5054dc5ea2c 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -28,7 +28,7 @@ struct phonet_device_list {
28 spinlock_t lock; 28 spinlock_t lock;
29}; 29};
30 30
31extern struct phonet_device_list pndevs; 31struct phonet_device_list *phonet_device_list(struct net *net);
32 32
33struct phonet_device { 33struct phonet_device {
34 struct list_head list; 34 struct list_head list;
@@ -36,8 +36,9 @@ struct phonet_device {
36 DECLARE_BITMAP(addrs, 64); 36 DECLARE_BITMAP(addrs, 64);
37}; 37};
38 38
39void phonet_device_init(void); 39int phonet_device_init(void);
40void phonet_device_exit(void); 40void phonet_device_exit(void);
41int phonet_netlink_register(void);
41struct net_device *phonet_device_get(struct net *net); 42struct net_device *phonet_device_get(struct net *net);
42 43
43int phonet_address_add(struct net_device *dev, u8 addr); 44int phonet_address_add(struct net_device *dev, u8 addr);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 4082f39f507..e37fe3129c1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -85,6 +85,7 @@ extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
85 struct nlattr *tab); 85 struct nlattr *tab);
86extern void qdisc_put_rtab(struct qdisc_rate_table *tab); 86extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
87extern void qdisc_put_stab(struct qdisc_size_table *tab); 87extern void qdisc_put_stab(struct qdisc_size_table *tab);
88extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
88 89
89extern void __qdisc_run(struct Qdisc *q); 90extern void __qdisc_run(struct Qdisc *q);
90 91
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f8c47429044..3d78a4d2246 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -42,9 +42,10 @@ struct Qdisc
42 int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); 42 int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
43 struct sk_buff * (*dequeue)(struct Qdisc *dev); 43 struct sk_buff * (*dequeue)(struct Qdisc *dev);
44 unsigned flags; 44 unsigned flags;
45#define TCQ_F_BUILTIN 1 45#define TCQ_F_BUILTIN 1
46#define TCQ_F_THROTTLED 2 46#define TCQ_F_THROTTLED 2
47#define TCQ_F_INGRESS 4 47#define TCQ_F_INGRESS 4
48#define TCQ_F_WARN_NONWC (1 << 16)
48 int padded; 49 int padded;
49 struct Qdisc_ops *ops; 50 struct Qdisc_ops *ops;
50 struct qdisc_size_table *stab; 51 struct qdisc_size_table *stab;
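The new TCQ_F_WARN_NONWC flag and qdisc_warn_nonwc() helper let a classful qdisc complain (once per child) when an inner qdisc turns out not to be work-conserving. A sketch of the intended call pattern, with illustrative naming:

#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/* Warn, tracked via TCQ_F_WARN_NONWC inside the helper, if the child reports
 * queued packets but hands back nothing on dequeue. */
static struct sk_buff *example_child_dequeue(struct Qdisc *child)
{
	struct sk_buff *skb = child->dequeue(child);

	if (!skb && child->q.qlen)
		qdisc_warn_nonwc("example_sched", child);
	return skb;
}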
diff --git a/include/net/sock.h b/include/net/sock.h
index ce3b5b62268..ded6854e3e4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -945,6 +945,11 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
945 unsigned long size, 945 unsigned long size,
946 int noblock, 946 int noblock,
947 int *errcode); 947 int *errcode);
948extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
949 unsigned long header_len,
950 unsigned long data_len,
951 int noblock,
952 int *errcode);
948extern void *sock_kmalloc(struct sock *sk, int size, 953extern void *sock_kmalloc(struct sock *sk, int size,
949 gfp_t priority); 954 gfp_t priority);
950extern void sock_kfree_s(struct sock *sk, void *mem, int size); 955extern void sock_kfree_s(struct sock *sk, void *mem, int size);
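sock_alloc_send_pskb() is now visible to protocol code via net/sock.h, so a caller can ask for an skb whose header_len bytes stay linear while data_len bytes may be backed by page fragments. A hedged usage sketch; the sizes and wrapper are illustrative:

#include <linux/err.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static struct sk_buff *example_alloc_dgram(struct sock *sk, int noblock)
{
	int err = 0;
	/* 128 linear bytes for headers, up to 8 KiB of paged payload */
	struct sk_buff *skb = sock_alloc_send_pskb(sk, 128, 8192, noblock, &err);

	return skb ? skb : ERR_PTR(err);
}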
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 876b6f2bb4f..bfb240c6cf7 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -46,7 +46,7 @@ extern int datagram_send_ctl(struct net *net,
46 struct ipv6_txoptions *opt, 46 struct ipv6_txoptions *opt,
47 int *hlimit, int *tclass); 47 int *hlimit, int *tclass);
48 48
49#define LOOPBACK4_IPV6 __constant_htonl(0x7f000006) 49#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
50 50
51/* 51/*
52 * address family specific functions 52 * address family specific functions
diff --git a/include/net/wireless.h b/include/net/wireless.h
index 21c5d966142..1c6285eb166 100644
--- a/include/net/wireless.h
+++ b/include/net/wireless.h
@@ -181,12 +181,25 @@ struct ieee80211_supported_band {
181 * struct wiphy - wireless hardware description 181 * struct wiphy - wireless hardware description
182 * @idx: the wiphy index assigned to this item 182 * @idx: the wiphy index assigned to this item
183 * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name> 183 * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name>
184 * @fw_handles_regulatory: tells us the firmware for this device 184 * @custom_regulatory: tells us the driver for this device
185 * has its own regulatory solution and cannot identify the 185 * has its own custom regulatory domain and cannot identify the
186 * ISO / IEC 3166 alpha2 it belongs to. When this is enabled 186 * ISO / IEC 3166 alpha2 it belongs to. When this is enabled
187 * we will disregard the first regulatory hint (when the 187 * we will disregard the first regulatory hint (when the
188 * initiator is %REGDOM_SET_BY_CORE). 188 * initiator is %REGDOM_SET_BY_CORE).
189 * @strict_regulatory: tells us the driver for this device will ignore
190 * regulatory domain settings until it gets its own regulatory domain
191 * via its regulatory_hint(). After it gets its own regulatory domain
192 * it will only allow further regulatory domain settings to further
193 * enhance compliance. For example, if channels 13 and 14 are disabled
194 * by this regulatory domain, no user regulatory domain can enable these
195 * channels at a later time. This can be used for devices which do not
196 * have calibration information guaranteed for frequencies or settings
197 * outside of its regulatory domain.
189 * @reg_notifier: the driver's regulatory notification callback 198 * @reg_notifier: the driver's regulatory notification callback
199 * @regd: the driver's regulatory domain, if one was requested via
200 * the regulatory_hint() API. This can be used by the driver
201 * on the reg_notifier() if it chooses to ignore future
202 * regulatory domain changes caused by other drivers.
190 */ 203 */
191struct wiphy { 204struct wiphy {
192 /* assign these fields before you register the wiphy */ 205 /* assign these fields before you register the wiphy */
@@ -197,7 +210,11 @@ struct wiphy {
197 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ 210 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
198 u16 interface_modes; 211 u16 interface_modes;
199 212
200 bool fw_handles_regulatory; 213 bool custom_regulatory;
214 bool strict_regulatory;
215
216 int bss_priv_size;
217 u8 max_scan_ssids;
201 218
202 /* If multiple wiphys are registered and you're handed e.g. 219 /* If multiple wiphys are registered and you're handed e.g.
203 * a regular netdev with assigned ieee80211_ptr, you won't 220 * a regular netdev with assigned ieee80211_ptr, you won't
@@ -209,10 +226,13 @@ struct wiphy {
209 struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; 226 struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
210 227
211 /* Lets us get back the wiphy on the callback */ 228 /* Lets us get back the wiphy on the callback */
212 int (*reg_notifier)(struct wiphy *wiphy, enum reg_set_by setby); 229 int (*reg_notifier)(struct wiphy *wiphy,
230 struct regulatory_request *request);
213 231
214 /* fields below are read-only, assigned by cfg80211 */ 232 /* fields below are read-only, assigned by cfg80211 */
215 233
234 const struct ieee80211_regdomain *regd;
235
216 /* the item in /sys/class/ieee80211/ points to this, 236 /* the item in /sys/class/ieee80211/ points to this,
217 * you need to use set_wiphy_dev() (see below) */ 237 * you need to use set_wiphy_dev() (see below) */
218 struct device dev; 238 struct device dev;
@@ -361,7 +381,7 @@ ieee80211_get_channel(struct wiphy *wiphy, int freq)
361 */ 381 */
362struct ieee80211_rate * 382struct ieee80211_rate *
363ieee80211_get_response_rate(struct ieee80211_supported_band *sband, 383ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
364 u64 basic_rates, int bitrate); 384 u32 basic_rates, int bitrate);
365 385
366/** 386/**
367 * regulatory_hint - driver hint to the wireless core a regulatory domain 387 * regulatory_hint - driver hint to the wireless core a regulatory domain
@@ -395,4 +415,45 @@ extern void regulatory_hint(struct wiphy *wiphy, const char *alpha2);
395extern void regulatory_hint_11d(struct wiphy *wiphy, 415extern void regulatory_hint_11d(struct wiphy *wiphy,
396 u8 *country_ie, 416 u8 *country_ie,
397 u8 country_ie_len); 417 u8 country_ie_len);
418
419/**
420 * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
421 * @wiphy: the wireless device we want to process the regulatory domain on
422 * @regd: the custom regulatory domain to use for this wiphy
423 *
424 * Drivers can sometimes have custom regulatory domains which do not apply
425 * to a specific country. Drivers can use this to apply such custom regulatory
426 * domains. This routine must be called prior to wiphy registration. The
427 * custom regulatory domain will be trusted completely and as such previous
428 * default channel settings will be disregarded. If no rule is found for a
429 * channel on the regulatory domain the channel will be disabled.
430 */
431extern void wiphy_apply_custom_regulatory(
432 struct wiphy *wiphy,
433 const struct ieee80211_regdomain *regd);
434
435/**
436 * freq_reg_info - get regulatory information for the given frequency
437 * @wiphy: the wiphy for which we want to process this rule
438 * @center_freq: Frequency in kHz for which we want regulatory information
439 * @bandwidth: the bandwidth requirement you have in kHz; if you do not have one
440 * you can set this to 0. If this frequency is allowed we then set
441 * this value to the maximum allowed bandwidth.
442 * @reg_rule: the regulatory rule which we have for this frequency
443 *
444 * Use this function to get the regulatory rule for a specific frequency on
445 * a given wireless device. If the device has a specific regulatory domain
446 * it wants to follow we respect that unless a country IE has been received
447 * and processed already.
448 *
449 * Returns 0 if it was able to find a valid regulatory rule which does
450 * apply to the given center_freq otherwise it returns non-zero. It will
451 * also return -ERANGE if we determine the given center_freq does not even have
452 * a regulatory rule for a frequency range in the center_freq's band. See
453 * freq_in_rule_band() for our current definition of a band -- this is purely
454 * subjective and right now it's 802.11 specific.
455 */
456extern int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
457 const struct ieee80211_reg_rule **reg_rule);
458
398#endif /* __NET_WIRELESS_H */ 459#endif /* __NET_WIRELESS_H */
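The new custom/strict regulatory hooks above are meant to be wired up before wiphy registration. A minimal sketch, assuming a driver-provided regulatory domain named mydrv_regd (not part of the patch):

#include <linux/types.h>
#include <net/wireless.h>

extern const struct ieee80211_regdomain mydrv_regd;	/* assumed, driver-defined */

static void mydrv_setup_regulatory(struct wiphy *wiphy)
{
	wiphy->custom_regulatory = true;	/* disregard the core's first hint */
	wiphy_apply_custom_regulatory(wiphy, &mydrv_regd);
}

/* Ask the core whether a centre frequency (in kHz) has a usable rule. */
static bool mydrv_channel_allowed(struct wiphy *wiphy, u32 center_khz)
{
	u32 bw = 0;	/* 0: let the core report the maximum allowed bandwidth */
	const struct ieee80211_reg_rule *rule;

	return freq_reg_info(wiphy, center_khz, &bw, &rule) == 0;
}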
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 936e333e7ce..c179318edd9 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -388,7 +388,7 @@ enum {
388 IB_MULTICAST_QPN = 0xffffff 388 IB_MULTICAST_QPN = 0xffffff
389}; 389};
390 390
391#define IB_LID_PERMISSIVE __constant_htons(0xFFFF) 391#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
392 392
393enum ib_ah_flags { 393enum ib_ah_flags {
394 IB_AH_GRH = 1 394 IB_AH_GRH = 1
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index fafeb48f27c..b38423ca711 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -219,6 +219,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, 219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, 220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, 221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
222 { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
222 {} 223 {}
223}; 224};
224 225
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 318328ddbd1..38131028d16 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -227,6 +227,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
227 NETLINK_CB(skb).dst_group = 1; 227 NETLINK_CB(skb).dst_group = 1;
228 retval = netlink_broadcast(uevent_sock, skb, 0, 1, 228 retval = netlink_broadcast(uevent_sock, skb, 0, 1,
229 GFP_KERNEL); 229 GFP_KERNEL);
230 /* ENOBUFS should be handled in userspace */
231 if (retval == -ENOBUFS)
232 retval = 0;
230 } else 233 } else
231 retval = -ENOMEM; 234 retval = -ENOMEM;
232 } 235 }
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 70980baeb68..6ed711748f2 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -51,7 +51,7 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
51 int rc = 1; 51 int rc = 1;
52 struct datalink_proto *proto; 52 struct datalink_proto *proto;
53 static struct packet_type snap_packet_type = { 53 static struct packet_type snap_packet_type = {
54 .type = __constant_htons(ETH_P_SNAP), 54 .type = cpu_to_be16(ETH_P_SNAP),
55 }; 55 };
56 56
57 if (unlikely(!pskb_may_pull(skb, 5))) 57 if (unlikely(!pskb_may_pull(skb, 5)))
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 41e8f65bd3f..4163ea65bf4 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -52,7 +52,7 @@ static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
52static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; 52static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
53 53
54static struct packet_type vlan_packet_type = { 54static struct packet_type vlan_packet_type = {
55 .type = __constant_htons(ETH_P_8021Q), 55 .type = cpu_to_be16(ETH_P_8021Q),
56 .func = vlan_skb_recv, /* VLAN receive method */ 56 .func = vlan_skb_recv, /* VLAN receive method */
57}; 57};
58 58
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e9db889d622..70435af153f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -85,7 +85,9 @@ static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
85 goto drop; 85 goto drop;
86 86
87 for (p = napi->gro_list; p; p = p->next) { 87 for (p = napi->gro_list; p; p = p->next) {
88 NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev; 88 NAPI_GRO_CB(p)->same_flow =
89 p->dev == skb->dev && !compare_ether_header(
90 skb_mac_header(p), skb_gro_mac_header(skb));
89 NAPI_GRO_CB(p)->flush = 0; 91 NAPI_GRO_CB(p)->flush = 0;
90 } 92 }
91 93
@@ -98,22 +100,9 @@ drop:
98int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, 100int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
99 unsigned int vlan_tci, struct sk_buff *skb) 101 unsigned int vlan_tci, struct sk_buff *skb)
100{ 102{
101 int err = NET_RX_SUCCESS; 103 skb_gro_reset_offset(skb);
102 104
103 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { 105 return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
104 case -1:
105 return netif_receive_skb(skb);
106
107 case 2:
108 err = NET_RX_DROP;
109 /* fall through */
110
111 case 1:
112 kfree_skb(skb);
113 break;
114 }
115
116 return err;
117} 106}
118EXPORT_SYMBOL(vlan_gro_receive); 107EXPORT_SYMBOL(vlan_gro_receive);
119 108
@@ -121,27 +110,11 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
121 unsigned int vlan_tci, struct napi_gro_fraginfo *info) 110 unsigned int vlan_tci, struct napi_gro_fraginfo *info)
122{ 111{
123 struct sk_buff *skb = napi_fraginfo_skb(napi, info); 112 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
124 int err = NET_RX_DROP;
125 113
126 if (!skb) 114 if (!skb)
127 goto out; 115 return NET_RX_DROP;
128
129 err = NET_RX_SUCCESS;
130
131 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
132 case -1:
133 return netif_receive_skb(skb);
134
135 case 2:
136 err = NET_RX_DROP;
137 /* fall through */
138
139 case 1:
140 napi_reuse_skb(napi, skb);
141 break;
142 }
143 116
144out: 117 return napi_frags_finish(napi, skb,
145 return err; 118 vlan_gro_common(napi, grp, vlan_tci, skb));
146} 119}
147EXPORT_SYMBOL(vlan_gro_frags); 120EXPORT_SYMBOL(vlan_gro_frags);
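After this rework the driver-facing entry points are unchanged; the result handling simply moves into the shared napi_skb_finish()/napi_frags_finish() helpers, and GRO flow matching now also compares the Ethernet header. A sketch of the driver-side receive path, with illustrative names:

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

static void example_rx_one(struct napi_struct *napi, struct vlan_group *vlgrp,
			   u16 vlan_tci, bool tagged, struct sk_buff *skb)
{
	if (tagged && vlgrp)
		vlan_gro_receive(napi, vlgrp, vlan_tci, skb);
	else
		napi_gro_receive(napi, skb);
}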
diff --git a/net/Kconfig b/net/Kconfig
index cdb8fdef6c4..a12bae0e3fe 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -185,6 +185,7 @@ source "net/x25/Kconfig"
185source "net/lapb/Kconfig" 185source "net/lapb/Kconfig"
186source "net/econet/Kconfig" 186source "net/econet/Kconfig"
187source "net/wanrouter/Kconfig" 187source "net/wanrouter/Kconfig"
188source "net/phonet/Kconfig"
188source "net/sched/Kconfig" 189source "net/sched/Kconfig"
189source "net/dcb/Kconfig" 190source "net/dcb/Kconfig"
190 191
@@ -229,7 +230,6 @@ source "net/can/Kconfig"
229source "net/irda/Kconfig" 230source "net/irda/Kconfig"
230source "net/bluetooth/Kconfig" 231source "net/bluetooth/Kconfig"
231source "net/rxrpc/Kconfig" 232source "net/rxrpc/Kconfig"
232source "net/phonet/Kconfig"
233 233
234config FIB_RULES 234config FIB_RULES
235 bool 235 bool
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 5abce07fb50..510a6782da8 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1861,12 +1861,12 @@ static struct notifier_block ddp_notifier = {
1861}; 1861};
1862 1862
1863static struct packet_type ltalk_packet_type = { 1863static struct packet_type ltalk_packet_type = {
1864 .type = __constant_htons(ETH_P_LOCALTALK), 1864 .type = cpu_to_be16(ETH_P_LOCALTALK),
1865 .func = ltalk_rcv, 1865 .func = ltalk_rcv,
1866}; 1866};
1867 1867
1868static struct packet_type ppptalk_packet_type = { 1868static struct packet_type ppptalk_packet_type = {
1869 .type = __constant_htons(ETH_P_PPPTALK), 1869 .type = cpu_to_be16(ETH_P_PPPTALK),
1870 .func = atalk_rcv, 1870 .func = atalk_rcv,
1871}; 1871};
1872 1872
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c
index d856a62ab50..72277d70c98 100644
--- a/net/appletalk/dev.c
+++ b/net/appletalk/dev.c
@@ -9,22 +9,20 @@
9#include <linux/if_arp.h> 9#include <linux/if_arp.h>
10#include <linux/if_ltalk.h> 10#include <linux/if_ltalk.h>
11 11
12#ifdef CONFIG_COMPAT_NET_DEV_OPS
12static int ltalk_change_mtu(struct net_device *dev, int mtu) 13static int ltalk_change_mtu(struct net_device *dev, int mtu)
13{ 14{
14 return -EINVAL; 15 return -EINVAL;
15} 16}
16 17#endif
17static int ltalk_mac_addr(struct net_device *dev, void *addr)
18{
19 return -EINVAL;
20}
21 18
22static void ltalk_setup(struct net_device *dev) 19static void ltalk_setup(struct net_device *dev)
23{ 20{
24 /* Fill in the fields of the device structure with localtalk-generic values. */ 21 /* Fill in the fields of the device structure with localtalk-generic values. */
25 22
23#ifdef CONFIG_COMPAT_NET_DEV_OPS
26 dev->change_mtu = ltalk_change_mtu; 24 dev->change_mtu = ltalk_change_mtu;
27 dev->set_mac_address = ltalk_mac_addr; 25#endif
28 26
29 dev->type = ARPHRD_LOCALTLK; 27 dev->type = ARPHRD_LOCALTLK;
30 dev->hard_header_len = LTALK_HLEN; 28 dev->hard_header_len = LTALK_HLEN;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index ea9438fc685..334fcd4a4ea 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -83,7 +83,6 @@ struct br2684_dev {
83 struct list_head br2684_devs; 83 struct list_head br2684_devs;
84 int number; 84 int number;
85 struct list_head brvccs; /* one device <=> one vcc (before xmas) */ 85 struct list_head brvccs; /* one device <=> one vcc (before xmas) */
86 struct net_device_stats stats;
87 int mac_was_set; 86 int mac_was_set;
88 enum br2684_payload payload; 87 enum br2684_payload payload;
89}; 88};
@@ -148,9 +147,10 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
148 * the way for multiple vcc's per itf. Returns true if we can send, 147 * the way for multiple vcc's per itf. Returns true if we can send,
149 * otherwise false 148 * otherwise false
150 */ 149 */
151static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, 150static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
152 struct br2684_vcc *brvcc) 151 struct br2684_vcc *brvcc)
153{ 152{
153 struct br2684_dev *brdev = BRPRIV(dev);
154 struct atm_vcc *atmvcc; 154 struct atm_vcc *atmvcc;
155 int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2; 155 int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2;
156 156
@@ -211,8 +211,8 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
211 } 211 }
212 atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); 212 atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
213 ATM_SKB(skb)->atm_options = atmvcc->atm_options; 213 ATM_SKB(skb)->atm_options = atmvcc->atm_options;
214 brdev->stats.tx_packets++; 214 dev->stats.tx_packets++;
215 brdev->stats.tx_bytes += skb->len; 215 dev->stats.tx_bytes += skb->len;
216 atmvcc->send(atmvcc, skb); 216 atmvcc->send(atmvcc, skb);
217 return 1; 217 return 1;
218} 218}
@@ -233,14 +233,14 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
233 brvcc = pick_outgoing_vcc(skb, brdev); 233 brvcc = pick_outgoing_vcc(skb, brdev);
234 if (brvcc == NULL) { 234 if (brvcc == NULL) {
235 pr_debug("no vcc attached to dev %s\n", dev->name); 235 pr_debug("no vcc attached to dev %s\n", dev->name);
236 brdev->stats.tx_errors++; 236 dev->stats.tx_errors++;
237 brdev->stats.tx_carrier_errors++; 237 dev->stats.tx_carrier_errors++;
238 /* netif_stop_queue(dev); */ 238 /* netif_stop_queue(dev); */
239 dev_kfree_skb(skb); 239 dev_kfree_skb(skb);
240 read_unlock(&devs_lock); 240 read_unlock(&devs_lock);
241 return 0; 241 return 0;
242 } 242 }
243 if (!br2684_xmit_vcc(skb, brdev, brvcc)) { 243 if (!br2684_xmit_vcc(skb, dev, brvcc)) {
244 /* 244 /*
245 * We should probably use netif_*_queue() here, but that 245 * We should probably use netif_*_queue() here, but that
246 * involves added complication. We need to walk before 246 * involves added complication. We need to walk before
@@ -248,27 +248,20 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
248 * 248 *
249 * Don't free here! this pointer might be no longer valid! 249 * Don't free here! this pointer might be no longer valid!
250 */ 250 */
251 brdev->stats.tx_errors++; 251 dev->stats.tx_errors++;
252 brdev->stats.tx_fifo_errors++; 252 dev->stats.tx_fifo_errors++;
253 } 253 }
254 read_unlock(&devs_lock); 254 read_unlock(&devs_lock);
255 return 0; 255 return 0;
256} 256}
257 257
258static struct net_device_stats *br2684_get_stats(struct net_device *dev)
259{
260 pr_debug("br2684_get_stats\n");
261 return &BRPRIV(dev)->stats;
262}
263
264/* 258/*
265 * We remember when the MAC gets set, so we don't override it later with 259 * We remember when the MAC gets set, so we don't override it later with
266 * the ESI of the ATM card of the first VC 260 * the ESI of the ATM card of the first VC
267 */ 261 */
268static int (*my_eth_mac_addr) (struct net_device *, void *);
269static int br2684_mac_addr(struct net_device *dev, void *p) 262static int br2684_mac_addr(struct net_device *dev, void *p)
270{ 263{
271 int err = my_eth_mac_addr(dev, p); 264 int err = eth_mac_addr(dev, p);
272 if (!err) 265 if (!err)
273 BRPRIV(dev)->mac_was_set = 1; 266 BRPRIV(dev)->mac_was_set = 1;
274 return err; 267 return err;
@@ -430,17 +423,17 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
430 /* sigh, interface is down? */ 423 /* sigh, interface is down? */
431 if (unlikely(!(net_dev->flags & IFF_UP))) 424 if (unlikely(!(net_dev->flags & IFF_UP)))
432 goto dropped; 425 goto dropped;
433 brdev->stats.rx_packets++; 426 net_dev->stats.rx_packets++;
434 brdev->stats.rx_bytes += skb->len; 427 net_dev->stats.rx_bytes += skb->len;
435 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); 428 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
436 netif_rx(skb); 429 netif_rx(skb);
437 return; 430 return;
438 431
439dropped: 432dropped:
440 brdev->stats.rx_dropped++; 433 net_dev->stats.rx_dropped++;
441 goto free_skb; 434 goto free_skb;
442error: 435error:
443 brdev->stats.rx_errors++; 436 net_dev->stats.rx_errors++;
444free_skb: 437free_skb:
445 dev_kfree_skb(skb); 438 dev_kfree_skb(skb);
446 return; 439 return;
@@ -531,8 +524,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
531 524
532 skb->next = skb->prev = NULL; 525 skb->next = skb->prev = NULL;
533 br2684_push(atmvcc, skb); 526 br2684_push(atmvcc, skb);
534 BRPRIV(skb->dev)->stats.rx_bytes -= skb->len; 527 skb->dev->stats.rx_bytes -= skb->len;
535 BRPRIV(skb->dev)->stats.rx_packets--; 528 skb->dev->stats.rx_packets--;
536 529
537 skb = next; 530 skb = next;
538 } 531 }
@@ -544,17 +537,20 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
544 return err; 537 return err;
545} 538}
546 539
540static const struct net_device_ops br2684_netdev_ops = {
541 .ndo_start_xmit = br2684_start_xmit,
542 .ndo_set_mac_address = br2684_mac_addr,
543 .ndo_change_mtu = eth_change_mtu,
544 .ndo_validate_addr = eth_validate_addr,
545};
546
547static void br2684_setup(struct net_device *netdev) 547static void br2684_setup(struct net_device *netdev)
548{ 548{
549 struct br2684_dev *brdev = BRPRIV(netdev); 549 struct br2684_dev *brdev = BRPRIV(netdev);
550 550
551 ether_setup(netdev); 551 ether_setup(netdev);
552 brdev->net_dev = netdev;
553 552
554 my_eth_mac_addr = netdev->set_mac_address; 553 netdev->netdev_ops = &br2684_netdev_ops;
555 netdev->set_mac_address = br2684_mac_addr;
556 netdev->hard_start_xmit = br2684_start_xmit;
557 netdev->get_stats = br2684_get_stats;
558 554
559 INIT_LIST_HEAD(&brdev->brvccs); 555 INIT_LIST_HEAD(&brdev->brvccs);
560} 556}
@@ -565,10 +561,8 @@ static void br2684_setup_routed(struct net_device *netdev)
565 brdev->net_dev = netdev; 561 brdev->net_dev = netdev;
566 562
567 netdev->hard_header_len = 0; 563 netdev->hard_header_len = 0;
568 my_eth_mac_addr = netdev->set_mac_address; 564
569 netdev->set_mac_address = br2684_mac_addr; 565 netdev->netdev_ops = &br2684_netdev_ops;
570 netdev->hard_start_xmit = br2684_start_xmit;
571 netdev->get_stats = br2684_get_stats;
572 netdev->addr_len = 0; 566 netdev->addr_len = 0;
573 netdev->mtu = 1500; 567 netdev->mtu = 1500;
574 netdev->type = ARPHRD_PPP; 568 netdev->type = ARPHRD_PPP;
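The br2684 changes above follow the generic net_device_ops conversion: the per-device hard_start_xmit/set_mac_address/get_stats pointers give way to one shared const ops table, and counters move to the core's dev->stats. A generic sketch of that pattern; the exdev_* names are illustrative:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int exdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;		/* no private stats copy needed */
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops exdev_netdev_ops = {
	.ndo_start_xmit		= exdev_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static void exdev_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &exdev_netdev_ops;
}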
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 2d33a83be79..da42fd06b61 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -214,15 +214,15 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
214 skb->protocol = ((__be16 *) skb->data)[3]; 214 skb->protocol = ((__be16 *) skb->data)[3];
215 skb_pull(skb, RFC1483LLC_LEN); 215 skb_pull(skb, RFC1483LLC_LEN);
216 if (skb->protocol == htons(ETH_P_ARP)) { 216 if (skb->protocol == htons(ETH_P_ARP)) {
217 PRIV(skb->dev)->stats.rx_packets++; 217 skb->dev->stats.rx_packets++;
218 PRIV(skb->dev)->stats.rx_bytes += skb->len; 218 skb->dev->stats.rx_bytes += skb->len;
219 clip_arp_rcv(skb); 219 clip_arp_rcv(skb);
220 return; 220 return;
221 } 221 }
222 } 222 }
223 clip_vcc->last_use = jiffies; 223 clip_vcc->last_use = jiffies;
224 PRIV(skb->dev)->stats.rx_packets++; 224 skb->dev->stats.rx_packets++;
225 PRIV(skb->dev)->stats.rx_bytes += skb->len; 225 skb->dev->stats.rx_bytes += skb->len;
226 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); 226 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
227 netif_rx(skb); 227 netif_rx(skb);
228} 228}
@@ -372,7 +372,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
372 if (!skb->dst) { 372 if (!skb->dst) {
373 printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n"); 373 printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
374 dev_kfree_skb(skb); 374 dev_kfree_skb(skb);
375 clip_priv->stats.tx_dropped++; 375 dev->stats.tx_dropped++;
376 return 0; 376 return 0;
377 } 377 }
378 if (!skb->dst->neighbour) { 378 if (!skb->dst->neighbour) {
@@ -380,13 +380,13 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
380 skb->dst->neighbour = clip_find_neighbour(skb->dst, 1); 380 skb->dst->neighbour = clip_find_neighbour(skb->dst, 1);
381 if (!skb->dst->neighbour) { 381 if (!skb->dst->neighbour) {
382 dev_kfree_skb(skb); /* lost that one */ 382 dev_kfree_skb(skb); /* lost that one */
383 clip_priv->stats.tx_dropped++; 383 dev->stats.tx_dropped++;
384 return 0; 384 return 0;
385 } 385 }
386#endif 386#endif
387 printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); 387 printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
388 dev_kfree_skb(skb); 388 dev_kfree_skb(skb);
389 clip_priv->stats.tx_dropped++; 389 dev->stats.tx_dropped++;
390 return 0; 390 return 0;
391 } 391 }
392 entry = NEIGH2ENTRY(skb->dst->neighbour); 392 entry = NEIGH2ENTRY(skb->dst->neighbour);
@@ -400,7 +400,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
400 skb_queue_tail(&entry->neigh->arp_queue, skb); 400 skb_queue_tail(&entry->neigh->arp_queue, skb);
401 else { 401 else {
402 dev_kfree_skb(skb); 402 dev_kfree_skb(skb);
403 clip_priv->stats.tx_dropped++; 403 dev->stats.tx_dropped++;
404 } 404 }
405 return 0; 405 return 0;
406 } 406 }
@@ -423,8 +423,8 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
423 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); 423 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
424 return 0; 424 return 0;
425 } 425 }
426 clip_priv->stats.tx_packets++; 426 dev->stats.tx_packets++;
427 clip_priv->stats.tx_bytes += skb->len; 427 dev->stats.tx_bytes += skb->len;
428 vcc->send(vcc, skb); 428 vcc->send(vcc, skb);
429 if (atm_may_send(vcc, 0)) { 429 if (atm_may_send(vcc, 0)) {
430 entry->vccs->xoff = 0; 430 entry->vccs->xoff = 0;
@@ -443,11 +443,6 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
443 return 0; 443 return 0;
444} 444}
445 445
446static struct net_device_stats *clip_get_stats(struct net_device *dev)
447{
448 return &PRIV(dev)->stats;
449}
450
451static int clip_mkip(struct atm_vcc *vcc, int timeout) 446static int clip_mkip(struct atm_vcc *vcc, int timeout)
452{ 447{
453 struct clip_vcc *clip_vcc; 448 struct clip_vcc *clip_vcc;
@@ -501,8 +496,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
501 496
502 skb_get(skb); 497 skb_get(skb);
503 clip_push(vcc, skb); 498 clip_push(vcc, skb);
504 PRIV(skb->dev)->stats.rx_packets--; 499 skb->dev->stats.rx_packets--;
505 PRIV(skb->dev)->stats.rx_bytes -= len; 500 skb->dev->stats.rx_bytes -= len;
506 kfree_skb(skb); 501 kfree_skb(skb);
507 } 502 }
508 503
@@ -561,7 +556,6 @@ static void clip_setup(struct net_device *dev)
561{ 556{
562 dev->hard_start_xmit = clip_start_xmit; 557 dev->hard_start_xmit = clip_start_xmit;
563 /* sg_xmit ... */ 558 /* sg_xmit ... */
564 dev->get_stats = clip_get_stats;
565 dev->type = ARPHRD_ATM; 559 dev->type = ARPHRD_ATM;
566 dev->hard_header_len = RFC1483LLC_LEN; 560 dev->hard_header_len = RFC1483LLC_LEN;
567 dev->mtu = RFC1626_MTU; 561 dev->mtu = RFC1626_MTU;
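In the clip.c hunks above the private struct net_device_stats copy and the clip_get_stats() accessor disappear; counters are kept in dev->stats, which the core reports when no get_stats hook is installed. A hedged before/after sketch, with illustrative foo_* names:

/* Before: each device kept its own counters plus a trivial accessor. */
struct foo_priv {
	struct net_device_stats stats;		/* duplicated state */
};

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
	return &((struct foo_priv *)netdev_priv(dev))->stats;
}

/* After: update dev->stats directly and let the core hand it out. */
static void foo_rx(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);
}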
diff --git a/net/atm/lec.c b/net/atm/lec.c
index e5e301550e8..c0cba9a037e 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -62,7 +62,6 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
62static int lec_open(struct net_device *dev); 62static int lec_open(struct net_device *dev);
63static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev); 63static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev);
64static int lec_close(struct net_device *dev); 64static int lec_close(struct net_device *dev);
65static struct net_device_stats *lec_get_stats(struct net_device *dev);
66static void lec_init(struct net_device *dev); 65static void lec_init(struct net_device *dev);
67static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, 66static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
68 const unsigned char *mac_addr); 67 const unsigned char *mac_addr);
@@ -218,28 +217,28 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
218 217
219static int lec_open(struct net_device *dev) 218static int lec_open(struct net_device *dev)
220{ 219{
221 struct lec_priv *priv = netdev_priv(dev);
222
223 netif_start_queue(dev); 220 netif_start_queue(dev);
224 memset(&priv->stats, 0, sizeof(struct net_device_stats)); 221 memset(&dev->stats, 0, sizeof(struct net_device_stats));
225 222
226 return 0; 223 return 0;
227} 224}
228 225
229static __inline__ void 226static void
230lec_send(struct atm_vcc *vcc, struct sk_buff *skb, struct lec_priv *priv) 227lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
231{ 228{
229 struct net_device *dev = skb->dev;
230
232 ATM_SKB(skb)->vcc = vcc; 231 ATM_SKB(skb)->vcc = vcc;
233 ATM_SKB(skb)->atm_options = vcc->atm_options; 232 ATM_SKB(skb)->atm_options = vcc->atm_options;
234 233
235 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 234 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
236 if (vcc->send(vcc, skb) < 0) { 235 if (vcc->send(vcc, skb) < 0) {
237 priv->stats.tx_dropped++; 236 dev->stats.tx_dropped++;
238 return; 237 return;
239 } 238 }
240 239
241 priv->stats.tx_packets++; 240 dev->stats.tx_packets++;
242 priv->stats.tx_bytes += skb->len; 241 dev->stats.tx_bytes += skb->len;
243} 242}
244 243
245static void lec_tx_timeout(struct net_device *dev) 244static void lec_tx_timeout(struct net_device *dev)
@@ -270,7 +269,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
270 pr_debug("lec_start_xmit called\n"); 269 pr_debug("lec_start_xmit called\n");
271 if (!priv->lecd) { 270 if (!priv->lecd) {
272 printk("%s:No lecd attached\n", dev->name); 271 printk("%s:No lecd attached\n", dev->name);
273 priv->stats.tx_errors++; 272 dev->stats.tx_errors++;
274 netif_stop_queue(dev); 273 netif_stop_queue(dev);
275 return -EUNATCH; 274 return -EUNATCH;
276 } 275 }
@@ -345,7 +344,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
345 GFP_ATOMIC); 344 GFP_ATOMIC);
346 dev_kfree_skb(skb); 345 dev_kfree_skb(skb);
347 if (skb2 == NULL) { 346 if (skb2 == NULL) {
348 priv->stats.tx_dropped++; 347 dev->stats.tx_dropped++;
349 return 0; 348 return 0;
350 } 349 }
351 skb = skb2; 350 skb = skb2;
@@ -380,7 +379,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
380 ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", 379 ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ",
381 dev->name); 380 dev->name);
382 pr_debug("MAC address %pM\n", lec_h->h_dest); 381 pr_debug("MAC address %pM\n", lec_h->h_dest);
383 priv->stats.tx_dropped++; 382 dev->stats.tx_dropped++;
384 dev_kfree_skb(skb); 383 dev_kfree_skb(skb);
385 } 384 }
386 goto out; 385 goto out;
@@ -392,10 +391,10 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
392 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { 391 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
393 pr_debug("lec.c: emptying tx queue, "); 392 pr_debug("lec.c: emptying tx queue, ");
394 pr_debug("MAC address %pM\n", lec_h->h_dest); 393 pr_debug("MAC address %pM\n", lec_h->h_dest);
395 lec_send(vcc, skb2, priv); 394 lec_send(vcc, skb2);
396 } 395 }
397 396
398 lec_send(vcc, skb, priv); 397 lec_send(vcc, skb);
399 398
400 if (!atm_may_send(vcc, 0)) { 399 if (!atm_may_send(vcc, 0)) {
401 struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); 400 struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
@@ -427,15 +426,6 @@ static int lec_close(struct net_device *dev)
427 return 0; 426 return 0;
428} 427}
429 428
430/*
431 * Get the current statistics.
432 * This may be called with the card open or closed.
433 */
434static struct net_device_stats *lec_get_stats(struct net_device *dev)
435{
436 return &((struct lec_priv *)netdev_priv(dev))->stats;
437}
438
439static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) 429static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
440{ 430{
441 unsigned long flags; 431 unsigned long flags;
@@ -677,17 +667,19 @@ static void lec_set_multicast_list(struct net_device *dev)
677 return; 667 return;
678} 668}
679 669
670static const struct net_device_ops lec_netdev_ops = {
671 .ndo_open = lec_open,
672 .ndo_stop = lec_close,
673 .ndo_start_xmit = lec_start_xmit,
674 .ndo_change_mtu = lec_change_mtu,
675 .ndo_tx_timeout = lec_tx_timeout,
676 .ndo_set_multicast_list = lec_set_multicast_list,
677};
678
679
680static void lec_init(struct net_device *dev) 680static void lec_init(struct net_device *dev)
681{ 681{
682 dev->change_mtu = lec_change_mtu; 682 dev->netdev_ops = &lec_netdev_ops;
683 dev->open = lec_open;
684 dev->stop = lec_close;
685 dev->hard_start_xmit = lec_start_xmit;
686 dev->tx_timeout = lec_tx_timeout;
687
688 dev->get_stats = lec_get_stats;
689 dev->set_multicast_list = lec_set_multicast_list;
690 dev->do_ioctl = NULL;
691 printk("%s: Initialized!\n", dev->name); 683 printk("%s: Initialized!\n", dev->name);
692} 684}
693 685
@@ -810,8 +802,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
810 else 802 else
811#endif 803#endif
812 skb->protocol = eth_type_trans(skb, dev); 804 skb->protocol = eth_type_trans(skb, dev);
813 priv->stats.rx_packets++; 805 dev->stats.rx_packets++;
814 priv->stats.rx_bytes += skb->len; 806 dev->stats.rx_bytes += skb->len;
815 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); 807 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
816 netif_rx(skb); 808 netif_rx(skb);
817 } 809 }
@@ -1887,7 +1879,7 @@ restart:
1887 lec_arp_hold(entry); 1879 lec_arp_hold(entry);
1888 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 1880 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1889 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) 1881 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
1890 lec_send(vcc, skb, entry->priv); 1882 lec_send(vcc, skb);
1891 entry->last_used = jiffies; 1883 entry->last_used = jiffies;
1892 entry->status = ESI_FORWARD_DIRECT; 1884 entry->status = ESI_FORWARD_DIRECT;
1893 lec_arp_put(entry); 1885 lec_arp_put(entry);
@@ -2305,7 +2297,7 @@ restart:
2305 lec_arp_hold(entry); 2297 lec_arp_hold(entry);
2306 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2298 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2307 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) 2299 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
2308 lec_send(vcc, skb, entry->priv); 2300 lec_send(vcc, skb);
2309 entry->last_used = jiffies; 2301 entry->last_used = jiffies;
2310 entry->status = ESI_FORWARD_DIRECT; 2302 entry->status = ESI_FORWARD_DIRECT;
2311 lec_arp_put(entry); 2303 lec_arp_put(entry);
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 0d376682c1a..9d14d196cc1 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -69,7 +69,6 @@ struct lane2_ops {
69#define LEC_ARP_TABLE_SIZE 16 69#define LEC_ARP_TABLE_SIZE 16
70 70
71struct lec_priv { 71struct lec_priv {
72 struct net_device_stats stats;
73 unsigned short lecid; /* Lecid of this client */ 72 unsigned short lecid; /* Lecid of this client */
74 struct hlist_head lec_arp_empty_ones; 73 struct hlist_head lec_arp_empty_ones;
75 /* Used for storing VCC's that don't have a MAC address attached yet */ 74 /* Used for storing VCC's that don't have a MAC address attached yet */
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 00d9e5e1315..d127fd3ba5c 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1986,7 +1986,7 @@ static const struct proto_ops ax25_proto_ops = {
1986 * Called by socket.c on kernel start up 1986 * Called by socket.c on kernel start up
1987 */ 1987 */
1988static struct packet_type ax25_packet_type = { 1988static struct packet_type ax25_packet_type = {
1989 .type = __constant_htons(ETH_P_AX25), 1989 .type = cpu_to_be16(ETH_P_AX25),
1990 .dev = NULL, /* All devices */ 1990 .dev = NULL, /* All devices */
1991 .func = ax25_kiss_rcv, 1991 .func = ax25_kiss_rcv,
1992}; 1992};
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 8443af57a37..71338f11210 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -61,27 +61,24 @@ void ax25_protocol_release(unsigned int pid)
61 61
62 write_lock_bh(&protocol_list_lock); 62 write_lock_bh(&protocol_list_lock);
63 protocol = protocol_list; 63 protocol = protocol_list;
64 if (protocol == NULL) { 64 if (protocol == NULL)
65 write_unlock_bh(&protocol_list_lock); 65 goto out;
66 return;
67 }
68 66
69 if (protocol->pid == pid) { 67 if (protocol->pid == pid) {
70 protocol_list = protocol->next; 68 protocol_list = protocol->next;
71 write_unlock_bh(&protocol_list_lock); 69 goto out;
72 return;
73 } 70 }
74 71
75 while (protocol != NULL && protocol->next != NULL) { 72 while (protocol != NULL && protocol->next != NULL) {
76 if (protocol->next->pid == pid) { 73 if (protocol->next->pid == pid) {
77 s = protocol->next; 74 s = protocol->next;
78 protocol->next = protocol->next->next; 75 protocol->next = protocol->next->next;
79 write_unlock_bh(&protocol_list_lock); 76 goto out;
80 return;
81 } 77 }
82 78
83 protocol = protocol->next; 79 protocol = protocol->next;
84 } 80 }
81out:
85 write_unlock_bh(&protocol_list_lock); 82 write_unlock_bh(&protocol_list_lock);
86} 83}
87 84
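The ax25_protocol_release() rework above funnels the three unlock-and-return paths through a single out: label so the write lock is dropped in exactly one place. A self-contained sketch of that idiom, using an illustrative list that is not the real ax25 protocol list:

/* Single-exit locking pattern from the ax25_protocol_release rework above.
 * The list and its lock are illustrative. */
struct proto_entry {
	unsigned int pid;
	struct proto_entry *next;
};

static struct proto_entry *proto_list;
static DEFINE_RWLOCK(proto_list_lock);

static void proto_release(unsigned int pid)
{
	struct proto_entry **pp;

	write_lock_bh(&proto_list_lock);
	for (pp = &proto_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->pid == pid) {
			*pp = (*pp)->next;	/* unlink */
			goto out;		/* every path funnels here */
		}
	}
out:
	write_unlock_bh(&proto_list_lock);	/* lock dropped in one place */
}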
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index cf754ace0b7..3953ac4214c 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -107,7 +107,7 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
107 107
108static struct dst_ops fake_dst_ops = { 108static struct dst_ops fake_dst_ops = {
109 .family = AF_INET, 109 .family = AF_INET,
110 .protocol = __constant_htons(ETH_P_IP), 110 .protocol = cpu_to_be16(ETH_P_IP),
111 .update_pmtu = fake_update_pmtu, 111 .update_pmtu = fake_update_pmtu,
112 .entries = ATOMIC_INIT(0), 112 .entries = ATOMIC_INIT(0),
113}; 113};
diff --git a/net/can/af_can.c b/net/can/af_can.c
index fa417ca6cbe..d90e8dd975f 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -828,7 +828,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
828 */ 828 */
829 829
830static struct packet_type can_packet __read_mostly = { 830static struct packet_type can_packet __read_mostly = {
831 .type = __constant_htons(ETH_P_CAN), 831 .type = cpu_to_be16(ETH_P_CAN),
832 .dev = NULL, 832 .dev = NULL,
833 .func = can_rcv, 833 .func = can_rcv,
834}; 834};
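The packet_type and dst_ops initialisers above move from __constant_htons() to cpu_to_be16(); both fold to the same big-endian __be16 constant at compile time, cpu_to_be16() simply being the preferred spelling. Illustrative sketch (receive handler omitted):

/* Illustrative only: cpu_to_be16() yields the same __be16 constant that
 * __constant_htons() produced, so it can seed a static initialiser. */
static struct packet_type example_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* was __constant_htons(ETH_P_IP) */
	.dev  = NULL,			/* all devices */
	/* .func (the receive handler) omitted in this sketch */
};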
diff --git a/net/core/dev.c b/net/core/dev.c
index a17e0066236..1e27a67df24 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -135,6 +135,14 @@
135/* This should be increased if a protocol with a bigger head is added. */ 135/* This should be increased if a protocol with a bigger head is added. */
136#define GRO_MAX_HEAD (MAX_HEADER + 128) 136#define GRO_MAX_HEAD (MAX_HEADER + 128)
137 137
138enum {
139 GRO_MERGED,
140 GRO_MERGED_FREE,
141 GRO_HELD,
142 GRO_NORMAL,
143 GRO_DROP,
144};
145
138/* 146/*
139 * The list of packet types we will receive (as opposed to discard) 147 * The list of packet types we will receive (as opposed to discard)
140 * and the routines to invoke. 148 * and the routines to invoke.
@@ -1708,56 +1716,26 @@ out_kfree_skb:
1708 return 0; 1716 return 0;
1709} 1717}
1710 1718
1711static u32 simple_tx_hashrnd; 1719static u32 skb_tx_hashrnd;
1712static int simple_tx_hashrnd_initialized = 0; 1720static int skb_tx_hashrnd_initialized = 0;
1713 1721
1714static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) 1722static u16 skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
1715{ 1723{
1716 u32 addr1, addr2, ports; 1724 u32 hash;
1717 u32 hash, ihl;
1718 u8 ip_proto = 0;
1719 1725
1720 if (unlikely(!simple_tx_hashrnd_initialized)) { 1726 if (unlikely(!skb_tx_hashrnd_initialized)) {
1721 get_random_bytes(&simple_tx_hashrnd, 4); 1727 get_random_bytes(&skb_tx_hashrnd, 4);
1722 simple_tx_hashrnd_initialized = 1; 1728 skb_tx_hashrnd_initialized = 1;
1723 } 1729 }
1724 1730
1725 switch (skb->protocol) { 1731 if (skb_rx_queue_recorded(skb)) {
1726 case htons(ETH_P_IP): 1732 hash = skb_get_rx_queue(skb);
1727 if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) 1733 } else if (skb->sk && skb->sk->sk_hash) {
1728 ip_proto = ip_hdr(skb)->protocol; 1734 hash = skb->sk->sk_hash;
1729 addr1 = ip_hdr(skb)->saddr; 1735 } else
1730 addr2 = ip_hdr(skb)->daddr; 1736 hash = skb->protocol;
1731 ihl = ip_hdr(skb)->ihl;
1732 break;
1733 case htons(ETH_P_IPV6):
1734 ip_proto = ipv6_hdr(skb)->nexthdr;
1735 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1736 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
1737 ihl = (40 >> 2);
1738 break;
1739 default:
1740 return 0;
1741 }
1742
1743
1744 switch (ip_proto) {
1745 case IPPROTO_TCP:
1746 case IPPROTO_UDP:
1747 case IPPROTO_DCCP:
1748 case IPPROTO_ESP:
1749 case IPPROTO_AH:
1750 case IPPROTO_SCTP:
1751 case IPPROTO_UDPLITE:
1752 ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
1753 break;
1754
1755 default:
1756 ports = 0;
1757 break;
1758 }
1759 1737
1760 hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd); 1738 hash = jhash_1word(hash, skb_tx_hashrnd);
1761 1739
1762 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); 1740 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1763} 1741}
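The rewritten skb_tx_hash() above stops parsing IP and transport headers: it reuses a recorded rx queue, else the socket hash, else the protocol number, mixes that through jhash_1word() with a boot-time random seed, and scales the 32-bit result onto the device's tx queues. The scaling step as a standalone, runnable sketch:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit flow hash onto one of n tx queues without a modulo, exactly
 * as the (hash * real_num_tx_queues) >> 32 expression above does. */
static uint16_t pick_tx_queue(uint32_t hash, uint16_t num_tx_queues)
{
	return (uint16_t)(((uint64_t)hash * num_tx_queues) >> 32);
}

int main(void)
{
	/* Example: a hash of 0x80000000 (half of the 32-bit range)
	 * lands in the middle queue of an 8-queue device. */
	printf("%u\n", pick_tx_queue(0x80000000u, 8));	/* prints 4 */
	return 0;
}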
@@ -1771,7 +1749,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1771 if (ops->ndo_select_queue) 1749 if (ops->ndo_select_queue)
1772 queue_index = ops->ndo_select_queue(dev, skb); 1750 queue_index = ops->ndo_select_queue(dev, skb);
1773 else if (dev->real_num_tx_queues > 1) 1751 else if (dev->real_num_tx_queues > 1)
1774 queue_index = simple_tx_hash(dev, skb); 1752 queue_index = skb_tx_hash(dev, skb);
1775 1753
1776 skb_set_queue_mapping(skb, queue_index); 1754 skb_set_queue_mapping(skb, queue_index);
1777 return netdev_get_tx_queue(dev, queue_index); 1755 return netdev_get_tx_queue(dev, queue_index);
@@ -2303,6 +2281,8 @@ ncls:
2303 if (!skb) 2281 if (!skb)
2304 goto out; 2282 goto out;
2305 2283
2284 skb_orphan(skb);
2285
2306 type = skb->protocol; 2286 type = skb->protocol;
2307 list_for_each_entry_rcu(ptype, 2287 list_for_each_entry_rcu(ptype,
2308 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2288 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
@@ -2372,7 +2352,6 @@ static int napi_gro_complete(struct sk_buff *skb)
2372 2352
2373out: 2353out:
2374 skb_shinfo(skb)->gso_size = 0; 2354 skb_shinfo(skb)->gso_size = 0;
2375 __skb_push(skb, -skb_network_offset(skb));
2376 return netif_receive_skb(skb); 2355 return netif_receive_skb(skb);
2377} 2356}
2378 2357
@@ -2386,20 +2365,40 @@ void napi_gro_flush(struct napi_struct *napi)
2386 napi_gro_complete(skb); 2365 napi_gro_complete(skb);
2387 } 2366 }
2388 2367
2368 napi->gro_count = 0;
2389 napi->gro_list = NULL; 2369 napi->gro_list = NULL;
2390} 2370}
2391EXPORT_SYMBOL(napi_gro_flush); 2371EXPORT_SYMBOL(napi_gro_flush);
2392 2372
2373void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
2374{
2375 unsigned int offset = skb_gro_offset(skb);
2376
2377 hlen += offset;
2378 if (hlen <= skb_headlen(skb))
2379 return skb->data + offset;
2380
2381 if (unlikely(!skb_shinfo(skb)->nr_frags ||
2382 skb_shinfo(skb)->frags[0].size <=
2383 hlen - skb_headlen(skb) ||
2384 PageHighMem(skb_shinfo(skb)->frags[0].page)))
2385 return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
2386
2387 return page_address(skb_shinfo(skb)->frags[0].page) +
2388 skb_shinfo(skb)->frags[0].page_offset +
2389 offset - skb_headlen(skb);
2390}
2391EXPORT_SYMBOL(skb_gro_header);
2392
2393int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2393int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2394{ 2394{
2395 struct sk_buff **pp = NULL; 2395 struct sk_buff **pp = NULL;
2396 struct packet_type *ptype; 2396 struct packet_type *ptype;
2397 __be16 type = skb->protocol; 2397 __be16 type = skb->protocol;
2398 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 2398 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2399 int count = 0;
2400 int same_flow; 2399 int same_flow;
2401 int mac_len; 2400 int mac_len;
2402 int free; 2401 int ret;
2403 2402
2404 if (!(skb->dev->features & NETIF_F_GRO)) 2403 if (!(skb->dev->features & NETIF_F_GRO))
2405 goto normal; 2404 goto normal;
@@ -2409,30 +2408,16 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2409 2408
2410 rcu_read_lock(); 2409 rcu_read_lock();
2411 list_for_each_entry_rcu(ptype, head, list) { 2410 list_for_each_entry_rcu(ptype, head, list) {
2412 struct sk_buff *p;
2413
2414 if (ptype->type != type || ptype->dev || !ptype->gro_receive) 2411 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2415 continue; 2412 continue;
2416 2413
2417 skb_reset_network_header(skb); 2414 skb_set_network_header(skb, skb_gro_offset(skb));
2418 mac_len = skb->network_header - skb->mac_header; 2415 mac_len = skb->network_header - skb->mac_header;
2419 skb->mac_len = mac_len; 2416 skb->mac_len = mac_len;
2420 NAPI_GRO_CB(skb)->same_flow = 0; 2417 NAPI_GRO_CB(skb)->same_flow = 0;
2421 NAPI_GRO_CB(skb)->flush = 0; 2418 NAPI_GRO_CB(skb)->flush = 0;
2422 NAPI_GRO_CB(skb)->free = 0; 2419 NAPI_GRO_CB(skb)->free = 0;
2423 2420
2424 for (p = napi->gro_list; p; p = p->next) {
2425 count++;
2426
2427 if (!NAPI_GRO_CB(p)->same_flow)
2428 continue;
2429
2430 if (p->mac_len != mac_len ||
2431 memcmp(skb_mac_header(p), skb_mac_header(skb),
2432 mac_len))
2433 NAPI_GRO_CB(p)->same_flow = 0;
2434 }
2435
2436 pp = ptype->gro_receive(&napi->gro_list, skb); 2421 pp = ptype->gro_receive(&napi->gro_list, skb);
2437 break; 2422 break;
2438 } 2423 }
@@ -2442,7 +2427,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2442 goto normal; 2427 goto normal;
2443 2428
2444 same_flow = NAPI_GRO_CB(skb)->same_flow; 2429 same_flow = NAPI_GRO_CB(skb)->same_flow;
2445 free = NAPI_GRO_CB(skb)->free; 2430 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2446 2431
2447 if (pp) { 2432 if (pp) {
2448 struct sk_buff *nskb = *pp; 2433 struct sk_buff *nskb = *pp;
@@ -2450,27 +2435,35 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2450 *pp = nskb->next; 2435 *pp = nskb->next;
2451 nskb->next = NULL; 2436 nskb->next = NULL;
2452 napi_gro_complete(nskb); 2437 napi_gro_complete(nskb);
2453 count--; 2438 napi->gro_count--;
2454 } 2439 }
2455 2440
2456 if (same_flow) 2441 if (same_flow)
2457 goto ok; 2442 goto ok;
2458 2443
2459 if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) { 2444 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2460 __skb_push(skb, -skb_network_offset(skb));
2461 goto normal; 2445 goto normal;
2462 }
2463 2446
2447 napi->gro_count++;
2464 NAPI_GRO_CB(skb)->count = 1; 2448 NAPI_GRO_CB(skb)->count = 1;
2465 skb_shinfo(skb)->gso_size = skb->len; 2449 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2466 skb->next = napi->gro_list; 2450 skb->next = napi->gro_list;
2467 napi->gro_list = skb; 2451 napi->gro_list = skb;
2452 ret = GRO_HELD;
2453
2454pull:
2455 if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
2456 if (napi->gro_list == skb)
2457 napi->gro_list = skb->next;
2458 ret = GRO_DROP;
2459 }
2468 2460
2469ok: 2461ok:
2470 return free; 2462 return ret;
2471 2463
2472normal: 2464normal:
2473 return -1; 2465 ret = GRO_NORMAL;
2466 goto pull;
2474} 2467}
2475EXPORT_SYMBOL(dev_gro_receive); 2468EXPORT_SYMBOL(dev_gro_receive);
2476 2469
@@ -2479,25 +2472,40 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2479 struct sk_buff *p; 2472 struct sk_buff *p;
2480 2473
2481 for (p = napi->gro_list; p; p = p->next) { 2474 for (p = napi->gro_list; p; p = p->next) {
2482 NAPI_GRO_CB(p)->same_flow = 1; 2475 NAPI_GRO_CB(p)->same_flow = !compare_ether_header(
2476 skb_mac_header(p), skb_gro_mac_header(skb));
2483 NAPI_GRO_CB(p)->flush = 0; 2477 NAPI_GRO_CB(p)->flush = 0;
2484 } 2478 }
2485 2479
2486 return dev_gro_receive(napi, skb); 2480 return dev_gro_receive(napi, skb);
2487} 2481}
2488 2482
2489int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2483int napi_skb_finish(int ret, struct sk_buff *skb)
2490{ 2484{
2491 switch (__napi_gro_receive(napi, skb)) { 2485 int err = NET_RX_SUCCESS;
2492 case -1: 2486
2487 switch (ret) {
2488 case GRO_NORMAL:
2493 return netif_receive_skb(skb); 2489 return netif_receive_skb(skb);
2494 2490
2495 case 1: 2491 case GRO_DROP:
2492 err = NET_RX_DROP;
2493 /* fall through */
2494
2495 case GRO_MERGED_FREE:
2496 kfree_skb(skb); 2496 kfree_skb(skb);
2497 break; 2497 break;
2498 } 2498 }
2499 2499
2500 return NET_RX_SUCCESS; 2500 return err;
2501}
2502EXPORT_SYMBOL(napi_skb_finish);
2503
2504int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2505{
2506 skb_gro_reset_offset(skb);
2507
2508 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2501} 2509}
2502EXPORT_SYMBOL(napi_gro_receive); 2510EXPORT_SYMBOL(napi_gro_receive);
2503 2511
@@ -2515,6 +2523,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2515{ 2523{
2516 struct net_device *dev = napi->dev; 2524 struct net_device *dev = napi->dev;
2517 struct sk_buff *skb = napi->skb; 2525 struct sk_buff *skb = napi->skb;
2526 struct ethhdr *eth;
2527 skb_frag_t *frag;
2528 int i;
2518 2529
2519 napi->skb = NULL; 2530 napi->skb = NULL;
2520 2531
@@ -2527,20 +2538,36 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2527 } 2538 }
2528 2539
2529 BUG_ON(info->nr_frags > MAX_SKB_FRAGS); 2540 BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
2541 frag = &info->frags[info->nr_frags - 1];
2542
2543 for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
2544 skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
2545 frag->size);
2546 frag++;
2547 }
2530 skb_shinfo(skb)->nr_frags = info->nr_frags; 2548 skb_shinfo(skb)->nr_frags = info->nr_frags;
2531 memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
2532 2549
2533 skb->data_len = info->len; 2550 skb->data_len = info->len;
2534 skb->len += info->len; 2551 skb->len += info->len;
2535 skb->truesize += info->len; 2552 skb->truesize += info->len;
2536 2553
2537 if (!pskb_may_pull(skb, ETH_HLEN)) { 2554 skb_reset_mac_header(skb);
2555 skb_gro_reset_offset(skb);
2556
2557 eth = skb_gro_header(skb, sizeof(*eth));
2558 if (!eth) {
2538 napi_reuse_skb(napi, skb); 2559 napi_reuse_skb(napi, skb);
2539 skb = NULL; 2560 skb = NULL;
2540 goto out; 2561 goto out;
2541 } 2562 }
2542 2563
2543 skb->protocol = eth_type_trans(skb, dev); 2564 skb_gro_pull(skb, sizeof(*eth));
2565
2566 /*
2567 * This works because the only protocols we care about don't require
2568 * special handling. We'll fix it up properly at the end.
2569 */
2570 skb->protocol = eth->h_proto;
2544 2571
2545 skb->ip_summed = info->ip_summed; 2572 skb->ip_summed = info->ip_summed;
2546 skb->csum = info->csum; 2573 skb->csum = info->csum;
@@ -2550,29 +2577,43 @@ out:
2550} 2577}
2551EXPORT_SYMBOL(napi_fraginfo_skb); 2578EXPORT_SYMBOL(napi_fraginfo_skb);
2552 2579
2553int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) 2580int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2554{ 2581{
2555 struct sk_buff *skb = napi_fraginfo_skb(napi, info); 2582 int err = NET_RX_SUCCESS;
2556 int err = NET_RX_DROP;
2557 2583
2558 if (!skb) 2584 switch (ret) {
2559 goto out; 2585 case GRO_NORMAL:
2586 case GRO_HELD:
2587 skb->protocol = eth_type_trans(skb, napi->dev);
2560 2588
2561 err = NET_RX_SUCCESS; 2589 if (ret == GRO_NORMAL)
2590 return netif_receive_skb(skb);
2562 2591
2563 switch (__napi_gro_receive(napi, skb)) { 2592 skb_gro_pull(skb, -ETH_HLEN);
2564 case -1: 2593 break;
2565 return netif_receive_skb(skb);
2566 2594
2567 case 0: 2595 case GRO_DROP:
2568 goto out; 2596 err = NET_RX_DROP;
2569 } 2597 /* fall through */
2570 2598
2571 napi_reuse_skb(napi, skb); 2599 case GRO_MERGED_FREE:
2600 napi_reuse_skb(napi, skb);
2601 break;
2602 }
2572 2603
2573out:
2574 return err; 2604 return err;
2575} 2605}
2606EXPORT_SYMBOL(napi_frags_finish);
2607
2608int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2609{
2610 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
2611
2612 if (!skb)
2613 return NET_RX_DROP;
2614
2615 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2616}
2576EXPORT_SYMBOL(napi_gro_frags); 2617EXPORT_SYMBOL(napi_gro_frags);
2577 2618
2578static int process_backlog(struct napi_struct *napi, int quota) 2619static int process_backlog(struct napi_struct *napi, int quota)
@@ -2652,6 +2693,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2652 int (*poll)(struct napi_struct *, int), int weight) 2693 int (*poll)(struct napi_struct *, int), int weight)
2653{ 2694{
2654 INIT_LIST_HEAD(&napi->poll_list); 2695 INIT_LIST_HEAD(&napi->poll_list);
2696 napi->gro_count = 0;
2655 napi->gro_list = NULL; 2697 napi->gro_list = NULL;
2656 napi->skb = NULL; 2698 napi->skb = NULL;
2657 napi->poll = poll; 2699 napi->poll = poll;
@@ -2680,6 +2722,7 @@ void netif_napi_del(struct napi_struct *napi)
2680 } 2722 }
2681 2723
2682 napi->gro_list = NULL; 2724 napi->gro_list = NULL;
2725 napi->gro_count = 0;
2683} 2726}
2684EXPORT_SYMBOL(netif_napi_del); 2727EXPORT_SYMBOL(netif_napi_del);
2685 2728
@@ -5185,6 +5228,7 @@ static int __init net_dev_init(void)
5185 queue->backlog.poll = process_backlog; 5228 queue->backlog.poll = process_backlog;
5186 queue->backlog.weight = weight_p; 5229 queue->backlog.weight = weight_p;
5187 queue->backlog.gro_list = NULL; 5230 queue->backlog.gro_list = NULL;
5231 queue->backlog.gro_count = 0;
5188 } 5232 }
5189 5233
5190 dev_boot_phase = 0; 5234 dev_boot_phase = 0;
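The remaining dev.c hunks give dev_gro_receive() explicit result codes (GRO_NORMAL, GRO_HELD, GRO_MERGED, GRO_MERGED_FREE, GRO_DROP) in place of the old -1/0/1 returns, and napi_skb_finish()/napi_frags_finish() map those codes onto what happens to the skb. A restatement of napi_skb_finish() from the hunk above, with the implicit cases spelled out:

/* Condensed restatement of napi_skb_finish(); behaviour matches the hunk. */
int handle_gro_result(int ret, struct sk_buff *skb)
{
	int err = NET_RX_SUCCESS;

	switch (ret) {
	case GRO_NORMAL:		/* not aggregated: deliver as usual */
		return netif_receive_skb(skb);
	case GRO_HELD:			/* queued on napi->gro_list, keep it */
	case GRO_MERGED:		/* data merged, skb still referenced */
		break;
	case GRO_DROP:			/* could not be handled */
		err = NET_RX_DROP;
		/* fall through */
	case GRO_MERGED_FREE:		/* merged; this skb is now redundant */
		kfree_skb(skb);
		break;
	}
	return err;
}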
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da74b844f4e..ab7d2e9f02f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -123,6 +123,7 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
123 skb->dev ? skb->dev->name : "<NULL>"); 123 skb->dev ? skb->dev->name : "<NULL>");
124 BUG(); 124 BUG();
125} 125}
126EXPORT_SYMBOL(skb_over_panic);
126 127
127/** 128/**
128 * skb_under_panic - private function 129 * skb_under_panic - private function
@@ -142,6 +143,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
142 skb->dev ? skb->dev->name : "<NULL>"); 143 skb->dev ? skb->dev->name : "<NULL>");
143 BUG(); 144 BUG();
144} 145}
146EXPORT_SYMBOL(skb_under_panic);
145 147
146void skb_truesize_bug(struct sk_buff *skb) 148void skb_truesize_bug(struct sk_buff *skb)
147{ 149{
@@ -231,6 +233,7 @@ nodata:
231 skb = NULL; 233 skb = NULL;
232 goto out; 234 goto out;
233} 235}
236EXPORT_SYMBOL(__alloc_skb);
234 237
235/** 238/**
236 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 239 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -258,6 +261,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
258 } 261 }
259 return skb; 262 return skb;
260} 263}
264EXPORT_SYMBOL(__netdev_alloc_skb);
261 265
262struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) 266struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
263{ 267{
@@ -426,6 +430,7 @@ void __kfree_skb(struct sk_buff *skb)
426 skb_release_all(skb); 430 skb_release_all(skb);
427 kfree_skbmem(skb); 431 kfree_skbmem(skb);
428} 432}
433EXPORT_SYMBOL(__kfree_skb);
429 434
430/** 435/**
431 * kfree_skb - free an sk_buff 436 * kfree_skb - free an sk_buff
@@ -444,6 +449,7 @@ void kfree_skb(struct sk_buff *skb)
444 return; 449 return;
445 __kfree_skb(skb); 450 __kfree_skb(skb);
446} 451}
452EXPORT_SYMBOL(kfree_skb);
447 453
448/** 454/**
449 * skb_recycle_check - check if skb can be reused for receive 455 * skb_recycle_check - check if skb can be reused for receive
@@ -613,6 +619,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
613 619
614 return __skb_clone(n, skb); 620 return __skb_clone(n, skb);
615} 621}
622EXPORT_SYMBOL(skb_clone);
616 623
617static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 624static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
618{ 625{
@@ -679,7 +686,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
679 copy_skb_header(n, skb); 686 copy_skb_header(n, skb);
680 return n; 687 return n;
681} 688}
682 689EXPORT_SYMBOL(skb_copy);
683 690
684/** 691/**
685 * pskb_copy - create copy of an sk_buff with private head. 692 * pskb_copy - create copy of an sk_buff with private head.
@@ -738,6 +745,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
738out: 745out:
739 return n; 746 return n;
740} 747}
748EXPORT_SYMBOL(pskb_copy);
741 749
742/** 750/**
743 * pskb_expand_head - reallocate header of &sk_buff 751 * pskb_expand_head - reallocate header of &sk_buff
@@ -821,6 +829,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
821nodata: 829nodata:
822 return -ENOMEM; 830 return -ENOMEM;
823} 831}
832EXPORT_SYMBOL(pskb_expand_head);
824 833
825/* Make private copy of skb with writable head and some headroom */ 834/* Make private copy of skb with writable head and some headroom */
826 835
@@ -841,7 +850,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
841 } 850 }
842 return skb2; 851 return skb2;
843} 852}
844 853EXPORT_SYMBOL(skb_realloc_headroom);
845 854
846/** 855/**
847 * skb_copy_expand - copy and expand sk_buff 856 * skb_copy_expand - copy and expand sk_buff
@@ -906,6 +915,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
906 915
907 return n; 916 return n;
908} 917}
918EXPORT_SYMBOL(skb_copy_expand);
909 919
910/** 920/**
911 * skb_pad - zero pad the tail of an skb 921 * skb_pad - zero pad the tail of an skb
@@ -951,6 +961,7 @@ free_skb:
951 kfree_skb(skb); 961 kfree_skb(skb);
952 return err; 962 return err;
953} 963}
964EXPORT_SYMBOL(skb_pad);
954 965
955/** 966/**
956 * skb_put - add data to a buffer 967 * skb_put - add data to a buffer
@@ -1108,6 +1119,7 @@ done:
1108 1119
1109 return 0; 1120 return 0;
1110} 1121}
1122EXPORT_SYMBOL(___pskb_trim);
1111 1123
1112/** 1124/**
1113 * __pskb_pull_tail - advance tail of skb header 1125 * __pskb_pull_tail - advance tail of skb header
@@ -1246,6 +1258,7 @@ pull_pages:
1246 1258
1247 return skb_tail_pointer(skb); 1259 return skb_tail_pointer(skb);
1248} 1260}
1261EXPORT_SYMBOL(__pskb_pull_tail);
1249 1262
1250/* Copy some data bits from skb to kernel buffer. */ 1263/* Copy some data bits from skb to kernel buffer. */
1251 1264
@@ -1323,6 +1336,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1323fault: 1336fault:
1324 return -EFAULT; 1337 return -EFAULT;
1325} 1338}
1339EXPORT_SYMBOL(skb_copy_bits);
1326 1340
1327/* 1341/*
1328 * Callback from splice_to_pipe(), if we need to release some pages 1342 * Callback from splice_to_pipe(), if we need to release some pages
@@ -1333,14 +1347,39 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1333 put_page(spd->pages[i]); 1347 put_page(spd->pages[i]);
1334} 1348}
1335 1349
1336static inline struct page *linear_to_page(struct page *page, unsigned int len, 1350static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1337 unsigned int offset) 1351 unsigned int *offset,
1352 struct sk_buff *skb)
1338{ 1353{
1339 struct page *p = alloc_pages(GFP_KERNEL, 0); 1354 struct sock *sk = skb->sk;
1355 struct page *p = sk->sk_sndmsg_page;
1356 unsigned int off;
1340 1357
1341 if (!p) 1358 if (!p) {
1342 return NULL; 1359new_page:
1343 memcpy(page_address(p) + offset, page_address(page) + offset, len); 1360 p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1361 if (!p)
1362 return NULL;
1363
1364 off = sk->sk_sndmsg_off = 0;
1365 /* hold one ref to this page until it's full */
1366 } else {
1367 unsigned int mlen;
1368
1369 off = sk->sk_sndmsg_off;
1370 mlen = PAGE_SIZE - off;
1371 if (mlen < 64 && mlen < *len) {
1372 put_page(p);
1373 goto new_page;
1374 }
1375
1376 *len = min_t(unsigned int, *len, mlen);
1377 }
1378
1379 memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1380 sk->sk_sndmsg_off += *len;
1381 *offset = off;
1382 get_page(p);
1344 1383
1345 return p; 1384 return p;
1346} 1385}
@@ -1349,21 +1388,21 @@ static inline struct page *linear_to_page(struct page *page, unsigned int len,
1349 * Fill page/offset/length into spd, if it can hold more pages. 1388 * Fill page/offset/length into spd, if it can hold more pages.
1350 */ 1389 */
1351static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1390static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1352 unsigned int len, unsigned int offset, 1391 unsigned int *len, unsigned int offset,
1353 struct sk_buff *skb, int linear) 1392 struct sk_buff *skb, int linear)
1354{ 1393{
1355 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1394 if (unlikely(spd->nr_pages == PIPE_BUFFERS))
1356 return 1; 1395 return 1;
1357 1396
1358 if (linear) { 1397 if (linear) {
1359 page = linear_to_page(page, len, offset); 1398 page = linear_to_page(page, len, &offset, skb);
1360 if (!page) 1399 if (!page)
1361 return 1; 1400 return 1;
1362 } else 1401 } else
1363 get_page(page); 1402 get_page(page);
1364 1403
1365 spd->pages[spd->nr_pages] = page; 1404 spd->pages[spd->nr_pages] = page;
1366 spd->partial[spd->nr_pages].len = len; 1405 spd->partial[spd->nr_pages].len = *len;
1367 spd->partial[spd->nr_pages].offset = offset; 1406 spd->partial[spd->nr_pages].offset = offset;
1368 spd->nr_pages++; 1407 spd->nr_pages++;
1369 1408
@@ -1373,8 +1412,13 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1373static inline void __segment_seek(struct page **page, unsigned int *poff, 1412static inline void __segment_seek(struct page **page, unsigned int *poff,
1374 unsigned int *plen, unsigned int off) 1413 unsigned int *plen, unsigned int off)
1375{ 1414{
1415 unsigned long n;
1416
1376 *poff += off; 1417 *poff += off;
1377 *page += *poff / PAGE_SIZE; 1418 n = *poff / PAGE_SIZE;
1419 if (n)
1420 *page = nth_page(*page, n);
1421
1378 *poff = *poff % PAGE_SIZE; 1422 *poff = *poff % PAGE_SIZE;
1379 *plen -= off; 1423 *plen -= off;
1380} 1424}
@@ -1405,7 +1449,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
1405 /* the linear region may spread across several pages */ 1449 /* the linear region may spread across several pages */
1406 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1450 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1407 1451
1408 if (spd_fill_page(spd, page, flen, poff, skb, linear)) 1452 if (spd_fill_page(spd, page, &flen, poff, skb, linear))
1409 return 1; 1453 return 1;
1410 1454
1411 __segment_seek(&page, &poff, &plen, flen); 1455 __segment_seek(&page, &poff, &plen, flen);
@@ -1598,7 +1642,6 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1598fault: 1642fault:
1599 return -EFAULT; 1643 return -EFAULT;
1600} 1644}
1601
1602EXPORT_SYMBOL(skb_store_bits); 1645EXPORT_SYMBOL(skb_store_bits);
1603 1646
1604/* Checksum skb data. */ 1647/* Checksum skb data. */
@@ -1675,6 +1718,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1675 1718
1676 return csum; 1719 return csum;
1677} 1720}
1721EXPORT_SYMBOL(skb_checksum);
1678 1722
1679/* Both of above in one bottle. */ 1723/* Both of above in one bottle. */
1680 1724
@@ -1756,6 +1800,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1756 BUG_ON(len); 1800 BUG_ON(len);
1757 return csum; 1801 return csum;
1758} 1802}
1803EXPORT_SYMBOL(skb_copy_and_csum_bits);
1759 1804
1760void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 1805void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1761{ 1806{
@@ -1782,6 +1827,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1782 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 1827 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
1783 } 1828 }
1784} 1829}
1830EXPORT_SYMBOL(skb_copy_and_csum_dev);
1785 1831
1786/** 1832/**
1787 * skb_dequeue - remove from the head of the queue 1833 * skb_dequeue - remove from the head of the queue
@@ -1802,6 +1848,7 @@ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1802 spin_unlock_irqrestore(&list->lock, flags); 1848 spin_unlock_irqrestore(&list->lock, flags);
1803 return result; 1849 return result;
1804} 1850}
1851EXPORT_SYMBOL(skb_dequeue);
1805 1852
1806/** 1853/**
1807 * skb_dequeue_tail - remove from the tail of the queue 1854 * skb_dequeue_tail - remove from the tail of the queue
@@ -1821,6 +1868,7 @@ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1821 spin_unlock_irqrestore(&list->lock, flags); 1868 spin_unlock_irqrestore(&list->lock, flags);
1822 return result; 1869 return result;
1823} 1870}
1871EXPORT_SYMBOL(skb_dequeue_tail);
1824 1872
1825/** 1873/**
1826 * skb_queue_purge - empty a list 1874 * skb_queue_purge - empty a list
@@ -1836,6 +1884,7 @@ void skb_queue_purge(struct sk_buff_head *list)
1836 while ((skb = skb_dequeue(list)) != NULL) 1884 while ((skb = skb_dequeue(list)) != NULL)
1837 kfree_skb(skb); 1885 kfree_skb(skb);
1838} 1886}
1887EXPORT_SYMBOL(skb_queue_purge);
1839 1888
1840/** 1889/**
1841 * skb_queue_head - queue a buffer at the list head 1890 * skb_queue_head - queue a buffer at the list head
@@ -1856,6 +1905,7 @@ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1856 __skb_queue_head(list, newsk); 1905 __skb_queue_head(list, newsk);
1857 spin_unlock_irqrestore(&list->lock, flags); 1906 spin_unlock_irqrestore(&list->lock, flags);
1858} 1907}
1908EXPORT_SYMBOL(skb_queue_head);
1859 1909
1860/** 1910/**
1861 * skb_queue_tail - queue a buffer at the list tail 1911 * skb_queue_tail - queue a buffer at the list tail
@@ -1876,6 +1926,7 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1876 __skb_queue_tail(list, newsk); 1926 __skb_queue_tail(list, newsk);
1877 spin_unlock_irqrestore(&list->lock, flags); 1927 spin_unlock_irqrestore(&list->lock, flags);
1878} 1928}
1929EXPORT_SYMBOL(skb_queue_tail);
1879 1930
1880/** 1931/**
1881 * skb_unlink - remove a buffer from a list 1932 * skb_unlink - remove a buffer from a list
@@ -1895,6 +1946,7 @@ void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1895 __skb_unlink(skb, list); 1946 __skb_unlink(skb, list);
1896 spin_unlock_irqrestore(&list->lock, flags); 1947 spin_unlock_irqrestore(&list->lock, flags);
1897} 1948}
1949EXPORT_SYMBOL(skb_unlink);
1898 1950
1899/** 1951/**
1900 * skb_append - append a buffer 1952 * skb_append - append a buffer
@@ -1914,7 +1966,7 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
1914 __skb_queue_after(list, old, newsk); 1966 __skb_queue_after(list, old, newsk);
1915 spin_unlock_irqrestore(&list->lock, flags); 1967 spin_unlock_irqrestore(&list->lock, flags);
1916} 1968}
1917 1969EXPORT_SYMBOL(skb_append);
1918 1970
1919/** 1971/**
1920 * skb_insert - insert a buffer 1972 * skb_insert - insert a buffer
@@ -1936,6 +1988,7 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
1936 __skb_insert(newsk, old->prev, old, list); 1988 __skb_insert(newsk, old->prev, old, list);
1937 spin_unlock_irqrestore(&list->lock, flags); 1989 spin_unlock_irqrestore(&list->lock, flags);
1938} 1990}
1991EXPORT_SYMBOL(skb_insert);
1939 1992
1940static inline void skb_split_inside_header(struct sk_buff *skb, 1993static inline void skb_split_inside_header(struct sk_buff *skb,
1941 struct sk_buff* skb1, 1994 struct sk_buff* skb1,
@@ -2014,6 +2067,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2014 else /* Second chunk has no header, nothing to copy. */ 2067 else /* Second chunk has no header, nothing to copy. */
2015 skb_split_no_header(skb, skb1, len, pos); 2068 skb_split_no_header(skb, skb1, len, pos);
2016} 2069}
2070EXPORT_SYMBOL(skb_split);
2017 2071
2018/* Shifting from/to a cloned skb is a no-go. 2072/* Shifting from/to a cloned skb is a no-go.
2019 * 2073 *
@@ -2176,6 +2230,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2176 st->frag_idx = st->stepped_offset = 0; 2230 st->frag_idx = st->stepped_offset = 0;
2177 st->frag_data = NULL; 2231 st->frag_data = NULL;
2178} 2232}
2233EXPORT_SYMBOL(skb_prepare_seq_read);
2179 2234
2180/** 2235/**
2181 * skb_seq_read - Sequentially read skb data 2236 * skb_seq_read - Sequentially read skb data
@@ -2263,6 +2318,7 @@ next_skb:
2263 2318
2264 return 0; 2319 return 0;
2265} 2320}
2321EXPORT_SYMBOL(skb_seq_read);
2266 2322
2267/** 2323/**
2268 * skb_abort_seq_read - Abort a sequential read of skb data 2324 * skb_abort_seq_read - Abort a sequential read of skb data
@@ -2276,6 +2332,7 @@ void skb_abort_seq_read(struct skb_seq_state *st)
2276 if (st->frag_data) 2332 if (st->frag_data)
2277 kunmap_skb_frag(st->frag_data); 2333 kunmap_skb_frag(st->frag_data);
2278} 2334}
2335EXPORT_SYMBOL(skb_abort_seq_read);
2279 2336
2280#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2337#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2281 2338
@@ -2318,6 +2375,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2318 ret = textsearch_find(config, state); 2375 ret = textsearch_find(config, state);
2319 return (ret <= to - from ? ret : UINT_MAX); 2376 return (ret <= to - from ? ret : UINT_MAX);
2320} 2377}
2378EXPORT_SYMBOL(skb_find_text);
2321 2379
2322/** 2380/**
2323 * skb_append_datato_frags: - append the user data to a skb 2381 * skb_append_datato_frags: - append the user data to a skb
@@ -2390,6 +2448,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2390 2448
2391 return 0; 2449 return 0;
2392} 2450}
2451EXPORT_SYMBOL(skb_append_datato_frags);
2393 2452
2394/** 2453/**
2395 * skb_pull_rcsum - pull skb and update receive checksum 2454 * skb_pull_rcsum - pull skb and update receive checksum
@@ -2577,7 +2636,6 @@ err:
2577 } 2636 }
2578 return ERR_PTR(err); 2637 return ERR_PTR(err);
2579} 2638}
2580
2581EXPORT_SYMBOL_GPL(skb_segment); 2639EXPORT_SYMBOL_GPL(skb_segment);
2582 2640
2583int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2641int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
@@ -2585,17 +2643,23 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2585 struct sk_buff *p = *head; 2643 struct sk_buff *p = *head;
2586 struct sk_buff *nskb; 2644 struct sk_buff *nskb;
2587 unsigned int headroom; 2645 unsigned int headroom;
2588 unsigned int hlen = p->data - skb_mac_header(p); 2646 unsigned int len = skb_gro_len(skb);
2589 unsigned int len = skb->len;
2590 2647
2591 if (hlen + p->len + len >= 65536) 2648 if (p->len + len >= 65536)
2592 return -E2BIG; 2649 return -E2BIG;
2593 2650
2594 if (skb_shinfo(p)->frag_list) 2651 if (skb_shinfo(p)->frag_list)
2595 goto merge; 2652 goto merge;
2596 else if (!skb_headlen(p) && !skb_headlen(skb) && 2653 else if (skb_headlen(skb) <= skb_gro_offset(skb)) {
2597 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags < 2654 if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags >
2598 MAX_SKB_FRAGS) { 2655 MAX_SKB_FRAGS)
2656 return -E2BIG;
2657
2658 skb_shinfo(skb)->frags[0].page_offset +=
2659 skb_gro_offset(skb) - skb_headlen(skb);
2660 skb_shinfo(skb)->frags[0].size -=
2661 skb_gro_offset(skb) - skb_headlen(skb);
2662
2599 memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, 2663 memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
2600 skb_shinfo(skb)->frags, 2664 skb_shinfo(skb)->frags,
2601 skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 2665 skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -2612,7 +2676,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2612 } 2676 }
2613 2677
2614 headroom = skb_headroom(p); 2678 headroom = skb_headroom(p);
2615 nskb = netdev_alloc_skb(p->dev, headroom); 2679 nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
2616 if (unlikely(!nskb)) 2680 if (unlikely(!nskb))
2617 return -ENOMEM; 2681 return -ENOMEM;
2618 2682
@@ -2620,12 +2684,15 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2620 nskb->mac_len = p->mac_len; 2684 nskb->mac_len = p->mac_len;
2621 2685
2622 skb_reserve(nskb, headroom); 2686 skb_reserve(nskb, headroom);
2687 __skb_put(nskb, skb_gro_offset(p));
2623 2688
2624 skb_set_mac_header(nskb, -hlen); 2689 skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2625 skb_set_network_header(nskb, skb_network_offset(p)); 2690 skb_set_network_header(nskb, skb_network_offset(p));
2626 skb_set_transport_header(nskb, skb_transport_offset(p)); 2691 skb_set_transport_header(nskb, skb_transport_offset(p));
2627 2692
2628 memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen); 2693 __skb_pull(p, skb_gro_offset(p));
2694 memcpy(skb_mac_header(nskb), skb_mac_header(p),
2695 p->data - skb_mac_header(p));
2629 2696
2630 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2697 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2631 skb_shinfo(nskb)->frag_list = p; 2698 skb_shinfo(nskb)->frag_list = p;
@@ -2644,6 +2711,17 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2644 p = nskb; 2711 p = nskb;
2645 2712
2646merge: 2713merge:
2714 if (skb_gro_offset(skb) > skb_headlen(skb)) {
2715 skb_shinfo(skb)->frags[0].page_offset +=
2716 skb_gro_offset(skb) - skb_headlen(skb);
2717 skb_shinfo(skb)->frags[0].size -=
2718 skb_gro_offset(skb) - skb_headlen(skb);
2719 skb_gro_reset_offset(skb);
2720 skb_gro_pull(skb, skb_headlen(skb));
2721 }
2722
2723 __skb_pull(skb, skb_gro_offset(skb));
2724
2647 p->prev->next = skb; 2725 p->prev->next = skb;
2648 p->prev = skb; 2726 p->prev = skb;
2649 skb_header_release(skb); 2727 skb_header_release(skb);
@@ -2755,6 +2833,7 @@ int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int le
2755 2833
2756 return nsg; 2834 return nsg;
2757} 2835}
2836EXPORT_SYMBOL_GPL(skb_to_sgvec);
2758 2837
2759/** 2838/**
2760 * skb_cow_data - Check that a socket buffer's data buffers are writable 2839 * skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -2864,6 +2943,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2864 2943
2865 return elt; 2944 return elt;
2866} 2945}
2946EXPORT_SYMBOL_GPL(skb_cow_data);
2867 2947
2868/** 2948/**
2869 * skb_partial_csum_set - set up and verify partial csum values for packet 2949 * skb_partial_csum_set - set up and verify partial csum values for packet
@@ -2892,6 +2972,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
2892 skb->csum_offset = off; 2972 skb->csum_offset = off;
2893 return true; 2973 return true;
2894} 2974}
2975EXPORT_SYMBOL_GPL(skb_partial_csum_set);
2895 2976
2896void __skb_warn_lro_forwarding(const struct sk_buff *skb) 2977void __skb_warn_lro_forwarding(const struct sk_buff *skb)
2897{ 2978{
@@ -2899,42 +2980,4 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
2899 pr_warning("%s: received packets cannot be forwarded" 2980 pr_warning("%s: received packets cannot be forwarded"
2900 " while LRO is enabled\n", skb->dev->name); 2981 " while LRO is enabled\n", skb->dev->name);
2901} 2982}
2902
2903EXPORT_SYMBOL(___pskb_trim);
2904EXPORT_SYMBOL(__kfree_skb);
2905EXPORT_SYMBOL(kfree_skb);
2906EXPORT_SYMBOL(__pskb_pull_tail);
2907EXPORT_SYMBOL(__alloc_skb);
2908EXPORT_SYMBOL(__netdev_alloc_skb);
2909EXPORT_SYMBOL(pskb_copy);
2910EXPORT_SYMBOL(pskb_expand_head);
2911EXPORT_SYMBOL(skb_checksum);
2912EXPORT_SYMBOL(skb_clone);
2913EXPORT_SYMBOL(skb_copy);
2914EXPORT_SYMBOL(skb_copy_and_csum_bits);
2915EXPORT_SYMBOL(skb_copy_and_csum_dev);
2916EXPORT_SYMBOL(skb_copy_bits);
2917EXPORT_SYMBOL(skb_copy_expand);
2918EXPORT_SYMBOL(skb_over_panic);
2919EXPORT_SYMBOL(skb_pad);
2920EXPORT_SYMBOL(skb_realloc_headroom);
2921EXPORT_SYMBOL(skb_under_panic);
2922EXPORT_SYMBOL(skb_dequeue);
2923EXPORT_SYMBOL(skb_dequeue_tail);
2924EXPORT_SYMBOL(skb_insert);
2925EXPORT_SYMBOL(skb_queue_purge);
2926EXPORT_SYMBOL(skb_queue_head);
2927EXPORT_SYMBOL(skb_queue_tail);
2928EXPORT_SYMBOL(skb_unlink);
2929EXPORT_SYMBOL(skb_append);
2930EXPORT_SYMBOL(skb_split);
2931EXPORT_SYMBOL(skb_prepare_seq_read);
2932EXPORT_SYMBOL(skb_seq_read);
2933EXPORT_SYMBOL(skb_abort_seq_read);
2934EXPORT_SYMBOL(skb_find_text);
2935EXPORT_SYMBOL(skb_append_datato_frags);
2936EXPORT_SYMBOL(__skb_warn_lro_forwarding); 2983EXPORT_SYMBOL(__skb_warn_lro_forwarding);
2937
2938EXPORT_SYMBOL_GPL(skb_to_sgvec);
2939EXPORT_SYMBOL_GPL(skb_cow_data);
2940EXPORT_SYMBOL_GPL(skb_partial_csum_set);
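Most of the skbuff.c portion is mechanical: the EXPORT_SYMBOL() block at the end of the file is dissolved and each export now sits directly under its function, the usual kernel convention. Toy example of the layout (example_helper is illustrative, not from the patch):

#include <linux/module.h>

/* Keep the export adjacent to the definition instead of in a list at the
 * bottom of the file. */
int example_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(example_helper);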
diff --git a/net/core/sock.c b/net/core/sock.c
index 6f2e1337975..4c64be4f876 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1256,10 +1256,9 @@ static long sock_wait_for_wmem(struct sock * sk, long timeo)
1256 * Generic send/receive buffer handlers 1256 * Generic send/receive buffer handlers
1257 */ 1257 */
1258 1258
1259static struct sk_buff *sock_alloc_send_pskb(struct sock *sk, 1259struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1260 unsigned long header_len, 1260 unsigned long data_len, int noblock,
1261 unsigned long data_len, 1261 int *errcode)
1262 int noblock, int *errcode)
1263{ 1262{
1264 struct sk_buff *skb; 1263 struct sk_buff *skb;
1265 gfp_t gfp_mask; 1264 gfp_t gfp_mask;
@@ -1339,6 +1338,7 @@ failure:
1339 *errcode = err; 1338 *errcode = err;
1340 return NULL; 1339 return NULL;
1341} 1340}
1341EXPORT_SYMBOL(sock_alloc_send_pskb);
1342 1342
1343struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 1343struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1344 int noblock, int *errcode) 1344 int noblock, int *errcode)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index f2230fc168e..08a569ff02d 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -42,9 +42,11 @@
42extern int dccp_debug; 42extern int dccp_debug;
43#define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) 43#define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a)
44#define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) 44#define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
45#define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
45#else 46#else
46#define dccp_pr_debug(format, a...) 47#define dccp_pr_debug(format, a...)
47#define dccp_pr_debug_cat(format, a...) 48#define dccp_pr_debug_cat(format, a...)
49#define dccp_debug(format, a...)
48#endif 50#endif
49 51
50extern struct inet_hashinfo dccp_hashinfo; 52extern struct inet_hashinfo dccp_hashinfo;
@@ -95,9 +97,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
95extern int sysctl_dccp_request_retries; 97extern int sysctl_dccp_request_retries;
96extern int sysctl_dccp_retries1; 98extern int sysctl_dccp_retries1;
97extern int sysctl_dccp_retries2; 99extern int sysctl_dccp_retries2;
98extern int sysctl_dccp_feat_sequence_window;
99extern int sysctl_dccp_feat_rx_ccid;
100extern int sysctl_dccp_feat_tx_ccid;
101extern int sysctl_dccp_tx_qlen; 100extern int sysctl_dccp_tx_qlen;
102extern int sysctl_dccp_sync_ratelimit; 101extern int sysctl_dccp_sync_ratelimit;
103 102
@@ -409,23 +408,21 @@ static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack,
409static inline void dccp_update_gsr(struct sock *sk, u64 seq) 408static inline void dccp_update_gsr(struct sock *sk, u64 seq)
410{ 409{
411 struct dccp_sock *dp = dccp_sk(sk); 410 struct dccp_sock *dp = dccp_sk(sk);
412 const struct dccp_minisock *dmsk = dccp_msk(sk);
413 411
414 dp->dccps_gsr = seq; 412 dp->dccps_gsr = seq;
415 dccp_set_seqno(&dp->dccps_swl, 413 /* Sequence validity window depends on remote Sequence Window (7.5.1) */
416 dp->dccps_gsr + 1 - (dmsk->dccpms_sequence_window / 4)); 414 dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
417 dccp_set_seqno(&dp->dccps_swh, 415 dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4);
418 dp->dccps_gsr + (3 * dmsk->dccpms_sequence_window) / 4);
419} 416}
420 417
421static inline void dccp_update_gss(struct sock *sk, u64 seq) 418static inline void dccp_update_gss(struct sock *sk, u64 seq)
422{ 419{
423 struct dccp_sock *dp = dccp_sk(sk); 420 struct dccp_sock *dp = dccp_sk(sk);
424 421
425 dp->dccps_awh = dp->dccps_gss = seq; 422 dp->dccps_gss = seq;
426 dccp_set_seqno(&dp->dccps_awl, 423 /* Ack validity window depends on local Sequence Window value (7.5.1) */
427 (dp->dccps_gss - 424 dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win);
428 dccp_msk(sk)->dccpms_sequence_window + 1)); 425 dp->dccps_awh = dp->dccps_gss;
429} 426}
430 427
431static inline int dccp_ack_pending(const struct sock *sk) 428static inline int dccp_ack_pending(const struct sock *sk)
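The dccp.h hunks above compute the RFC 4340 §7.5.1 validity windows directly with 48-bit modular helpers and split the old single sequence window into dccps_r_seq_win (remote, bounds SWL/SWH) and dccps_l_seq_win (local, bounds AWL). A worked example in plain 64-bit arithmetic, ignoring the mod-2^48 wraparound that ADD48/SUB48 handle in the kernel:

#include <stdint.h>
#include <stdio.h>

/* RFC 4340 7.5.1 validity windows as computed in dccp_update_gsr()/gss()
 * above, with example values chosen for illustration. */
int main(void)
{
	uint64_t gsr = 1000, r_seq_win = 100;	/* remote Sequence Window */
	uint64_t gss = 5000, l_seq_win = 100;	/* local Sequence Window  */

	uint64_t swl = gsr + 1 - r_seq_win / 4;		/* 976  */
	uint64_t swh = gsr + (3 * r_seq_win) / 4;	/* 1075 */
	uint64_t awl = gss + 1 - l_seq_win;		/* 4901 */
	uint64_t awh = gss;				/* 5000 */

	printf("SWL=%llu SWH=%llu AWL=%llu AWH=%llu\n",
	       (unsigned long long)swl, (unsigned long long)swh,
	       (unsigned long long)awl, (unsigned long long)awh);
	return 0;
}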
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 4152308958a..b04160a2eea 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -25,6 +25,11 @@
25#include "ccid.h" 25#include "ccid.h"
26#include "feat.h" 26#include "feat.h"
27 27
28/* feature-specific sysctls - initialised to the defaults from RFC 4340, 6.4 */
29unsigned long sysctl_dccp_sequence_window __read_mostly = 100;
30int sysctl_dccp_rx_ccid __read_mostly = 2,
31 sysctl_dccp_tx_ccid __read_mostly = 2;
32
28/* 33/*
29 * Feature activation handlers. 34 * Feature activation handlers.
30 * 35 *
@@ -51,8 +56,17 @@ static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx)
51 56
52static int dccp_hdlr_seq_win(struct sock *sk, u64 seq_win, bool rx) 57static int dccp_hdlr_seq_win(struct sock *sk, u64 seq_win, bool rx)
53{ 58{
54 if (!rx) 59 struct dccp_sock *dp = dccp_sk(sk);
55 dccp_msk(sk)->dccpms_sequence_window = seq_win; 60
61 if (rx) {
62 dp->dccps_r_seq_win = seq_win;
63 /* propagate changes to update SWL/SWH */
64 dccp_update_gsr(sk, dp->dccps_gsr);
65 } else {
66 dp->dccps_l_seq_win = seq_win;
67 /* propagate changes to update AWL */
68 dccp_update_gss(sk, dp->dccps_gss);
69 }
56 return 0; 70 return 0;
57} 71}
58 72
@@ -194,6 +208,100 @@ static int dccp_feat_default_value(u8 feat_num)
194 return idx < 0 ? 0 : dccp_feat_table[idx].default_value; 208 return idx < 0 ? 0 : dccp_feat_table[idx].default_value;
195} 209}
196 210
211/*
212 * Debugging and verbose-printing section
213 */
214static const char *dccp_feat_fname(const u8 feat)
215{
216 static const char *feature_names[] = {
217 [DCCPF_RESERVED] = "Reserved",
218 [DCCPF_CCID] = "CCID",
219 [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos",
220 [DCCPF_SEQUENCE_WINDOW] = "Sequence Window",
221 [DCCPF_ECN_INCAPABLE] = "ECN Incapable",
222 [DCCPF_ACK_RATIO] = "Ack Ratio",
223 [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector",
224 [DCCPF_SEND_NDP_COUNT] = "Send NDP Count",
225 [DCCPF_MIN_CSUM_COVER] = "Min. Csum Coverage",
226 [DCCPF_DATA_CHECKSUM] = "Send Data Checksum",
227 };
228 if (feat > DCCPF_DATA_CHECKSUM && feat < DCCPF_MIN_CCID_SPECIFIC)
229 return feature_names[DCCPF_RESERVED];
230
231 if (feat == DCCPF_SEND_LEV_RATE)
232 return "Send Loss Event Rate";
233 if (feat >= DCCPF_MIN_CCID_SPECIFIC)
234 return "CCID-specific";
235
236 return feature_names[feat];
237}
238
239static const char *dccp_feat_sname[] = { "DEFAULT", "INITIALISING", "CHANGING",
240 "UNSTABLE", "STABLE" };
241
242#ifdef CONFIG_IP_DCCP_DEBUG
243static const char *dccp_feat_oname(const u8 opt)
244{
245 switch (opt) {
246 case DCCPO_CHANGE_L: return "Change_L";
247 case DCCPO_CONFIRM_L: return "Confirm_L";
248 case DCCPO_CHANGE_R: return "Change_R";
249 case DCCPO_CONFIRM_R: return "Confirm_R";
250 }
251 return NULL;
252}
253
254static void dccp_feat_printval(u8 feat_num, dccp_feat_val const *val)
255{
256 u8 i, type = dccp_feat_type(feat_num);
257
258 if (val == NULL || (type == FEAT_SP && val->sp.vec == NULL))
259 dccp_pr_debug_cat("(NULL)");
260 else if (type == FEAT_SP)
261 for (i = 0; i < val->sp.len; i++)
262 dccp_pr_debug_cat("%s%u", i ? " " : "", val->sp.vec[i]);
263 else if (type == FEAT_NN)
264 dccp_pr_debug_cat("%llu", (unsigned long long)val->nn);
265 else
266 dccp_pr_debug_cat("unknown type %u", type);
267}
268
269static void dccp_feat_printvals(u8 feat_num, u8 *list, u8 len)
270{
271 u8 type = dccp_feat_type(feat_num);
272 dccp_feat_val fval = { .sp.vec = list, .sp.len = len };
273
274 if (type == FEAT_NN)
275 fval.nn = dccp_decode_value_var(list, len);
276 dccp_feat_printval(feat_num, &fval);
277}
278
279static void dccp_feat_print_entry(struct dccp_feat_entry const *entry)
280{
281 dccp_debug(" * %s %s = ", entry->is_local ? "local" : "remote",
282 dccp_feat_fname(entry->feat_num));
283 dccp_feat_printval(entry->feat_num, &entry->val);
284 dccp_pr_debug_cat(", state=%s %s\n", dccp_feat_sname[entry->state],
285 entry->needs_confirm ? "(Confirm pending)" : "");
286}
287
288#define dccp_feat_print_opt(opt, feat, val, len, mandatory) do { \
289 dccp_pr_debug("%s(%s, ", dccp_feat_oname(opt), dccp_feat_fname(feat));\
290 dccp_feat_printvals(feat, val, len); \
291 dccp_pr_debug_cat(") %s\n", mandatory ? "!" : ""); } while (0)
292
293#define dccp_feat_print_fnlist(fn_list) { \
294 const struct dccp_feat_entry *___entry; \
295 \
296 dccp_pr_debug("List Dump:\n"); \
297 list_for_each_entry(___entry, fn_list, node) \
298 dccp_feat_print_entry(___entry); \
299}
300#else /* ! CONFIG_IP_DCCP_DEBUG */
301#define dccp_feat_print_opt(opt, feat, val, len, mandatory)
302#define dccp_feat_print_fnlist(fn_list)
303#endif
304
197static int __dccp_feat_activate(struct sock *sk, const int idx, 305static int __dccp_feat_activate(struct sock *sk, const int idx,
198 const bool is_local, dccp_feat_val const *fval) 306 const bool is_local, dccp_feat_val const *fval)
199{ 307{
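
dccp_feat_fname() above resolves a feature number to a printable name through a sparse table built with C99 designated initializers, with explicit range checks so unknown numbers fall back to "Reserved". A standalone sketch of that lookup pattern; the enum constants below are illustrative stand-ins for the DCCPF_* values and the feature numbers follow RFC 4340, 6.4 and RFC 4342:

#include <stdio.h>

enum { F_RESERVED = 0, F_CCID = 1, F_SEQ_WIN = 3, F_DATA_CKSUM = 9,
       F_MIN_CCID_SPECIFIC = 128, F_SEND_LEV_RATE = 192 };

static const char *feat_name(unsigned int feat)
{
	/* sparse table: unnamed slots stay NULL */
	static const char *names[] = {
		[F_RESERVED]	= "Reserved",
		[F_CCID]	= "CCID",
		[F_SEQ_WIN]	= "Sequence Window",
		[F_DATA_CKSUM]	= "Send Data Checksum",
	};

	if (feat == F_SEND_LEV_RATE)
		return "Send Loss Event Rate";
	if (feat >= F_MIN_CCID_SPECIFIC)
		return "CCID-specific";
	if (feat > F_DATA_CKSUM || names[feat] == NULL)
		return names[F_RESERVED];	/* out of table or gap */
	return names[feat];
}

int main(void)
{
	printf("%s / %s / %s\n", feat_name(1), feat_name(200), feat_name(6));
	return 0;
}
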
@@ -226,6 +334,10 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
226 /* Location is RX if this is a local-RX or remote-TX feature */ 334 /* Location is RX if this is a local-RX or remote-TX feature */
227 rx = (is_local == (dccp_feat_table[idx].rxtx == FEAT_AT_RX)); 335 rx = (is_local == (dccp_feat_table[idx].rxtx == FEAT_AT_RX));
228 336
337 dccp_debug(" -> activating %s %s, %sval=%llu\n", rx ? "RX" : "TX",
338 dccp_feat_fname(dccp_feat_table[idx].feat_num),
339 fval ? "" : "default ", (unsigned long long)val);
340
229 return dccp_feat_table[idx].activation_hdlr(sk, val, rx); 341 return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
230} 342}
231 343
@@ -530,6 +642,7 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
530 return -1; 642 return -1;
531 } 643 }
532 } 644 }
645 dccp_feat_print_opt(opt, pos->feat_num, ptr, len, 0);
533 646
534 if (dccp_insert_fn_opt(skb, opt, pos->feat_num, ptr, len, rpt)) 647 if (dccp_insert_fn_opt(skb, opt, pos->feat_num, ptr, len, rpt))
535 return -1; 648 return -1;
@@ -783,6 +896,7 @@ int dccp_feat_finalise_settings(struct dccp_sock *dp)
783 while (i--) 896 while (i--)
784 if (ccids[i] > 0 && dccp_feat_propagate_ccid(fn, ccids[i], i)) 897 if (ccids[i] > 0 && dccp_feat_propagate_ccid(fn, ccids[i], i))
785 return -1; 898 return -1;
899 dccp_feat_print_fnlist(fn);
786 return 0; 900 return 0;
787} 901}
788 902
@@ -901,6 +1015,8 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
901 if (len == 0 || type == FEAT_UNKNOWN) /* 6.1 and 6.6.8 */ 1015 if (len == 0 || type == FEAT_UNKNOWN) /* 6.1 and 6.6.8 */
902 goto unknown_feature_or_value; 1016 goto unknown_feature_or_value;
903 1017
1018 dccp_feat_print_opt(opt, feat, val, len, is_mandatory);
1019
904 /* 1020 /*
905 * Negotiation of NN features: Change R is invalid, so there is no 1021 * Negotiation of NN features: Change R is invalid, so there is no
906 * simultaneous negotiation; hence we do not look up in the list. 1022 * simultaneous negotiation; hence we do not look up in the list.
@@ -1006,6 +1122,8 @@ static u8 dccp_feat_confirm_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
1006 const bool local = (opt == DCCPO_CONFIRM_R); 1122 const bool local = (opt == DCCPO_CONFIRM_R);
1007 struct dccp_feat_entry *entry = dccp_feat_list_lookup(fn, feat, local); 1123 struct dccp_feat_entry *entry = dccp_feat_list_lookup(fn, feat, local);
1008 1124
1125 dccp_feat_print_opt(opt, feat, val, len, is_mandatory);
1126
1009 if (entry == NULL) { /* nothing queued: ignore or handle error */ 1127 if (entry == NULL) { /* nothing queued: ignore or handle error */
1010 if (is_mandatory && type == FEAT_UNKNOWN) 1128 if (is_mandatory && type == FEAT_UNKNOWN)
1011 return DCCP_RESET_CODE_MANDATORY_ERROR; 1129 return DCCP_RESET_CODE_MANDATORY_ERROR;
@@ -1115,23 +1233,70 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
1115 return 0; /* ignore FN options in all other states */ 1233 return 0; /* ignore FN options in all other states */
1116} 1234}
1117 1235
1236/**
1237 * dccp_feat_init - Seed feature negotiation with host-specific defaults
1238 * This initialises global defaults, depending on the value of the sysctls.
1239 * These can later be overridden by registering changes via setsockopt calls.
1240 * The last link in the chain is finalise_settings, to make sure that between
1241 * here and the start of actual feature negotiation no inconsistencies enter.
1242 *
1243 * All features not appearing below use either defaults or are otherwise
1244 * later adjusted through dccp_feat_finalise_settings().
1245 */
1118int dccp_feat_init(struct sock *sk) 1246int dccp_feat_init(struct sock *sk)
1119{ 1247{
1120 struct dccp_sock *dp = dccp_sk(sk); 1248 struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
1121 struct dccp_minisock *dmsk = dccp_msk(sk); 1249 u8 on = 1, off = 0;
1122 int rc; 1250 int rc;
1251 struct {
1252 u8 *val;
1253 u8 len;
1254 } tx, rx;
1255
1256 /* Non-negotiable (NN) features */
1257 rc = __feat_register_nn(fn, DCCPF_SEQUENCE_WINDOW, 0,
1258 sysctl_dccp_sequence_window);
1259 if (rc)
1260 return rc;
1261
1262 /* Server-priority (SP) features */
1263
1264 /* Advertise that short seqnos are not supported (7.6.1) */
1265 rc = __feat_register_sp(fn, DCCPF_SHORT_SEQNOS, true, true, &off, 1);
1266 if (rc)
1267 return rc;
1123 1268
1124 INIT_LIST_HEAD(&dmsk->dccpms_pending); /* XXX no longer used */ 1269 /* RFC 4340 12.1: "If a DCCP is not ECN capable, ..." */
1125 INIT_LIST_HEAD(&dmsk->dccpms_conf); /* XXX no longer used */ 1270 rc = __feat_register_sp(fn, DCCPF_ECN_INCAPABLE, true, true, &on, 1);
1271 if (rc)
1272 return rc;
1273
1274 /*
1275 * We advertise the available list of CCIDs and reorder according to
1276 * preferences, to avoid failure resulting from negotiating different
1277 * singleton values (which always leads to failure).
1278 * These settings can still (later) be overridden via sockopts.
1279 */
1280 if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
1281 ccid_get_builtin_ccids(&rx.val, &rx.len))
1282 return -ENOBUFS;
1283
1284 if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
1285 !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
1286 goto free_ccid_lists;
1287
1288 rc = __feat_register_sp(fn, DCCPF_CCID, true, false, tx.val, tx.len);
1289 if (rc)
1290 goto free_ccid_lists;
1291
1292 rc = __feat_register_sp(fn, DCCPF_CCID, false, false, rx.val, rx.len);
1126 1293
1127 /* Ack ratio */ 1294free_ccid_lists:
1128 rc = __feat_register_nn(&dp->dccps_featneg, DCCPF_ACK_RATIO, 0, 1295 kfree(tx.val);
1129 dp->dccps_l_ack_ratio); 1296 kfree(rx.val);
1130 return rc; 1297 return rc;
1131} 1298}
1132 1299
1133EXPORT_SYMBOL_GPL(dccp_feat_init);
1134
1135int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list) 1300int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list)
1136{ 1301{
1137 struct dccp_sock *dp = dccp_sk(sk); 1302 struct dccp_sock *dp = dccp_sk(sk);
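
The rewritten dccp_feat_init() seeds negotiation from the new sysctls: the Sequence Window is registered as a non-negotiable feature, the socket advertises that short sequence numbers are unsupported and that it is ECN-incapable, and the built-in CCID list is reordered so the sysctl-preferred CCID is offered first. A userspace sketch of that reordering step, assuming dccp_feat_prefer() promotes the preferred value to the front of the list and fails when it is absent; prefer_value() below is a stand-in, not the kernel helper:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>

static bool prefer_value(uint8_t preferred, uint8_t *vec, uint8_t len)
{
	uint8_t i;

	for (i = 0; i < len; i++) {
		if (vec[i] == preferred) {
			/* shift vec[0..i-1] up by one and put the
			 * preferred value at the front */
			memmove(&vec[1], &vec[0], i);
			vec[0] = preferred;
			return true;
		}
	}
	return false;	/* preferred CCID is not built in */
}

int main(void)
{
	uint8_t ccids[] = { 2, 3 };	/* e.g. built-in CCID-2 and CCID-3 */

	if (prefer_value(3, ccids, sizeof(ccids)))
		printf("advertised order: %u %u\n", ccids[0], ccids[1]);
	return 0;
}
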
@@ -1156,9 +1321,10 @@ int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list)
1156 goto activation_failed; 1321 goto activation_failed;
1157 } 1322 }
1158 if (cur->state != FEAT_STABLE) { 1323 if (cur->state != FEAT_STABLE) {
1159 DCCP_CRIT("Negotiation of %s %u failed in state %u", 1324 DCCP_CRIT("Negotiation of %s %s failed in state %s",
1160 cur->is_local ? "local" : "remote", 1325 cur->is_local ? "local" : "remote",
1161 cur->feat_num, cur->state); 1326 dccp_feat_fname(cur->feat_num),
1327 dccp_feat_sname[cur->state]);
1162 goto activation_failed; 1328 goto activation_failed;
1163 } 1329 }
1164 fvals[idx][cur->is_local] = &cur->val; 1330 fvals[idx][cur->is_local] = &cur->val;
@@ -1199,43 +1365,3 @@ activation_failed:
1199 dp->dccps_hc_rx_ackvec = NULL; 1365 dp->dccps_hc_rx_ackvec = NULL;
1200 return -1; 1366 return -1;
1201} 1367}
1202
1203#ifdef CONFIG_IP_DCCP_DEBUG
1204const char *dccp_feat_typename(const u8 type)
1205{
1206 switch(type) {
1207 case DCCPO_CHANGE_L: return("ChangeL");
1208 case DCCPO_CONFIRM_L: return("ConfirmL");
1209 case DCCPO_CHANGE_R: return("ChangeR");
1210 case DCCPO_CONFIRM_R: return("ConfirmR");
1211 /* the following case must not appear in feature negotiation */
1212 default: dccp_pr_debug("unknown type %d [BUG!]\n", type);
1213 }
1214 return NULL;
1215}
1216
1217const char *dccp_feat_name(const u8 feat)
1218{
1219 static const char *feature_names[] = {
1220 [DCCPF_RESERVED] = "Reserved",
1221 [DCCPF_CCID] = "CCID",
1222 [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos",
1223 [DCCPF_SEQUENCE_WINDOW] = "Sequence Window",
1224 [DCCPF_ECN_INCAPABLE] = "ECN Incapable",
1225 [DCCPF_ACK_RATIO] = "Ack Ratio",
1226 [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector",
1227 [DCCPF_SEND_NDP_COUNT] = "Send NDP Count",
1228 [DCCPF_MIN_CSUM_COVER] = "Min. Csum Coverage",
1229 [DCCPF_DATA_CHECKSUM] = "Send Data Checksum",
1230 };
1231 if (feat > DCCPF_DATA_CHECKSUM && feat < DCCPF_MIN_CCID_SPECIFIC)
1232 return feature_names[DCCPF_RESERVED];
1233
1234 if (feat == DCCPF_SEND_LEV_RATE)
1235 return "Send Loss Event Rate";
1236 if (feat >= DCCPF_MIN_CCID_SPECIFIC)
1237 return "CCID-specific";
1238
1239 return feature_names[feat];
1240}
1241#endif /* CONFIG_IP_DCCP_DEBUG */
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 9b46e2a7866..f96721619de 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -100,26 +100,21 @@ struct ccid_dependency {
100 u8 val; 100 u8 val;
101}; 101};
102 102
103#ifdef CONFIG_IP_DCCP_DEBUG 103/*
104extern const char *dccp_feat_typename(const u8 type); 104 * Sysctls to seed defaults for feature negotiation
105extern const char *dccp_feat_name(const u8 feat); 105 */
106 106extern unsigned long sysctl_dccp_sequence_window;
107static inline void dccp_feat_debug(const u8 type, const u8 feat, const u8 val) 107extern int sysctl_dccp_rx_ccid;
108{ 108extern int sysctl_dccp_tx_ccid;
109 dccp_pr_debug("%s(%s (%d), %d)\n", dccp_feat_typename(type),
110 dccp_feat_name(feat), feat, val);
111}
112#else
113#define dccp_feat_debug(type, feat, val)
114#endif /* CONFIG_IP_DCCP_DEBUG */
115 109
110extern int dccp_feat_init(struct sock *sk);
111extern void dccp_feat_initialise_sysctls(void);
116extern int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local, 112extern int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
117 u8 const *list, u8 len); 113 u8 const *list, u8 len);
118extern int dccp_feat_register_nn(struct sock *sk, u8 feat, u64 val); 114extern int dccp_feat_register_nn(struct sock *sk, u8 feat, u64 val);
119extern int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *, 115extern int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
120 u8 mand, u8 opt, u8 feat, u8 *val, u8 len); 116 u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
121extern int dccp_feat_clone_list(struct list_head const *, struct list_head *); 117extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
122extern int dccp_feat_init(struct sock *sk);
123 118
124/* 119/*
125 * Encoding variable-length options and their maximum length. 120 * Encoding variable-length options and their maximum length.
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 6821ae33dd3..5ca49cec95f 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -42,11 +42,6 @@ struct inet_timewait_death_row dccp_death_row = {
42 42
43EXPORT_SYMBOL_GPL(dccp_death_row); 43EXPORT_SYMBOL_GPL(dccp_death_row);
44 44
45void dccp_minisock_init(struct dccp_minisock *dmsk)
46{
47 dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window;
48}
49
50void dccp_time_wait(struct sock *sk, int state, int timeo) 45void dccp_time_wait(struct sock *sk, int state, int timeo)
51{ 46{
52 struct inet_timewait_sock *tw = NULL; 47 struct inet_timewait_sock *tw = NULL;
@@ -110,7 +105,6 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
110 struct dccp_request_sock *dreq = dccp_rsk(req); 105 struct dccp_request_sock *dreq = dccp_rsk(req);
111 struct inet_connection_sock *newicsk = inet_csk(newsk); 106 struct inet_connection_sock *newicsk = inet_csk(newsk);
112 struct dccp_sock *newdp = dccp_sk(newsk); 107 struct dccp_sock *newdp = dccp_sk(newsk);
113 struct dccp_minisock *newdmsk = dccp_msk(newsk);
114 108
115 newdp->dccps_role = DCCP_ROLE_SERVER; 109 newdp->dccps_role = DCCP_ROLE_SERVER;
116 newdp->dccps_hc_rx_ackvec = NULL; 110 newdp->dccps_hc_rx_ackvec = NULL;
@@ -128,10 +122,6 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
128 * Initialize S.GAR := S.ISS 122 * Initialize S.GAR := S.ISS
129 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies 123 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
130 */ 124 */
131
132 /* See dccp_v4_conn_request */
133 newdmsk->dccpms_sequence_window = req->rcv_wnd;
134
135 newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss; 125 newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss;
136 dccp_update_gss(newsk, dreq->dreq_iss); 126 dccp_update_gss(newsk, dreq->dreq_iss);
137 127
@@ -290,7 +280,6 @@ int dccp_reqsk_init(struct request_sock *req,
290 inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; 280 inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
291 inet_rsk(req)->loc_port = dccp_hdr(skb)->dccph_dport; 281 inet_rsk(req)->loc_port = dccp_hdr(skb)->dccph_dport;
292 inet_rsk(req)->acked = 0; 282 inet_rsk(req)->acked = 0;
293 req->rcv_wnd = sysctl_dccp_feat_sequence_window;
294 dreq->dreq_timestamp_echo = 0; 283 dreq->dreq_timestamp_echo = 0;
295 284
296 /* inherit feature negotiation options from listening socket */ 285 /* inherit feature negotiation options from listening socket */
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 7b1165c21f5..1b08cae9c65 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -23,10 +23,6 @@
23#include "dccp.h" 23#include "dccp.h"
24#include "feat.h" 24#include "feat.h"
25 25
26int sysctl_dccp_feat_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW;
27int sysctl_dccp_feat_rx_ccid = DCCPF_INITIAL_CCID;
28int sysctl_dccp_feat_tx_ccid = DCCPF_INITIAL_CCID;
29
30u64 dccp_decode_value_var(const u8 *bf, const u8 len) 26u64 dccp_decode_value_var(const u8 *bf, const u8 len)
31{ 27{
32 u64 value = 0; 28 u64 value = 0;
@@ -502,10 +498,6 @@ int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
502 *to++ = *val; 498 *to++ = *val;
503 if (len) 499 if (len)
504 memcpy(to, val, len); 500 memcpy(to, val, len);
505
506 dccp_pr_debug("%s(%s (%d), ...), length %d\n",
507 dccp_feat_typename(type),
508 dccp_feat_name(feat), feat, len);
509 return 0; 501 return 0;
510} 502}
511 503
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 945b4d5d23b..314a1b5c033 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -174,8 +174,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
174 struct dccp_sock *dp = dccp_sk(sk); 174 struct dccp_sock *dp = dccp_sk(sk);
175 struct inet_connection_sock *icsk = inet_csk(sk); 175 struct inet_connection_sock *icsk = inet_csk(sk);
176 176
177 dccp_minisock_init(&dp->dccps_minisock);
178
179 icsk->icsk_rto = DCCP_TIMEOUT_INIT; 177 icsk->icsk_rto = DCCP_TIMEOUT_INIT;
180 icsk->icsk_syn_retries = sysctl_dccp_request_retries; 178 icsk->icsk_syn_retries = sysctl_dccp_request_retries;
181 sk->sk_state = DCCP_CLOSED; 179 sk->sk_state = DCCP_CLOSED;
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 018e210875e..a5a1856234e 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -18,55 +18,72 @@
18#error This file should not be compiled without CONFIG_SYSCTL defined 18#error This file should not be compiled without CONFIG_SYSCTL defined
19#endif 19#endif
20 20
21/* Boundary values */
22static int zero = 0,
23 u8_max = 0xFF;
24static unsigned long seqw_min = 32;
25
21static struct ctl_table dccp_default_table[] = { 26static struct ctl_table dccp_default_table[] = {
22 { 27 {
23 .procname = "seq_window", 28 .procname = "seq_window",
24 .data = &sysctl_dccp_feat_sequence_window, 29 .data = &sysctl_dccp_sequence_window,
25 .maxlen = sizeof(sysctl_dccp_feat_sequence_window), 30 .maxlen = sizeof(sysctl_dccp_sequence_window),
26 .mode = 0644, 31 .mode = 0644,
27 .proc_handler = proc_dointvec, 32 .proc_handler = proc_doulongvec_minmax,
33 .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */
28 }, 34 },
29 { 35 {
30 .procname = "rx_ccid", 36 .procname = "rx_ccid",
31 .data = &sysctl_dccp_feat_rx_ccid, 37 .data = &sysctl_dccp_rx_ccid,
32 .maxlen = sizeof(sysctl_dccp_feat_rx_ccid), 38 .maxlen = sizeof(sysctl_dccp_rx_ccid),
33 .mode = 0644, 39 .mode = 0644,
34 .proc_handler = proc_dointvec, 40 .proc_handler = proc_dointvec_minmax,
41 .extra1 = &zero,
42 .extra2 = &u8_max, /* RFC 4340, 10. */
35 }, 43 },
36 { 44 {
37 .procname = "tx_ccid", 45 .procname = "tx_ccid",
38 .data = &sysctl_dccp_feat_tx_ccid, 46 .data = &sysctl_dccp_tx_ccid,
39 .maxlen = sizeof(sysctl_dccp_feat_tx_ccid), 47 .maxlen = sizeof(sysctl_dccp_tx_ccid),
40 .mode = 0644, 48 .mode = 0644,
41 .proc_handler = proc_dointvec, 49 .proc_handler = proc_dointvec_minmax,
50 .extra1 = &zero,
51 .extra2 = &u8_max, /* RFC 4340, 10. */
42 }, 52 },
43 { 53 {
44 .procname = "request_retries", 54 .procname = "request_retries",
45 .data = &sysctl_dccp_request_retries, 55 .data = &sysctl_dccp_request_retries,
46 .maxlen = sizeof(sysctl_dccp_request_retries), 56 .maxlen = sizeof(sysctl_dccp_request_retries),
47 .mode = 0644, 57 .mode = 0644,
48 .proc_handler = proc_dointvec, 58 .proc_handler = proc_dointvec_minmax,
59 .extra1 = &zero,
60 .extra2 = &u8_max,
49 }, 61 },
50 { 62 {
51 .procname = "retries1", 63 .procname = "retries1",
52 .data = &sysctl_dccp_retries1, 64 .data = &sysctl_dccp_retries1,
53 .maxlen = sizeof(sysctl_dccp_retries1), 65 .maxlen = sizeof(sysctl_dccp_retries1),
54 .mode = 0644, 66 .mode = 0644,
55 .proc_handler = proc_dointvec, 67 .proc_handler = proc_dointvec_minmax,
68 .extra1 = &zero,
69 .extra2 = &u8_max,
56 }, 70 },
57 { 71 {
58 .procname = "retries2", 72 .procname = "retries2",
59 .data = &sysctl_dccp_retries2, 73 .data = &sysctl_dccp_retries2,
60 .maxlen = sizeof(sysctl_dccp_retries2), 74 .maxlen = sizeof(sysctl_dccp_retries2),
61 .mode = 0644, 75 .mode = 0644,
62 .proc_handler = proc_dointvec, 76 .proc_handler = proc_dointvec_minmax,
77 .extra1 = &zero,
78 .extra2 = &u8_max,
63 }, 79 },
64 { 80 {
65 .procname = "tx_qlen", 81 .procname = "tx_qlen",
66 .data = &sysctl_dccp_tx_qlen, 82 .data = &sysctl_dccp_tx_qlen,
67 .maxlen = sizeof(sysctl_dccp_tx_qlen), 83 .maxlen = sizeof(sysctl_dccp_tx_qlen),
68 .mode = 0644, 84 .mode = 0644,
69 .proc_handler = proc_dointvec, 85 .proc_handler = proc_dointvec_minmax,
86 .extra1 = &zero,
70 }, 87 },
71 { 88 {
72 .procname = "sync_ratelimit", 89 .procname = "sync_ratelimit",
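
The sysctl table above moves from proc_dointvec to the minmax handlers so out-of-range writes are refused rather than stored: seq_window must be at least 32 (RFC 4340, 7.5.2) and the CCID and retry counters are confined to one octet. A small userspace illustration of that bounds check; struct bounded_int and sysctl_write() are illustrative stand-ins, not kernel interfaces:

#include <stdio.h>
#include <errno.h>
#include <limits.h>

struct bounded_int {
	const char *name;
	long val, min, max;
};

static int sysctl_write(struct bounded_int *s, long new_val)
{
	if (new_val < s->min || new_val > s->max)
		return -EINVAL;		/* out-of-range write is refused */
	s->val = new_val;
	return 0;
}

int main(void)
{
	struct bounded_int seq_win = { "seq_window", 100, 32, LONG_MAX };
	struct bounded_int tx_ccid = { "tx_ccid", 2, 0, 255 };

	printf("seq_window=31 -> %d\n", sysctl_write(&seq_win, 31));	/* rejected */
	printf("tx_ccid=3     -> %d\n", sysctl_write(&tx_ccid, 3));	/* accepted */
	return 0;
}
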
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index cf0e1849929..12bf7d4c16c 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2113,7 +2113,7 @@ static struct notifier_block dn_dev_notifier = {
2113extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); 2113extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2114 2114
2115static struct packet_type dn_dix_packet_type = { 2115static struct packet_type dn_dix_packet_type = {
2116 .type = __constant_htons(ETH_P_DNA_RT), 2116 .type = cpu_to_be16(ETH_P_DNA_RT),
2117 .dev = NULL, /* All devices */ 2117 .dev = NULL, /* All devices */
2118 .func = dn_route_rcv, 2118 .func = dn_route_rcv,
2119}; 2119};
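
This and the following hunks replace __constant_htons()/__constant_htonl() in static initializers with the plain cpu_to_be16()/cpu_to_be32() forms, which handle compile-time constants just as well. A self-contained userspace illustration of what the conversion produces, a 16-bit ethertype laid out in network (big-endian) byte order; to_be16() below is a stand-in for demonstration, not the kernel macro:

#include <stdio.h>
#include <stdint.h>

static uint16_t to_be16(uint16_t host)
{
	/* place the most significant byte first, independent of host order */
	union { uint16_t v; uint8_t b[2]; } u = { .b = { host >> 8, host & 0xff } };
	return u.v;
}

int main(void)
{
	uint16_t eth_p_ip = 0x0800;	/* ETH_P_IP */
	uint16_t wire = to_be16(eth_p_ip);
	uint8_t *p = (uint8_t *)&wire;

	printf("host 0x%04x -> on-wire bytes %02x %02x\n", eth_p_ip, p[0], p[1]);
	return 0;
}
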
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index c754670b7fc..5130dee0b38 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -124,7 +124,7 @@ int decnet_dst_gc_interval = 2;
124 124
125static struct dst_ops dn_dst_ops = { 125static struct dst_ops dn_dst_ops = {
126 .family = PF_DECnet, 126 .family = PF_DECnet,
127 .protocol = __constant_htons(ETH_P_DNA_RT), 127 .protocol = cpu_to_be16(ETH_P_DNA_RT),
128 .gc_thresh = 128, 128 .gc_thresh = 128,
129 .gc = dn_dst_gc, 129 .gc = dn_dst_gc,
130 .check = dn_dst_check, 130 .check = dn_dst_check,
diff --git a/net/dsa/mv88e6123_61_65.c b/net/dsa/mv88e6123_61_65.c
index ec8c6a0482d..10031872221 100644
--- a/net/dsa/mv88e6123_61_65.c
+++ b/net/dsa/mv88e6123_61_65.c
@@ -394,7 +394,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
394} 394}
395 395
396static struct dsa_switch_driver mv88e6123_61_65_switch_driver = { 396static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
397 .tag_protocol = __constant_htons(ETH_P_EDSA), 397 .tag_protocol = cpu_to_be16(ETH_P_EDSA),
398 .priv_size = sizeof(struct mv88e6xxx_priv_state), 398 .priv_size = sizeof(struct mv88e6xxx_priv_state),
399 .probe = mv88e6123_61_65_probe, 399 .probe = mv88e6123_61_65_probe,
400 .setup = mv88e6123_61_65_setup, 400 .setup = mv88e6123_61_65_setup,
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index 374d46a0126..70fae2444cb 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -353,7 +353,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds)
353} 353}
354 354
355static struct dsa_switch_driver mv88e6131_switch_driver = { 355static struct dsa_switch_driver mv88e6131_switch_driver = {
356 .tag_protocol = __constant_htons(ETH_P_DSA), 356 .tag_protocol = cpu_to_be16(ETH_P_DSA),
357 .priv_size = sizeof(struct mv88e6xxx_priv_state), 357 .priv_size = sizeof(struct mv88e6xxx_priv_state),
358 .probe = mv88e6131_probe, 358 .probe = mv88e6131_probe,
359 .setup = mv88e6131_setup, 359 .setup = mv88e6131_setup,
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index f99a019b939..63e532a69fd 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -176,7 +176,7 @@ out:
176} 176}
177 177
178static struct packet_type dsa_packet_type = { 178static struct packet_type dsa_packet_type = {
179 .type = __constant_htons(ETH_P_DSA), 179 .type = cpu_to_be16(ETH_P_DSA),
180 .func = dsa_rcv, 180 .func = dsa_rcv,
181}; 181};
182 182
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 328ec957f78..6197f9a7ef4 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -195,7 +195,7 @@ out:
195} 195}
196 196
197static struct packet_type edsa_packet_type = { 197static struct packet_type edsa_packet_type = {
198 .type = __constant_htons(ETH_P_EDSA), 198 .type = cpu_to_be16(ETH_P_EDSA),
199 .func = edsa_rcv, 199 .func = edsa_rcv,
200}; 200};
201 201
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index b59132878ad..d7e7f424ff0 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -112,7 +112,7 @@ out:
112} 112}
113 113
114static struct packet_type trailer_packet_type = { 114static struct packet_type trailer_packet_type = {
115 .type = __constant_htons(ETH_P_TRAILER), 115 .type = cpu_to_be16(ETH_P_TRAILER),
116 .func = trailer_rcv, 116 .func = trailer_rcv,
117}; 117};
118 118
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 8789d2bb1b0..7bf35582f65 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -1103,7 +1103,7 @@ drop:
1103} 1103}
1104 1104
1105static struct packet_type econet_packet_type = { 1105static struct packet_type econet_packet_type = {
1106 .type = __constant_htons(ETH_P_ECONET), 1106 .type = cpu_to_be16(ETH_P_ECONET),
1107 .func = econet_rcv, 1107 .func = econet_rcv,
1108}; 1108};
1109 1109
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 743f5542d65..627be4dc7fb 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -369,7 +369,6 @@ lookup_protocol:
369 sock_init_data(sock, sk); 369 sock_init_data(sock, sk);
370 370
371 sk->sk_destruct = inet_sock_destruct; 371 sk->sk_destruct = inet_sock_destruct;
372 sk->sk_family = PF_INET;
373 sk->sk_protocol = protocol; 372 sk->sk_protocol = protocol;
374 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 373 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
375 374
@@ -1253,10 +1252,10 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1253 int proto; 1252 int proto;
1254 int id; 1253 int id;
1255 1254
1256 if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) 1255 iph = skb_gro_header(skb, sizeof(*iph));
1256 if (unlikely(!iph))
1257 goto out; 1257 goto out;
1258 1258
1259 iph = ip_hdr(skb);
1260 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1259 proto = iph->protocol & (MAX_INET_PROTOS - 1);
1261 1260
1262 rcu_read_lock(); 1261 rcu_read_lock();
@@ -1264,13 +1263,13 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1264 if (!ops || !ops->gro_receive) 1263 if (!ops || !ops->gro_receive)
1265 goto out_unlock; 1264 goto out_unlock;
1266 1265
1267 if (iph->version != 4 || iph->ihl != 5) 1266 if (*(u8 *)iph != 0x45)
1268 goto out_unlock; 1267 goto out_unlock;
1269 1268
1270 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1269 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1271 goto out_unlock; 1270 goto out_unlock;
1272 1271
1273 flush = ntohs(iph->tot_len) != skb->len || 1272 flush = ntohs(iph->tot_len) != skb_gro_len(skb) ||
1274 iph->frag_off != htons(IP_DF); 1273 iph->frag_off != htons(IP_DF);
1275 id = ntohs(iph->id); 1274 id = ntohs(iph->id);
1276 1275
@@ -1282,24 +1281,25 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1282 1281
1283 iph2 = ip_hdr(p); 1282 iph2 = ip_hdr(p);
1284 1283
1285 if (iph->protocol != iph2->protocol || 1284 if ((iph->protocol ^ iph2->protocol) |
1286 iph->tos != iph2->tos || 1285 (iph->tos ^ iph2->tos) |
1287 memcmp(&iph->saddr, &iph2->saddr, 8)) { 1286 (iph->saddr ^ iph2->saddr) |
1287 (iph->daddr ^ iph2->daddr)) {
1288 NAPI_GRO_CB(p)->same_flow = 0; 1288 NAPI_GRO_CB(p)->same_flow = 0;
1289 continue; 1289 continue;
1290 } 1290 }
1291 1291
1292 /* All fields must match except length and checksum. */ 1292 /* All fields must match except length and checksum. */
1293 NAPI_GRO_CB(p)->flush |= 1293 NAPI_GRO_CB(p)->flush |=
1294 memcmp(&iph->frag_off, &iph2->frag_off, 4) || 1294 (iph->ttl ^ iph2->ttl) |
1295 (u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) != id; 1295 ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
1296 1296
1297 NAPI_GRO_CB(p)->flush |= flush; 1297 NAPI_GRO_CB(p)->flush |= flush;
1298 } 1298 }
1299 1299
1300 NAPI_GRO_CB(skb)->flush |= flush; 1300 NAPI_GRO_CB(skb)->flush |= flush;
1301 __skb_pull(skb, sizeof(*iph)); 1301 skb_gro_pull(skb, sizeof(*iph));
1302 skb_reset_transport_header(skb); 1302 skb_set_transport_header(skb, skb_gro_offset(skb));
1303 1303
1304 pp = ops->gro_receive(head, skb); 1304 pp = ops->gro_receive(head, skb);
1305 1305
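
The GRO hunk above replaces a chain of field comparisons with a branch-free form: XOR each pair of header fields and OR the results, so any mismatch leaves a non-zero bit; similarly, comparing the first header byte against 0x45 folds "version == 4 && ihl == 5" into a single test, since the version occupies the high nibble and IHL the low nibble. A standalone sketch of both tricks (struct iphdr_lite is a cut-down illustration, not the kernel's struct iphdr):

#include <stdio.h>
#include <stdint.h>

struct iphdr_lite {
	uint8_t  version_ihl;	/* first byte of the IPv4 header */
	uint8_t  tos;
	uint8_t  protocol;
	uint32_t saddr, daddr;
};

static int same_flow(const struct iphdr_lite *a, const struct iphdr_lite *b)
{
	uint32_t diff = (a->protocol ^ b->protocol) |
			(a->tos      ^ b->tos)      |
			(a->saddr    ^ b->saddr)    |
			(a->daddr    ^ b->daddr);
	return diff == 0;	/* zero only when every field matches */
}

int main(void)
{
	struct iphdr_lite a = { 0x45, 0, 6, 0x0a000001, 0x0a000002 };
	struct iphdr_lite b = a;

	printf("plain IPv4 header? %s\n", a.version_ihl == 0x45 ? "yes" : "no");
	printf("same flow: %d\n", same_flow(&a, &b));	/* 1 */
	b.daddr ^= 1;
	printf("same flow: %d\n", same_flow(&a, &b));	/* 0 */
	return 0;
}
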
@@ -1501,7 +1501,7 @@ static int ipv4_proc_init(void);
1501 */ 1501 */
1502 1502
1503static struct packet_type ip_packet_type = { 1503static struct packet_type ip_packet_type = {
1504 .type = __constant_htons(ETH_P_IP), 1504 .type = cpu_to_be16(ETH_P_IP),
1505 .func = ip_rcv, 1505 .func = ip_rcv,
1506 .gso_send_check = inet_gso_send_check, 1506 .gso_send_check = inet_gso_send_check,
1507 .gso_segment = inet_gso_segment, 1507 .gso_segment = inet_gso_segment,
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 29a74c01d8d..3f6b7354699 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1226,7 +1226,7 @@ void arp_ifdown(struct net_device *dev)
1226 */ 1226 */
1227 1227
1228static struct packet_type arp_packet_type = { 1228static struct packet_type arp_packet_type = {
1229 .type = __constant_htons(ETH_P_ARP), 1229 .type = cpu_to_be16(ETH_P_ARP),
1230 .func = arp_rcv, 1230 .func = arp_rcv,
1231}; 1231};
1232 1232
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 309997edc8a..d519a6a6672 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1075,6 +1075,14 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1075 } 1075 }
1076 } 1076 }
1077 ip_mc_up(in_dev); 1077 ip_mc_up(in_dev);
1078 /* fall through */
1079 case NETDEV_CHANGEADDR:
1080 if (IN_DEV_ARP_NOTIFY(in_dev))
1081 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1082 in_dev->ifa_list->ifa_address,
1083 dev,
1084 in_dev->ifa_list->ifa_address,
1085 NULL, dev->dev_addr, NULL);
1078 break; 1086 break;
1079 case NETDEV_DOWN: 1087 case NETDEV_DOWN:
1080 ip_mc_down(in_dev); 1088 ip_mc_down(in_dev);
@@ -1439,6 +1447,7 @@ static struct devinet_sysctl_table {
1439 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"), 1447 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
1440 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), 1448 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
1441 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), 1449 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
1450 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
1442 1451
1443 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), 1452 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
1444 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), 1453 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f26ab38680d..22cd19ee44e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -93,24 +93,40 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
93 struct inet_bind_hashbucket *head; 93 struct inet_bind_hashbucket *head;
94 struct hlist_node *node; 94 struct hlist_node *node;
95 struct inet_bind_bucket *tb; 95 struct inet_bind_bucket *tb;
96 int ret; 96 int ret, attempts = 5;
97 struct net *net = sock_net(sk); 97 struct net *net = sock_net(sk);
98 int smallest_size = -1, smallest_rover;
98 99
99 local_bh_disable(); 100 local_bh_disable();
100 if (!snum) { 101 if (!snum) {
101 int remaining, rover, low, high; 102 int remaining, rover, low, high;
102 103
104again:
103 inet_get_local_port_range(&low, &high); 105 inet_get_local_port_range(&low, &high);
104 remaining = (high - low) + 1; 106 remaining = (high - low) + 1;
105 rover = net_random() % remaining + low; 107 smallest_rover = rover = net_random() % remaining + low;
106 108
109 smallest_size = -1;
107 do { 110 do {
108 head = &hashinfo->bhash[inet_bhashfn(net, rover, 111 head = &hashinfo->bhash[inet_bhashfn(net, rover,
109 hashinfo->bhash_size)]; 112 hashinfo->bhash_size)];
110 spin_lock(&head->lock); 113 spin_lock(&head->lock);
111 inet_bind_bucket_for_each(tb, node, &head->chain) 114 inet_bind_bucket_for_each(tb, node, &head->chain)
112 if (ib_net(tb) == net && tb->port == rover) 115 if (ib_net(tb) == net && tb->port == rover) {
116 if (tb->fastreuse > 0 &&
117 sk->sk_reuse &&
118 sk->sk_state != TCP_LISTEN &&
119 (tb->num_owners < smallest_size || smallest_size == -1)) {
120 smallest_size = tb->num_owners;
121 smallest_rover = rover;
122 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
123 spin_unlock(&head->lock);
124 snum = smallest_rover;
125 goto have_snum;
126 }
127 }
113 goto next; 128 goto next;
129 }
114 break; 130 break;
115 next: 131 next:
116 spin_unlock(&head->lock); 132 spin_unlock(&head->lock);
@@ -125,14 +141,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
125 * the top level, not from the 'break;' statement. 141 * the top level, not from the 'break;' statement.
126 */ 142 */
127 ret = 1; 143 ret = 1;
128 if (remaining <= 0) 144 if (remaining <= 0) {
145 if (smallest_size != -1) {
146 snum = smallest_rover;
147 goto have_snum;
148 }
129 goto fail; 149 goto fail;
130 150 }
131 /* OK, here is the one we will use. HEAD is 151 /* OK, here is the one we will use. HEAD is
132 * non-NULL and we hold its mutex. 152 * non-NULL and we hold its mutex.
133 */ 153 */
134 snum = rover; 154 snum = rover;
135 } else { 155 } else {
156have_snum:
136 head = &hashinfo->bhash[inet_bhashfn(net, snum, 157 head = &hashinfo->bhash[inet_bhashfn(net, snum,
137 hashinfo->bhash_size)]; 158 hashinfo->bhash_size)];
138 spin_lock(&head->lock); 159 spin_lock(&head->lock);
@@ -145,12 +166,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
145tb_found: 166tb_found:
146 if (!hlist_empty(&tb->owners)) { 167 if (!hlist_empty(&tb->owners)) {
147 if (tb->fastreuse > 0 && 168 if (tb->fastreuse > 0 &&
148 sk->sk_reuse && sk->sk_state != TCP_LISTEN) { 169 sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
170 smallest_size == -1) {
149 goto success; 171 goto success;
150 } else { 172 } else {
151 ret = 1; 173 ret = 1;
152 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) 174 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
175 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
176 smallest_size != -1 && --attempts >= 0) {
177 spin_unlock(&head->lock);
178 goto again;
179 }
153 goto fail_unlock; 180 goto fail_unlock;
181 }
154 } 182 }
155 } 183 }
156tb_not_found: 184tb_not_found:
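
The inet_csk_get_port() change above adds a fallback for autobind: while scanning the ephemeral range it remembers the reusable bucket with the fewest owners ("smallest_size"), and if no completely free port turns up it binds to that least-shared port instead of failing. A simplified userspace sketch of that heuristic; the bucket layout and pick_port() are illustrative only and omit the locking and bind-conflict retry logic:

#include <stdio.h>

struct bucket {
	int port;
	int num_owners;	/* sockets already bound to this port */
	int fastreuse;	/* all owners set SO_REUSEADDR and none listens */
};

static int pick_port(const struct bucket *tb, int n, int low, int high)
{
	int smallest_size = -1, smallest_rover = -1, rover, i;

	for (rover = low; rover <= high; rover++) {
		const struct bucket *hit = NULL;

		for (i = 0; i < n; i++)
			if (tb[i].port == rover)
				hit = &tb[i];
		if (!hit)
			return rover;	/* completely unused port wins */
		if (hit->fastreuse > 0 &&
		    (smallest_size == -1 || hit->num_owners < smallest_size)) {
			smallest_size = hit->num_owners;
			smallest_rover = rover;
		}
	}
	return smallest_rover;	/* least-shared reusable port, or -1 */
}

int main(void)
{
	struct bucket tb[] = {
		{ 32768, 3, 1 }, { 32769, 1, 1 }, { 32770, 2, 0 },
	};

	printf("chosen port: %d\n", pick_port(tb, 3, 32768, 32770));	/* 32769 */
	return 0;
}
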
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 6a1045da48d..625cc5f64c9 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -38,6 +38,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
38 write_pnet(&tb->ib_net, hold_net(net)); 38 write_pnet(&tb->ib_net, hold_net(net));
39 tb->port = snum; 39 tb->port = snum;
40 tb->fastreuse = 0; 40 tb->fastreuse = 0;
41 tb->num_owners = 0;
41 INIT_HLIST_HEAD(&tb->owners); 42 INIT_HLIST_HEAD(&tb->owners);
42 hlist_add_head(&tb->node, &head->chain); 43 hlist_add_head(&tb->node, &head->chain);
43 } 44 }
@@ -59,8 +60,13 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
59void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, 60void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
60 const unsigned short snum) 61 const unsigned short snum)
61{ 62{
63 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
64
65 atomic_inc(&hashinfo->bsockets);
66
62 inet_sk(sk)->num = snum; 67 inet_sk(sk)->num = snum;
63 sk_add_bind_node(sk, &tb->owners); 68 sk_add_bind_node(sk, &tb->owners);
69 tb->num_owners++;
64 inet_csk(sk)->icsk_bind_hash = tb; 70 inet_csk(sk)->icsk_bind_hash = tb;
65} 71}
66 72
@@ -75,9 +81,12 @@ static void __inet_put_port(struct sock *sk)
75 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; 81 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
76 struct inet_bind_bucket *tb; 82 struct inet_bind_bucket *tb;
77 83
84 atomic_dec(&hashinfo->bsockets);
85
78 spin_lock(&head->lock); 86 spin_lock(&head->lock);
79 tb = inet_csk(sk)->icsk_bind_hash; 87 tb = inet_csk(sk)->icsk_bind_hash;
80 __sk_del_bind_node(sk); 88 __sk_del_bind_node(sk);
89 tb->num_owners--;
81 inet_csk(sk)->icsk_bind_hash = NULL; 90 inet_csk(sk)->icsk_bind_hash = NULL;
82 inet_sk(sk)->num = 0; 91 inet_sk(sk)->num = 0;
83 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); 92 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
@@ -444,9 +453,9 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
444 */ 453 */
445 inet_bind_bucket_for_each(tb, node, &head->chain) { 454 inet_bind_bucket_for_each(tb, node, &head->chain) {
446 if (ib_net(tb) == net && tb->port == port) { 455 if (ib_net(tb) == net && tb->port == port) {
447 WARN_ON(hlist_empty(&tb->owners));
448 if (tb->fastreuse >= 0) 456 if (tb->fastreuse >= 0)
449 goto next_port; 457 goto next_port;
458 WARN_ON(hlist_empty(&tb->owners));
450 if (!check_established(death_row, sk, 459 if (!check_established(death_row, sk,
451 port, &tw)) 460 port, &tw))
452 goto ok; 461 goto ok;
@@ -523,6 +532,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
523{ 532{
524 int i; 533 int i;
525 534
535 atomic_set(&h->bsockets, 0);
526 for (i = 0; i < INET_LHTABLE_SIZE; i++) { 536 for (i = 0; i < INET_LHTABLE_SIZE; i++) {
527 spin_lock_init(&h->listening_hash[i].lock); 537 spin_lock_init(&h->listening_hash[i].lock);
528 INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, 538 INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 0101521f366..07a188afb3a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -164,67 +164,124 @@ static DEFINE_RWLOCK(ipgre_lock);
164 164
165/* Given src, dst and key, find appropriate for input tunnel. */ 165/* Given src, dst and key, find appropriate for input tunnel. */
166 166
167static struct ip_tunnel * ipgre_tunnel_lookup(struct net *net, 167static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
168 __be32 remote, __be32 local, 168 __be32 remote, __be32 local,
169 __be32 key, __be16 gre_proto) 169 __be32 key, __be16 gre_proto)
170{ 170{
171 struct net *net = dev_net(dev);
172 int link = dev->ifindex;
171 unsigned h0 = HASH(remote); 173 unsigned h0 = HASH(remote);
172 unsigned h1 = HASH(key); 174 unsigned h1 = HASH(key);
173 struct ip_tunnel *t; 175 struct ip_tunnel *t, *cand = NULL;
174 struct ip_tunnel *t2 = NULL;
175 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 176 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
176 int dev_type = (gre_proto == htons(ETH_P_TEB)) ? 177 int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
177 ARPHRD_ETHER : ARPHRD_IPGRE; 178 ARPHRD_ETHER : ARPHRD_IPGRE;
179 int score, cand_score = 4;
178 180
179 for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) { 181 for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
180 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) { 182 if (local != t->parms.iph.saddr ||
181 if (t->parms.i_key == key && t->dev->flags & IFF_UP) { 183 remote != t->parms.iph.daddr ||
182 if (t->dev->type == dev_type) 184 key != t->parms.i_key ||
183 return t; 185 !(t->dev->flags & IFF_UP))
184 if (t->dev->type == ARPHRD_IPGRE && !t2) 186 continue;
185 t2 = t; 187
186 } 188 if (t->dev->type != ARPHRD_IPGRE &&
189 t->dev->type != dev_type)
190 continue;
191
192 score = 0;
193 if (t->parms.link != link)
194 score |= 1;
195 if (t->dev->type != dev_type)
196 score |= 2;
197 if (score == 0)
198 return t;
199
200 if (score < cand_score) {
201 cand = t;
202 cand_score = score;
187 } 203 }
188 } 204 }
189 205
190 for (t = ign->tunnels_r[h0^h1]; t; t = t->next) { 206 for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
191 if (remote == t->parms.iph.daddr) { 207 if (remote != t->parms.iph.daddr ||
192 if (t->parms.i_key == key && t->dev->flags & IFF_UP) { 208 key != t->parms.i_key ||
193 if (t->dev->type == dev_type) 209 !(t->dev->flags & IFF_UP))
194 return t; 210 continue;
195 if (t->dev->type == ARPHRD_IPGRE && !t2) 211
196 t2 = t; 212 if (t->dev->type != ARPHRD_IPGRE &&
197 } 213 t->dev->type != dev_type)
214 continue;
215
216 score = 0;
217 if (t->parms.link != link)
218 score |= 1;
219 if (t->dev->type != dev_type)
220 score |= 2;
221 if (score == 0)
222 return t;
223
224 if (score < cand_score) {
225 cand = t;
226 cand_score = score;
198 } 227 }
199 } 228 }
200 229
201 for (t = ign->tunnels_l[h1]; t; t = t->next) { 230 for (t = ign->tunnels_l[h1]; t; t = t->next) {
202 if (local == t->parms.iph.saddr || 231 if ((local != t->parms.iph.saddr &&
203 (local == t->parms.iph.daddr && 232 (local != t->parms.iph.daddr ||
204 ipv4_is_multicast(local))) { 233 !ipv4_is_multicast(local))) ||
205 if (t->parms.i_key == key && t->dev->flags & IFF_UP) { 234 key != t->parms.i_key ||
206 if (t->dev->type == dev_type) 235 !(t->dev->flags & IFF_UP))
207 return t; 236 continue;
208 if (t->dev->type == ARPHRD_IPGRE && !t2) 237
209 t2 = t; 238 if (t->dev->type != ARPHRD_IPGRE &&
210 } 239 t->dev->type != dev_type)
240 continue;
241
242 score = 0;
243 if (t->parms.link != link)
244 score |= 1;
245 if (t->dev->type != dev_type)
246 score |= 2;
247 if (score == 0)
248 return t;
249
250 if (score < cand_score) {
251 cand = t;
252 cand_score = score;
211 } 253 }
212 } 254 }
213 255
214 for (t = ign->tunnels_wc[h1]; t; t = t->next) { 256 for (t = ign->tunnels_wc[h1]; t; t = t->next) {
215 if (t->parms.i_key == key && t->dev->flags & IFF_UP) { 257 if (t->parms.i_key != key ||
216 if (t->dev->type == dev_type) 258 !(t->dev->flags & IFF_UP))
217 return t; 259 continue;
218 if (t->dev->type == ARPHRD_IPGRE && !t2) 260
219 t2 = t; 261 if (t->dev->type != ARPHRD_IPGRE &&
262 t->dev->type != dev_type)
263 continue;
264
265 score = 0;
266 if (t->parms.link != link)
267 score |= 1;
268 if (t->dev->type != dev_type)
269 score |= 2;
270 if (score == 0)
271 return t;
272
273 if (score < cand_score) {
274 cand = t;
275 cand_score = score;
220 } 276 }
221 } 277 }
222 278
223 if (t2) 279 if (cand != NULL)
224 return t2; 280 return cand;
225 281
226 if (ign->fb_tunnel_dev->flags&IFF_UP) 282 if (ign->fb_tunnel_dev->flags & IFF_UP)
227 return netdev_priv(ign->fb_tunnel_dev); 283 return netdev_priv(ign->fb_tunnel_dev);
284
228 return NULL; 285 return NULL;
229} 286}
230 287
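
The reworked ipgre_tunnel_lookup() above scores each surviving tunnel instead of returning the first loose match: bit 0 of the score penalises a link (ifindex) mismatch, bit 1 a device-type mismatch, an exact match (score 0) is returned immediately, and otherwise the lowest-scoring candidate wins. A standalone sketch of that scoring, simplified to leave out the address/key filtering and the ARPHRD_IPGRE special-casing; the types and values are illustrative:

#include <stdio.h>

struct tun {
	const char *name;
	int link;	/* ifindex the tunnel is bound to */
	int type;	/* device type, e.g. GRE vs Ethernet-over-GRE */
};

static const struct tun *lookup(const struct tun *t, int n, int link, int type)
{
	const struct tun *cand = NULL;
	int i, score, cand_score = 4;	/* worse than any real score (0..3) */

	for (i = 0; i < n; i++) {
		score = 0;
		if (t[i].link != link)
			score |= 1;
		if (t[i].type != type)
			score |= 2;
		if (score == 0)
			return &t[i];		/* exact match wins outright */
		if (score < cand_score) {
			cand = &t[i];
			cand_score = score;	/* keep the best partial match */
		}
	}
	return cand;
}

int main(void)
{
	struct tun tuns[] = {
		{ "gre1", 2, 778 },	/* wrong link  */
		{ "gre2", 3, 1 },	/* wrong type  */
		{ "gre3", 3, 778 },	/* exact match */
	};
	const struct tun *t = lookup(tuns, 3, 3, 778);

	printf("matched %s\n", t ? t->name : "(none)");
	return 0;
}
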
@@ -284,6 +341,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
284 __be32 remote = parms->iph.daddr; 341 __be32 remote = parms->iph.daddr;
285 __be32 local = parms->iph.saddr; 342 __be32 local = parms->iph.saddr;
286 __be32 key = parms->i_key; 343 __be32 key = parms->i_key;
344 int link = parms->link;
287 struct ip_tunnel *t, **tp; 345 struct ip_tunnel *t, **tp;
288 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 346 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
289 347
@@ -291,6 +349,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
291 if (local == t->parms.iph.saddr && 349 if (local == t->parms.iph.saddr &&
292 remote == t->parms.iph.daddr && 350 remote == t->parms.iph.daddr &&
293 key == t->parms.i_key && 351 key == t->parms.i_key &&
352 link == t->parms.link &&
294 type == t->dev->type) 353 type == t->dev->type)
295 break; 354 break;
296 355
@@ -421,7 +480,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
421 } 480 }
422 481
423 read_lock(&ipgre_lock); 482 read_lock(&ipgre_lock);
424 t = ipgre_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr, 483 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
425 flags & GRE_KEY ? 484 flags & GRE_KEY ?
426 *(((__be32 *)p) + (grehlen / 4) - 1) : 0, 485 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
427 p[1]); 486 p[1]);
@@ -518,7 +577,7 @@ static int ipgre_rcv(struct sk_buff *skb)
518 gre_proto = *(__be16 *)(h + 2); 577 gre_proto = *(__be16 *)(h + 2);
519 578
520 read_lock(&ipgre_lock); 579 read_lock(&ipgre_lock);
521 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), 580 if ((tunnel = ipgre_tunnel_lookup(skb->dev,
522 iph->saddr, iph->daddr, key, 581 iph->saddr, iph->daddr, key,
523 gre_proto))) { 582 gre_proto))) {
524 struct net_device_stats *stats = &tunnel->dev->stats; 583 struct net_device_stats *stats = &tunnel->dev->stats;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index d722013c1ca..90d22ae0a41 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -100,8 +100,8 @@
100#define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers 100#define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers
101 - '3' from resolv.h */ 101 - '3' from resolv.h */
102 102
103#define NONE __constant_htonl(INADDR_NONE) 103#define NONE cpu_to_be32(INADDR_NONE)
104#define ANY __constant_htonl(INADDR_ANY) 104#define ANY cpu_to_be32(INADDR_ANY)
105 105
106/* 106/*
107 * Public IP configuration 107 * Public IP configuration
@@ -406,7 +406,7 @@ static int __init ic_defaults(void)
406static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); 406static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
407 407
408static struct packet_type rarp_packet_type __initdata = { 408static struct packet_type rarp_packet_type __initdata = {
409 .type = __constant_htons(ETH_P_RARP), 409 .type = cpu_to_be16(ETH_P_RARP),
410 .func = ic_rarp_recv, 410 .func = ic_rarp_recv,
411}; 411};
412 412
@@ -568,7 +568,7 @@ struct bootp_pkt { /* BOOTP packet format */
568static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); 568static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
569 569
570static struct packet_type bootp_packet_type __initdata = { 570static struct packet_type bootp_packet_type __initdata = {
571 .type = __constant_htons(ETH_P_IP), 571 .type = cpu_to_be16(ETH_P_IP),
572 .func = ic_bootp_recv, 572 .func = ic_bootp_recv,
573}; 573};
574 574
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 14666449dc1..13e9dd3012b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -67,9 +67,6 @@
67#define CONFIG_IP_PIMSM 1 67#define CONFIG_IP_PIMSM 1
68#endif 68#endif
69 69
70static struct sock *mroute_socket;
71
72
73/* Big lock, protecting vif table, mrt cache and mroute socket state. 70/* Big lock, protecting vif table, mrt cache and mroute socket state.
74 Note that the changes are semaphored via rtnl_lock. 71 Note that the changes are semaphored via rtnl_lock.
75 */ 72 */
@@ -80,18 +77,9 @@ static DEFINE_RWLOCK(mrt_lock);
80 * Multicast router control variables 77 * Multicast router control variables
81 */ 78 */
82 79
83static struct vif_device vif_table[MAXVIFS]; /* Devices */ 80#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)
84static int maxvif;
85
86#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
87
88static int mroute_do_assert; /* Set in PIM assert */
89static int mroute_do_pim;
90
91static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
92 81
93static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ 82static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
94static atomic_t cache_resolve_queue_len; /* Size of unresolved */
95 83
96/* Special spinlock for queue of unresolved entries */ 84/* Special spinlock for queue of unresolved entries */
97static DEFINE_SPINLOCK(mfc_unres_lock); 85static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -107,7 +95,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
107static struct kmem_cache *mrt_cachep __read_mostly; 95static struct kmem_cache *mrt_cachep __read_mostly;
108 96
109static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); 97static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
110static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); 98static int ipmr_cache_report(struct net *net,
99 struct sk_buff *pkt, vifi_t vifi, int assert);
111static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); 100static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
112 101
113#ifdef CONFIG_IP_PIMSM_V2 102#ifdef CONFIG_IP_PIMSM_V2
@@ -120,9 +109,11 @@ static struct timer_list ipmr_expire_timer;
120 109
121static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) 110static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
122{ 111{
112 struct net *net = dev_net(dev);
113
123 dev_close(dev); 114 dev_close(dev);
124 115
125 dev = __dev_get_by_name(&init_net, "tunl0"); 116 dev = __dev_get_by_name(net, "tunl0");
126 if (dev) { 117 if (dev) {
127 const struct net_device_ops *ops = dev->netdev_ops; 118 const struct net_device_ops *ops = dev->netdev_ops;
128 struct ifreq ifr; 119 struct ifreq ifr;
@@ -148,11 +139,11 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
148} 139}
149 140
150static 141static
151struct net_device *ipmr_new_tunnel(struct vifctl *v) 142struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
152{ 143{
153 struct net_device *dev; 144 struct net_device *dev;
154 145
155 dev = __dev_get_by_name(&init_net, "tunl0"); 146 dev = __dev_get_by_name(net, "tunl0");
156 147
157 if (dev) { 148 if (dev) {
158 const struct net_device_ops *ops = dev->netdev_ops; 149 const struct net_device_ops *ops = dev->netdev_ops;
@@ -181,7 +172,8 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
181 172
182 dev = NULL; 173 dev = NULL;
183 174
184 if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) { 175 if (err == 0 &&
176 (dev = __dev_get_by_name(net, p.name)) != NULL) {
185 dev->flags |= IFF_MULTICAST; 177 dev->flags |= IFF_MULTICAST;
186 178
187 in_dev = __in_dev_get_rtnl(dev); 179 in_dev = __in_dev_get_rtnl(dev);
@@ -209,14 +201,15 @@ failure:
209 201
210#ifdef CONFIG_IP_PIMSM 202#ifdef CONFIG_IP_PIMSM
211 203
212static int reg_vif_num = -1;
213
214static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 204static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
215{ 205{
206 struct net *net = dev_net(dev);
207
216 read_lock(&mrt_lock); 208 read_lock(&mrt_lock);
217 dev->stats.tx_bytes += skb->len; 209 dev->stats.tx_bytes += skb->len;
218 dev->stats.tx_packets++; 210 dev->stats.tx_packets++;
219 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); 211 ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
212 IGMPMSG_WHOLEPKT);
220 read_unlock(&mrt_lock); 213 read_unlock(&mrt_lock);
221 kfree_skb(skb); 214 kfree_skb(skb);
222 return 0; 215 return 0;
@@ -283,16 +276,16 @@ failure:
283 * @notify: Set to 1, if the caller is a notifier_call 276 * @notify: Set to 1, if the caller is a notifier_call
284 */ 277 */
285 278
286static int vif_delete(int vifi, int notify) 279static int vif_delete(struct net *net, int vifi, int notify)
287{ 280{
288 struct vif_device *v; 281 struct vif_device *v;
289 struct net_device *dev; 282 struct net_device *dev;
290 struct in_device *in_dev; 283 struct in_device *in_dev;
291 284
292 if (vifi < 0 || vifi >= maxvif) 285 if (vifi < 0 || vifi >= net->ipv4.maxvif)
293 return -EADDRNOTAVAIL; 286 return -EADDRNOTAVAIL;
294 287
295 v = &vif_table[vifi]; 288 v = &net->ipv4.vif_table[vifi];
296 289
297 write_lock_bh(&mrt_lock); 290 write_lock_bh(&mrt_lock);
298 dev = v->dev; 291 dev = v->dev;
@@ -304,17 +297,17 @@ static int vif_delete(int vifi, int notify)
304 } 297 }
305 298
306#ifdef CONFIG_IP_PIMSM 299#ifdef CONFIG_IP_PIMSM
307 if (vifi == reg_vif_num) 300 if (vifi == net->ipv4.mroute_reg_vif_num)
308 reg_vif_num = -1; 301 net->ipv4.mroute_reg_vif_num = -1;
309#endif 302#endif
310 303
311 if (vifi+1 == maxvif) { 304 if (vifi+1 == net->ipv4.maxvif) {
312 int tmp; 305 int tmp;
313 for (tmp=vifi-1; tmp>=0; tmp--) { 306 for (tmp=vifi-1; tmp>=0; tmp--) {
314 if (VIF_EXISTS(tmp)) 307 if (VIF_EXISTS(net, tmp))
315 break; 308 break;
316 } 309 }
317 maxvif = tmp+1; 310 net->ipv4.maxvif = tmp+1;
318 } 311 }
319 312
320 write_unlock_bh(&mrt_lock); 313 write_unlock_bh(&mrt_lock);
@@ -333,6 +326,12 @@ static int vif_delete(int vifi, int notify)
333 return 0; 326 return 0;
334} 327}
335 328
329static inline void ipmr_cache_free(struct mfc_cache *c)
330{
331 release_net(mfc_net(c));
332 kmem_cache_free(mrt_cachep, c);
333}
334
336/* Destroy an unresolved cache entry, killing queued skbs 335/* Destroy an unresolved cache entry, killing queued skbs
337 and reporting error to netlink readers. 336 and reporting error to netlink readers.
338 */ 337 */
@@ -341,8 +340,9 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
341{ 340{
342 struct sk_buff *skb; 341 struct sk_buff *skb;
343 struct nlmsgerr *e; 342 struct nlmsgerr *e;
343 struct net *net = mfc_net(c);
344 344
345 atomic_dec(&cache_resolve_queue_len); 345 atomic_dec(&net->ipv4.cache_resolve_queue_len);
346 346
347 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { 347 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
348 if (ip_hdr(skb)->version == 0) { 348 if (ip_hdr(skb)->version == 0) {
@@ -354,12 +354,12 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
354 e->error = -ETIMEDOUT; 354 e->error = -ETIMEDOUT;
355 memset(&e->msg, 0, sizeof(e->msg)); 355 memset(&e->msg, 0, sizeof(e->msg));
356 356
357 rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid); 357 rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
358 } else 358 } else
359 kfree_skb(skb); 359 kfree_skb(skb);
360 } 360 }
361 361
362 kmem_cache_free(mrt_cachep, c); 362 ipmr_cache_free(c);
363} 363}
364 364
365 365
@@ -376,7 +376,7 @@ static void ipmr_expire_process(unsigned long dummy)
376 return; 376 return;
377 } 377 }
378 378
379 if (atomic_read(&cache_resolve_queue_len) == 0) 379 if (mfc_unres_queue == NULL)
380 goto out; 380 goto out;
381 381
382 now = jiffies; 382 now = jiffies;
@@ -397,7 +397,7 @@ static void ipmr_expire_process(unsigned long dummy)
397 ipmr_destroy_unres(c); 397 ipmr_destroy_unres(c);
398 } 398 }
399 399
400 if (atomic_read(&cache_resolve_queue_len)) 400 if (mfc_unres_queue != NULL)
401 mod_timer(&ipmr_expire_timer, jiffies + expires); 401 mod_timer(&ipmr_expire_timer, jiffies + expires);
402 402
403out: 403out:
@@ -409,13 +409,15 @@ out:
409static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) 409static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
410{ 410{
411 int vifi; 411 int vifi;
412 struct net *net = mfc_net(cache);
412 413
413 cache->mfc_un.res.minvif = MAXVIFS; 414 cache->mfc_un.res.minvif = MAXVIFS;
414 cache->mfc_un.res.maxvif = 0; 415 cache->mfc_un.res.maxvif = 0;
415 memset(cache->mfc_un.res.ttls, 255, MAXVIFS); 416 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
416 417
417 for (vifi=0; vifi<maxvif; vifi++) { 418 for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
418 if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) { 419 if (VIF_EXISTS(net, vifi) &&
420 ttls[vifi] && ttls[vifi] < 255) {
419 cache->mfc_un.res.ttls[vifi] = ttls[vifi]; 421 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
420 if (cache->mfc_un.res.minvif > vifi) 422 if (cache->mfc_un.res.minvif > vifi)
421 cache->mfc_un.res.minvif = vifi; 423 cache->mfc_un.res.minvif = vifi;
@@ -425,16 +427,16 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
425 } 427 }
426} 428}
427 429
428static int vif_add(struct vifctl *vifc, int mrtsock) 430static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
429{ 431{
430 int vifi = vifc->vifc_vifi; 432 int vifi = vifc->vifc_vifi;
431 struct vif_device *v = &vif_table[vifi]; 433 struct vif_device *v = &net->ipv4.vif_table[vifi];
432 struct net_device *dev; 434 struct net_device *dev;
433 struct in_device *in_dev; 435 struct in_device *in_dev;
434 int err; 436 int err;
435 437
436 /* Is vif busy ? */ 438 /* Is vif busy ? */
437 if (VIF_EXISTS(vifi)) 439 if (VIF_EXISTS(net, vifi))
438 return -EADDRINUSE; 440 return -EADDRINUSE;
439 441
440 switch (vifc->vifc_flags) { 442 switch (vifc->vifc_flags) {
@@ -444,7 +446,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
444 * Special Purpose VIF in PIM 446 * Special Purpose VIF in PIM
445 * All the packets will be sent to the daemon 447 * All the packets will be sent to the daemon
446 */ 448 */
447 if (reg_vif_num >= 0) 449 if (net->ipv4.mroute_reg_vif_num >= 0)
448 return -EADDRINUSE; 450 return -EADDRINUSE;
449 dev = ipmr_reg_vif(); 451 dev = ipmr_reg_vif();
450 if (!dev) 452 if (!dev)
@@ -458,7 +460,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
458 break; 460 break;
459#endif 461#endif
460 case VIFF_TUNNEL: 462 case VIFF_TUNNEL:
461 dev = ipmr_new_tunnel(vifc); 463 dev = ipmr_new_tunnel(net, vifc);
462 if (!dev) 464 if (!dev)
463 return -ENOBUFS; 465 return -ENOBUFS;
464 err = dev_set_allmulti(dev, 1); 466 err = dev_set_allmulti(dev, 1);
@@ -469,7 +471,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
469 } 471 }
470 break; 472 break;
471 case 0: 473 case 0:
472 dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr); 474 dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
473 if (!dev) 475 if (!dev)
474 return -EADDRNOTAVAIL; 476 return -EADDRNOTAVAIL;
475 err = dev_set_allmulti(dev, 1); 477 err = dev_set_allmulti(dev, 1);
@@ -510,20 +512,22 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
510 v->dev = dev; 512 v->dev = dev;
511#ifdef CONFIG_IP_PIMSM 513#ifdef CONFIG_IP_PIMSM
512 if (v->flags&VIFF_REGISTER) 514 if (v->flags&VIFF_REGISTER)
513 reg_vif_num = vifi; 515 net->ipv4.mroute_reg_vif_num = vifi;
514#endif 516#endif
515 if (vifi+1 > maxvif) 517 if (vifi+1 > net->ipv4.maxvif)
516 maxvif = vifi+1; 518 net->ipv4.maxvif = vifi+1;
517 write_unlock_bh(&mrt_lock); 519 write_unlock_bh(&mrt_lock);
518 return 0; 520 return 0;
519} 521}
520 522
521static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) 523static struct mfc_cache *ipmr_cache_find(struct net *net,
524 __be32 origin,
525 __be32 mcastgrp)
522{ 526{
523 int line = MFC_HASH(mcastgrp, origin); 527 int line = MFC_HASH(mcastgrp, origin);
524 struct mfc_cache *c; 528 struct mfc_cache *c;
525 529
526 for (c=mfc_cache_array[line]; c; c = c->next) { 530 for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) {
527 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) 531 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
528 break; 532 break;
529 } 533 }
@@ -533,22 +537,24 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
533/* 537/*
534 * Allocate a multicast cache entry 538 * Allocate a multicast cache entry
535 */ 539 */
536static struct mfc_cache *ipmr_cache_alloc(void) 540static struct mfc_cache *ipmr_cache_alloc(struct net *net)
537{ 541{
538 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 542 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
539 if (c == NULL) 543 if (c == NULL)
540 return NULL; 544 return NULL;
541 c->mfc_un.res.minvif = MAXVIFS; 545 c->mfc_un.res.minvif = MAXVIFS;
546 mfc_net_set(c, net);
542 return c; 547 return c;
543} 548}
544 549
545static struct mfc_cache *ipmr_cache_alloc_unres(void) 550static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
546{ 551{
547 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 552 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
548 if (c == NULL) 553 if (c == NULL)
549 return NULL; 554 return NULL;
550 skb_queue_head_init(&c->mfc_un.unres.unresolved); 555 skb_queue_head_init(&c->mfc_un.unres.unresolved);
551 c->mfc_un.unres.expires = jiffies + 10*HZ; 556 c->mfc_un.unres.expires = jiffies + 10*HZ;
557 mfc_net_set(c, net);
552 return c; 558 return c;
553} 559}
554 560
@@ -581,7 +587,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
581 memset(&e->msg, 0, sizeof(e->msg)); 587 memset(&e->msg, 0, sizeof(e->msg));
582 } 588 }
583 589
584 rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid); 590 rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
585 } else 591 } else
586 ip_mr_forward(skb, c, 0); 592 ip_mr_forward(skb, c, 0);
587 } 593 }
@@ -594,7 +600,8 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
594 * Called under mrt_lock. 600 * Called under mrt_lock.
595 */ 601 */
596 602
597static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) 603static int ipmr_cache_report(struct net *net,
604 struct sk_buff *pkt, vifi_t vifi, int assert)
598{ 605{
599 struct sk_buff *skb; 606 struct sk_buff *skb;
600 const int ihl = ip_hdrlen(pkt); 607 const int ihl = ip_hdrlen(pkt);
@@ -626,7 +633,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
626 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); 633 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
627 msg->im_msgtype = IGMPMSG_WHOLEPKT; 634 msg->im_msgtype = IGMPMSG_WHOLEPKT;
628 msg->im_mbz = 0; 635 msg->im_mbz = 0;
629 msg->im_vif = reg_vif_num; 636 msg->im_vif = net->ipv4.mroute_reg_vif_num;
630 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; 637 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
631 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + 638 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
632 sizeof(struct iphdr)); 639 sizeof(struct iphdr));
@@ -658,7 +665,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
658 skb->transport_header = skb->network_header; 665 skb->transport_header = skb->network_header;
659 } 666 }
660 667
661 if (mroute_socket == NULL) { 668 if (net->ipv4.mroute_sk == NULL) {
662 kfree_skb(skb); 669 kfree_skb(skb);
663 return -EINVAL; 670 return -EINVAL;
664 } 671 }
@@ -666,7 +673,8 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
666 /* 673 /*
667 * Deliver to mrouted 674 * Deliver to mrouted
668 */ 675 */
669 if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) { 676 ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
677 if (ret < 0) {
670 if (net_ratelimit()) 678 if (net_ratelimit())
671 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); 679 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
672 kfree_skb(skb); 680 kfree_skb(skb);
@@ -680,7 +688,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
680 */ 688 */
681 689
682static int 690static int
683ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) 691ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
684{ 692{
685 int err; 693 int err;
686 struct mfc_cache *c; 694 struct mfc_cache *c;
@@ -688,7 +696,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
688 696
689 spin_lock_bh(&mfc_unres_lock); 697 spin_lock_bh(&mfc_unres_lock);
690 for (c=mfc_unres_queue; c; c=c->next) { 698 for (c=mfc_unres_queue; c; c=c->next) {
691 if (c->mfc_mcastgrp == iph->daddr && 699 if (net_eq(mfc_net(c), net) &&
700 c->mfc_mcastgrp == iph->daddr &&
692 c->mfc_origin == iph->saddr) 701 c->mfc_origin == iph->saddr)
693 break; 702 break;
694 } 703 }
@@ -698,8 +707,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
698 * Create a new entry if allowable 707 * Create a new entry if allowable
699 */ 708 */
700 709
701 if (atomic_read(&cache_resolve_queue_len) >= 10 || 710 if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
702 (c=ipmr_cache_alloc_unres())==NULL) { 711 (c = ipmr_cache_alloc_unres(net)) == NULL) {
703 spin_unlock_bh(&mfc_unres_lock); 712 spin_unlock_bh(&mfc_unres_lock);
704 713
705 kfree_skb(skb); 714 kfree_skb(skb);
@@ -716,18 +725,19 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
716 /* 725 /*
717 * Reflect first query at mrouted. 726 * Reflect first query at mrouted.
718 */ 727 */
719 if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) { 728 err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
729 if (err < 0) {
720 /* If the report failed throw the cache entry 730 /* If the report failed throw the cache entry
721 out - Brad Parker 731 out - Brad Parker
722 */ 732 */
723 spin_unlock_bh(&mfc_unres_lock); 733 spin_unlock_bh(&mfc_unres_lock);
724 734
725 kmem_cache_free(mrt_cachep, c); 735 ipmr_cache_free(c);
726 kfree_skb(skb); 736 kfree_skb(skb);
727 return err; 737 return err;
728 } 738 }
729 739
730 atomic_inc(&cache_resolve_queue_len); 740 atomic_inc(&net->ipv4.cache_resolve_queue_len);
731 c->next = mfc_unres_queue; 741 c->next = mfc_unres_queue;
732 mfc_unres_queue = c; 742 mfc_unres_queue = c;
733 743
@@ -753,35 +763,37 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
753 * MFC cache manipulation by user space mroute daemon 763 * MFC cache manipulation by user space mroute daemon
754 */ 764 */
755 765
756static int ipmr_mfc_delete(struct mfcctl *mfc) 766static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
757{ 767{
758 int line; 768 int line;
759 struct mfc_cache *c, **cp; 769 struct mfc_cache *c, **cp;
760 770
761 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 771 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
762 772
763 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { 773 for (cp = &net->ipv4.mfc_cache_array[line];
774 (c = *cp) != NULL; cp = &c->next) {
764 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 775 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
765 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 776 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
766 write_lock_bh(&mrt_lock); 777 write_lock_bh(&mrt_lock);
767 *cp = c->next; 778 *cp = c->next;
768 write_unlock_bh(&mrt_lock); 779 write_unlock_bh(&mrt_lock);
769 780
770 kmem_cache_free(mrt_cachep, c); 781 ipmr_cache_free(c);
771 return 0; 782 return 0;
772 } 783 }
773 } 784 }
774 return -ENOENT; 785 return -ENOENT;
775} 786}
776 787
777static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) 788static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
778{ 789{
779 int line; 790 int line;
780 struct mfc_cache *uc, *c, **cp; 791 struct mfc_cache *uc, *c, **cp;
781 792
782 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 793 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
783 794
784 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { 795 for (cp = &net->ipv4.mfc_cache_array[line];
796 (c = *cp) != NULL; cp = &c->next) {
785 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 797 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
786 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) 798 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
787 break; 799 break;
@@ -800,7 +812,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
800 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) 812 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
801 return -EINVAL; 813 return -EINVAL;
802 814
803 c = ipmr_cache_alloc(); 815 c = ipmr_cache_alloc(net);
804 if (c == NULL) 816 if (c == NULL)
805 return -ENOMEM; 817 return -ENOMEM;
806 818
@@ -812,8 +824,8 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
812 c->mfc_flags |= MFC_STATIC; 824 c->mfc_flags |= MFC_STATIC;
813 825
814 write_lock_bh(&mrt_lock); 826 write_lock_bh(&mrt_lock);
815 c->next = mfc_cache_array[line]; 827 c->next = net->ipv4.mfc_cache_array[line];
816 mfc_cache_array[line] = c; 828 net->ipv4.mfc_cache_array[line] = c;
817 write_unlock_bh(&mrt_lock); 829 write_unlock_bh(&mrt_lock);
818 830
819 /* 831 /*
@@ -823,19 +835,21 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
823 spin_lock_bh(&mfc_unres_lock); 835 spin_lock_bh(&mfc_unres_lock);
824 for (cp = &mfc_unres_queue; (uc=*cp) != NULL; 836 for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
825 cp = &uc->next) { 837 cp = &uc->next) {
826 if (uc->mfc_origin == c->mfc_origin && 838 if (net_eq(mfc_net(uc), net) &&
839 uc->mfc_origin == c->mfc_origin &&
827 uc->mfc_mcastgrp == c->mfc_mcastgrp) { 840 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
828 *cp = uc->next; 841 *cp = uc->next;
829 if (atomic_dec_and_test(&cache_resolve_queue_len)) 842 atomic_dec(&net->ipv4.cache_resolve_queue_len);
830 del_timer(&ipmr_expire_timer);
831 break; 843 break;
832 } 844 }
833 } 845 }
846 if (mfc_unres_queue == NULL)
847 del_timer(&ipmr_expire_timer);
834 spin_unlock_bh(&mfc_unres_lock); 848 spin_unlock_bh(&mfc_unres_lock);
835 849
836 if (uc) { 850 if (uc) {
837 ipmr_cache_resolve(uc, c); 851 ipmr_cache_resolve(uc, c);
838 kmem_cache_free(mrt_cachep, uc); 852 ipmr_cache_free(uc);
839 } 853 }
840 return 0; 854 return 0;
841} 855}
@@ -844,16 +858,16 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
844 * Close the multicast socket, and clear the vif tables etc 858 * Close the multicast socket, and clear the vif tables etc
845 */ 859 */
846 860
847static void mroute_clean_tables(struct sock *sk) 861static void mroute_clean_tables(struct net *net)
848{ 862{
849 int i; 863 int i;
850 864
851 /* 865 /*
852 * Shut down all active vif entries 866 * Shut down all active vif entries
853 */ 867 */
854 for (i=0; i<maxvif; i++) { 868 for (i = 0; i < net->ipv4.maxvif; i++) {
855 if (!(vif_table[i].flags&VIFF_STATIC)) 869 if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
856 vif_delete(i, 0); 870 vif_delete(net, i, 0);
857 } 871 }
858 872
859 /* 873 /*
@@ -862,7 +876,7 @@ static void mroute_clean_tables(struct sock *sk)
862 for (i=0; i<MFC_LINES; i++) { 876 for (i=0; i<MFC_LINES; i++) {
863 struct mfc_cache *c, **cp; 877 struct mfc_cache *c, **cp;
864 878
865 cp = &mfc_cache_array[i]; 879 cp = &net->ipv4.mfc_cache_array[i];
866 while ((c = *cp) != NULL) { 880 while ((c = *cp) != NULL) {
867 if (c->mfc_flags&MFC_STATIC) { 881 if (c->mfc_flags&MFC_STATIC) {
868 cp = &c->next; 882 cp = &c->next;
@@ -872,22 +886,23 @@ static void mroute_clean_tables(struct sock *sk)
872 *cp = c->next; 886 *cp = c->next;
873 write_unlock_bh(&mrt_lock); 887 write_unlock_bh(&mrt_lock);
874 888
875 kmem_cache_free(mrt_cachep, c); 889 ipmr_cache_free(c);
876 } 890 }
877 } 891 }
878 892
879 if (atomic_read(&cache_resolve_queue_len) != 0) { 893 if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
880 struct mfc_cache *c; 894 struct mfc_cache *c, **cp;
881 895
882 spin_lock_bh(&mfc_unres_lock); 896 spin_lock_bh(&mfc_unres_lock);
883 while (mfc_unres_queue != NULL) { 897 cp = &mfc_unres_queue;
884 c = mfc_unres_queue; 898 while ((c = *cp) != NULL) {
885 mfc_unres_queue = c->next; 899 if (!net_eq(mfc_net(c), net)) {
886 spin_unlock_bh(&mfc_unres_lock); 900 cp = &c->next;
901 continue;
902 }
903 *cp = c->next;
887 904
888 ipmr_destroy_unres(c); 905 ipmr_destroy_unres(c);
889
890 spin_lock_bh(&mfc_unres_lock);
891 } 906 }
892 spin_unlock_bh(&mfc_unres_lock); 907 spin_unlock_bh(&mfc_unres_lock);
893 } 908 }
@@ -895,15 +910,17 @@ static void mroute_clean_tables(struct sock *sk)
895 910
896static void mrtsock_destruct(struct sock *sk) 911static void mrtsock_destruct(struct sock *sk)
897{ 912{
913 struct net *net = sock_net(sk);
914
898 rtnl_lock(); 915 rtnl_lock();
899 if (sk == mroute_socket) { 916 if (sk == net->ipv4.mroute_sk) {
900 IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--; 917 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
901 918
902 write_lock_bh(&mrt_lock); 919 write_lock_bh(&mrt_lock);
903 mroute_socket = NULL; 920 net->ipv4.mroute_sk = NULL;
904 write_unlock_bh(&mrt_lock); 921 write_unlock_bh(&mrt_lock);
905 922
906 mroute_clean_tables(sk); 923 mroute_clean_tables(net);
907 } 924 }
908 rtnl_unlock(); 925 rtnl_unlock();
909} 926}
@@ -920,9 +937,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
920 int ret; 937 int ret;
921 struct vifctl vif; 938 struct vifctl vif;
922 struct mfcctl mfc; 939 struct mfcctl mfc;
940 struct net *net = sock_net(sk);
923 941
924 if (optname != MRT_INIT) { 942 if (optname != MRT_INIT) {
925 if (sk != mroute_socket && !capable(CAP_NET_ADMIN)) 943 if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
926 return -EACCES; 944 return -EACCES;
927 } 945 }
928 946
@@ -935,7 +953,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
935 return -ENOPROTOOPT; 953 return -ENOPROTOOPT;
936 954
937 rtnl_lock(); 955 rtnl_lock();
938 if (mroute_socket) { 956 if (net->ipv4.mroute_sk) {
939 rtnl_unlock(); 957 rtnl_unlock();
940 return -EADDRINUSE; 958 return -EADDRINUSE;
941 } 959 }
@@ -943,15 +961,15 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
943 ret = ip_ra_control(sk, 1, mrtsock_destruct); 961 ret = ip_ra_control(sk, 1, mrtsock_destruct);
944 if (ret == 0) { 962 if (ret == 0) {
945 write_lock_bh(&mrt_lock); 963 write_lock_bh(&mrt_lock);
946 mroute_socket = sk; 964 net->ipv4.mroute_sk = sk;
947 write_unlock_bh(&mrt_lock); 965 write_unlock_bh(&mrt_lock);
948 966
949 IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++; 967 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
950 } 968 }
951 rtnl_unlock(); 969 rtnl_unlock();
952 return ret; 970 return ret;
953 case MRT_DONE: 971 case MRT_DONE:
954 if (sk != mroute_socket) 972 if (sk != net->ipv4.mroute_sk)
955 return -EACCES; 973 return -EACCES;
956 return ip_ra_control(sk, 0, NULL); 974 return ip_ra_control(sk, 0, NULL);
957 case MRT_ADD_VIF: 975 case MRT_ADD_VIF:
@@ -964,9 +982,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
964 return -ENFILE; 982 return -ENFILE;
965 rtnl_lock(); 983 rtnl_lock();
966 if (optname == MRT_ADD_VIF) { 984 if (optname == MRT_ADD_VIF) {
967 ret = vif_add(&vif, sk==mroute_socket); 985 ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
968 } else { 986 } else {
969 ret = vif_delete(vif.vifc_vifi, 0); 987 ret = vif_delete(net, vif.vifc_vifi, 0);
970 } 988 }
971 rtnl_unlock(); 989 rtnl_unlock();
972 return ret; 990 return ret;
@@ -983,9 +1001,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
983 return -EFAULT; 1001 return -EFAULT;
984 rtnl_lock(); 1002 rtnl_lock();
985 if (optname == MRT_DEL_MFC) 1003 if (optname == MRT_DEL_MFC)
986 ret = ipmr_mfc_delete(&mfc); 1004 ret = ipmr_mfc_delete(net, &mfc);
987 else 1005 else
988 ret = ipmr_mfc_add(&mfc, sk==mroute_socket); 1006 ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
989 rtnl_unlock(); 1007 rtnl_unlock();
990 return ret; 1008 return ret;
991 /* 1009 /*
@@ -996,7 +1014,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
996 int v; 1014 int v;
997 if (get_user(v,(int __user *)optval)) 1015 if (get_user(v,(int __user *)optval))
998 return -EFAULT; 1016 return -EFAULT;
999 mroute_do_assert=(v)?1:0; 1017 net->ipv4.mroute_do_assert = (v) ? 1 : 0;
1000 return 0; 1018 return 0;
1001 } 1019 }
1002#ifdef CONFIG_IP_PIMSM 1020#ifdef CONFIG_IP_PIMSM
@@ -1010,11 +1028,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
1010 1028
1011 rtnl_lock(); 1029 rtnl_lock();
1012 ret = 0; 1030 ret = 0;
1013 if (v != mroute_do_pim) { 1031 if (v != net->ipv4.mroute_do_pim) {
1014 mroute_do_pim = v; 1032 net->ipv4.mroute_do_pim = v;
1015 mroute_do_assert = v; 1033 net->ipv4.mroute_do_assert = v;
1016#ifdef CONFIG_IP_PIMSM_V2 1034#ifdef CONFIG_IP_PIMSM_V2
1017 if (mroute_do_pim) 1035 if (net->ipv4.mroute_do_pim)
1018 ret = inet_add_protocol(&pim_protocol, 1036 ret = inet_add_protocol(&pim_protocol,
1019 IPPROTO_PIM); 1037 IPPROTO_PIM);
1020 else 1038 else
@@ -1045,6 +1063,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
1045{ 1063{
1046 int olr; 1064 int olr;
1047 int val; 1065 int val;
1066 struct net *net = sock_net(sk);
1048 1067
1049 if (optname != MRT_VERSION && 1068 if (optname != MRT_VERSION &&
1050#ifdef CONFIG_IP_PIMSM 1069#ifdef CONFIG_IP_PIMSM
@@ -1066,10 +1085,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
1066 val = 0x0305; 1085 val = 0x0305;
1067#ifdef CONFIG_IP_PIMSM 1086#ifdef CONFIG_IP_PIMSM
1068 else if (optname == MRT_PIM) 1087 else if (optname == MRT_PIM)
1069 val = mroute_do_pim; 1088 val = net->ipv4.mroute_do_pim;
1070#endif 1089#endif
1071 else 1090 else
1072 val = mroute_do_assert; 1091 val = net->ipv4.mroute_do_assert;
1073 if (copy_to_user(optval, &val, olr)) 1092 if (copy_to_user(optval, &val, olr))
1074 return -EFAULT; 1093 return -EFAULT;
1075 return 0; 1094 return 0;
@@ -1085,16 +1104,17 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1085 struct sioc_vif_req vr; 1104 struct sioc_vif_req vr;
1086 struct vif_device *vif; 1105 struct vif_device *vif;
1087 struct mfc_cache *c; 1106 struct mfc_cache *c;
1107 struct net *net = sock_net(sk);
1088 1108
1089 switch (cmd) { 1109 switch (cmd) {
1090 case SIOCGETVIFCNT: 1110 case SIOCGETVIFCNT:
1091 if (copy_from_user(&vr, arg, sizeof(vr))) 1111 if (copy_from_user(&vr, arg, sizeof(vr)))
1092 return -EFAULT; 1112 return -EFAULT;
1093 if (vr.vifi >= maxvif) 1113 if (vr.vifi >= net->ipv4.maxvif)
1094 return -EINVAL; 1114 return -EINVAL;
1095 read_lock(&mrt_lock); 1115 read_lock(&mrt_lock);
1096 vif=&vif_table[vr.vifi]; 1116 vif = &net->ipv4.vif_table[vr.vifi];
1097 if (VIF_EXISTS(vr.vifi)) { 1117 if (VIF_EXISTS(net, vr.vifi)) {
1098 vr.icount = vif->pkt_in; 1118 vr.icount = vif->pkt_in;
1099 vr.ocount = vif->pkt_out; 1119 vr.ocount = vif->pkt_out;
1100 vr.ibytes = vif->bytes_in; 1120 vr.ibytes = vif->bytes_in;
@@ -1112,7 +1132,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1112 return -EFAULT; 1132 return -EFAULT;
1113 1133
1114 read_lock(&mrt_lock); 1134 read_lock(&mrt_lock);
1115 c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr); 1135 c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
1116 if (c) { 1136 if (c) {
1117 sr.pktcnt = c->mfc_un.res.pkt; 1137 sr.pktcnt = c->mfc_un.res.pkt;
1118 sr.bytecnt = c->mfc_un.res.bytes; 1138 sr.bytecnt = c->mfc_un.res.bytes;
@@ -1134,18 +1154,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1134static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) 1154static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1135{ 1155{
1136 struct net_device *dev = ptr; 1156 struct net_device *dev = ptr;
1157 struct net *net = dev_net(dev);
1137 struct vif_device *v; 1158 struct vif_device *v;
1138 int ct; 1159 int ct;
1139 1160
1140 if (!net_eq(dev_net(dev), &init_net)) 1161 if (!net_eq(dev_net(dev), net))
1141 return NOTIFY_DONE; 1162 return NOTIFY_DONE;
1142 1163
1143 if (event != NETDEV_UNREGISTER) 1164 if (event != NETDEV_UNREGISTER)
1144 return NOTIFY_DONE; 1165 return NOTIFY_DONE;
1145 v=&vif_table[0]; 1166 v = &net->ipv4.vif_table[0];
1146 for (ct=0; ct<maxvif; ct++,v++) { 1167 for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
1147 if (v->dev == dev) 1168 if (v->dev == dev)
1148 vif_delete(ct, 1); 1169 vif_delete(net, ct, 1);
1149 } 1170 }
1150 return NOTIFY_DONE; 1171 return NOTIFY_DONE;
1151} 1172}
@@ -1205,8 +1226,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
1205 1226
1206static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) 1227static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1207{ 1228{
1229 struct net *net = mfc_net(c);
1208 const struct iphdr *iph = ip_hdr(skb); 1230 const struct iphdr *iph = ip_hdr(skb);
1209 struct vif_device *vif = &vif_table[vifi]; 1231 struct vif_device *vif = &net->ipv4.vif_table[vifi];
1210 struct net_device *dev; 1232 struct net_device *dev;
1211 struct rtable *rt; 1233 struct rtable *rt;
1212 int encap = 0; 1234 int encap = 0;
@@ -1220,9 +1242,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1220 vif->bytes_out += skb->len; 1242 vif->bytes_out += skb->len;
1221 vif->dev->stats.tx_bytes += skb->len; 1243 vif->dev->stats.tx_bytes += skb->len;
1222 vif->dev->stats.tx_packets++; 1244 vif->dev->stats.tx_packets++;
1223 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); 1245 ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
1224 kfree_skb(skb); 1246 goto out_free;
1225 return;
1226 } 1247 }
1227#endif 1248#endif
1228 1249
@@ -1233,7 +1254,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1233 .saddr = vif->local, 1254 .saddr = vif->local,
1234 .tos = RT_TOS(iph->tos) } }, 1255 .tos = RT_TOS(iph->tos) } },
1235 .proto = IPPROTO_IPIP }; 1256 .proto = IPPROTO_IPIP };
1236 if (ip_route_output_key(&init_net, &rt, &fl)) 1257 if (ip_route_output_key(net, &rt, &fl))
1237 goto out_free; 1258 goto out_free;
1238 encap = sizeof(struct iphdr); 1259 encap = sizeof(struct iphdr);
1239 } else { 1260 } else {
@@ -1242,7 +1263,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1242 { .daddr = iph->daddr, 1263 { .daddr = iph->daddr,
1243 .tos = RT_TOS(iph->tos) } }, 1264 .tos = RT_TOS(iph->tos) } },
1244 .proto = IPPROTO_IPIP }; 1265 .proto = IPPROTO_IPIP };
1245 if (ip_route_output_key(&init_net, &rt, &fl)) 1266 if (ip_route_output_key(net, &rt, &fl))
1246 goto out_free; 1267 goto out_free;
1247 } 1268 }
1248 1269
@@ -1306,9 +1327,10 @@ out_free:
1306 1327
1307static int ipmr_find_vif(struct net_device *dev) 1328static int ipmr_find_vif(struct net_device *dev)
1308{ 1329{
1330 struct net *net = dev_net(dev);
1309 int ct; 1331 int ct;
1310 for (ct=maxvif-1; ct>=0; ct--) { 1332 for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
1311 if (vif_table[ct].dev == dev) 1333 if (net->ipv4.vif_table[ct].dev == dev)
1312 break; 1334 break;
1313 } 1335 }
1314 return ct; 1336 return ct;
@@ -1320,6 +1342,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1320{ 1342{
1321 int psend = -1; 1343 int psend = -1;
1322 int vif, ct; 1344 int vif, ct;
1345 struct net *net = mfc_net(cache);
1323 1346
1324 vif = cache->mfc_parent; 1347 vif = cache->mfc_parent;
1325 cache->mfc_un.res.pkt++; 1348 cache->mfc_un.res.pkt++;
@@ -1328,7 +1351,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1328 /* 1351 /*
1329 * Wrong interface: drop packet and (maybe) send PIM assert. 1352 * Wrong interface: drop packet and (maybe) send PIM assert.
1330 */ 1353 */
1331 if (vif_table[vif].dev != skb->dev) { 1354 if (net->ipv4.vif_table[vif].dev != skb->dev) {
1332 int true_vifi; 1355 int true_vifi;
1333 1356
1334 if (skb->rtable->fl.iif == 0) { 1357 if (skb->rtable->fl.iif == 0) {
@@ -1349,23 +1372,24 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1349 cache->mfc_un.res.wrong_if++; 1372 cache->mfc_un.res.wrong_if++;
1350 true_vifi = ipmr_find_vif(skb->dev); 1373 true_vifi = ipmr_find_vif(skb->dev);
1351 1374
1352 if (true_vifi >= 0 && mroute_do_assert && 1375 if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
1353 /* pimsm uses asserts, when switching from RPT to SPT, 1376 /* pimsm uses asserts, when switching from RPT to SPT,
1354 so that we cannot check that packet arrived on an oif. 1377 so that we cannot check that packet arrived on an oif.
1355 It is bad, but otherwise we would need to move pretty 1378 It is bad, but otherwise we would need to move pretty
1356 large chunk of pimd to kernel. Ough... --ANK 1379 large chunk of pimd to kernel. Ough... --ANK
1357 */ 1380 */
1358 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && 1381 (net->ipv4.mroute_do_pim ||
1382 cache->mfc_un.res.ttls[true_vifi] < 255) &&
1359 time_after(jiffies, 1383 time_after(jiffies,
1360 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { 1384 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1361 cache->mfc_un.res.last_assert = jiffies; 1385 cache->mfc_un.res.last_assert = jiffies;
1362 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF); 1386 ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
1363 } 1387 }
1364 goto dont_forward; 1388 goto dont_forward;
1365 } 1389 }
1366 1390
1367 vif_table[vif].pkt_in++; 1391 net->ipv4.vif_table[vif].pkt_in++;
1368 vif_table[vif].bytes_in += skb->len; 1392 net->ipv4.vif_table[vif].bytes_in += skb->len;
1369 1393
1370 /* 1394 /*
1371 * Forward the frame 1395 * Forward the frame
@@ -1405,6 +1429,7 @@ dont_forward:
1405int ip_mr_input(struct sk_buff *skb) 1429int ip_mr_input(struct sk_buff *skb)
1406{ 1430{
1407 struct mfc_cache *cache; 1431 struct mfc_cache *cache;
1432 struct net *net = dev_net(skb->dev);
1408 int local = skb->rtable->rt_flags&RTCF_LOCAL; 1433 int local = skb->rtable->rt_flags&RTCF_LOCAL;
1409 1434
1410 /* Packet is looped back after forward, it should not be 1435 /* Packet is looped back after forward, it should not be
@@ -1425,9 +1450,9 @@ int ip_mr_input(struct sk_buff *skb)
1425 that we can forward NO IGMP messages. 1450 that we can forward NO IGMP messages.
1426 */ 1451 */
1427 read_lock(&mrt_lock); 1452 read_lock(&mrt_lock);
1428 if (mroute_socket) { 1453 if (net->ipv4.mroute_sk) {
1429 nf_reset(skb); 1454 nf_reset(skb);
1430 raw_rcv(mroute_socket, skb); 1455 raw_rcv(net->ipv4.mroute_sk, skb);
1431 read_unlock(&mrt_lock); 1456 read_unlock(&mrt_lock);
1432 return 0; 1457 return 0;
1433 } 1458 }
@@ -1436,7 +1461,7 @@ int ip_mr_input(struct sk_buff *skb)
1436 } 1461 }
1437 1462
1438 read_lock(&mrt_lock); 1463 read_lock(&mrt_lock);
1439 cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 1464 cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1440 1465
1441 /* 1466 /*
1442 * No usable cache entry 1467 * No usable cache entry
@@ -1456,7 +1481,7 @@ int ip_mr_input(struct sk_buff *skb)
1456 1481
1457 vif = ipmr_find_vif(skb->dev); 1482 vif = ipmr_find_vif(skb->dev);
1458 if (vif >= 0) { 1483 if (vif >= 0) {
1459 int err = ipmr_cache_unresolved(vif, skb); 1484 int err = ipmr_cache_unresolved(net, vif, skb);
1460 read_unlock(&mrt_lock); 1485 read_unlock(&mrt_lock);
1461 1486
1462 return err; 1487 return err;
@@ -1487,6 +1512,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
1487{ 1512{
1488 struct net_device *reg_dev = NULL; 1513 struct net_device *reg_dev = NULL;
1489 struct iphdr *encap; 1514 struct iphdr *encap;
1515 struct net *net = dev_net(skb->dev);
1490 1516
1491 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); 1517 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
1492 /* 1518 /*
@@ -1501,8 +1527,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
1501 return 1; 1527 return 1;
1502 1528
1503 read_lock(&mrt_lock); 1529 read_lock(&mrt_lock);
1504 if (reg_vif_num >= 0) 1530 if (net->ipv4.mroute_reg_vif_num >= 0)
1505 reg_dev = vif_table[reg_vif_num].dev; 1531 reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
1506 if (reg_dev) 1532 if (reg_dev)
1507 dev_hold(reg_dev); 1533 dev_hold(reg_dev);
1508 read_unlock(&mrt_lock); 1534 read_unlock(&mrt_lock);
@@ -1537,13 +1563,14 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
1537int pim_rcv_v1(struct sk_buff * skb) 1563int pim_rcv_v1(struct sk_buff * skb)
1538{ 1564{
1539 struct igmphdr *pim; 1565 struct igmphdr *pim;
1566 struct net *net = dev_net(skb->dev);
1540 1567
1541 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1568 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1542 goto drop; 1569 goto drop;
1543 1570
1544 pim = igmp_hdr(skb); 1571 pim = igmp_hdr(skb);
1545 1572
1546 if (!mroute_do_pim || 1573 if (!net->ipv4.mroute_do_pim ||
1547 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 1574 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1548 goto drop; 1575 goto drop;
1549 1576
@@ -1583,7 +1610,8 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1583{ 1610{
1584 int ct; 1611 int ct;
1585 struct rtnexthop *nhp; 1612 struct rtnexthop *nhp;
1586 struct net_device *dev = vif_table[c->mfc_parent].dev; 1613 struct net *net = mfc_net(c);
1614 struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
1587 u8 *b = skb_tail_pointer(skb); 1615 u8 *b = skb_tail_pointer(skb);
1588 struct rtattr *mp_head; 1616 struct rtattr *mp_head;
1589 1617
@@ -1599,7 +1627,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1599 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1627 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1600 nhp->rtnh_flags = 0; 1628 nhp->rtnh_flags = 0;
1601 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 1629 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1602 nhp->rtnh_ifindex = vif_table[ct].dev->ifindex; 1630 nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
1603 nhp->rtnh_len = sizeof(*nhp); 1631 nhp->rtnh_len = sizeof(*nhp);
1604 } 1632 }
1605 } 1633 }
@@ -1613,14 +1641,15 @@ rtattr_failure:
1613 return -EMSGSIZE; 1641 return -EMSGSIZE;
1614} 1642}
1615 1643
1616int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) 1644int ipmr_get_route(struct net *net,
1645 struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1617{ 1646{
1618 int err; 1647 int err;
1619 struct mfc_cache *cache; 1648 struct mfc_cache *cache;
1620 struct rtable *rt = skb->rtable; 1649 struct rtable *rt = skb->rtable;
1621 1650
1622 read_lock(&mrt_lock); 1651 read_lock(&mrt_lock);
1623 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); 1652 cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
1624 1653
1625 if (cache == NULL) { 1654 if (cache == NULL) {
1626 struct sk_buff *skb2; 1655 struct sk_buff *skb2;
@@ -1651,7 +1680,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1651 iph->saddr = rt->rt_src; 1680 iph->saddr = rt->rt_src;
1652 iph->daddr = rt->rt_dst; 1681 iph->daddr = rt->rt_dst;
1653 iph->version = 0; 1682 iph->version = 0;
1654 err = ipmr_cache_unresolved(vif, skb2); 1683 err = ipmr_cache_unresolved(net, vif, skb2);
1655 read_unlock(&mrt_lock); 1684 read_unlock(&mrt_lock);
1656 return err; 1685 return err;
1657 } 1686 }
@@ -1668,17 +1697,19 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1668 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif 1697 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
1669 */ 1698 */
1670struct ipmr_vif_iter { 1699struct ipmr_vif_iter {
1700 struct seq_net_private p;
1671 int ct; 1701 int ct;
1672}; 1702};
1673 1703
1674static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter, 1704static struct vif_device *ipmr_vif_seq_idx(struct net *net,
1705 struct ipmr_vif_iter *iter,
1675 loff_t pos) 1706 loff_t pos)
1676{ 1707{
1677 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) { 1708 for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
1678 if (!VIF_EXISTS(iter->ct)) 1709 if (!VIF_EXISTS(net, iter->ct))
1679 continue; 1710 continue;
1680 if (pos-- == 0) 1711 if (pos-- == 0)
1681 return &vif_table[iter->ct]; 1712 return &net->ipv4.vif_table[iter->ct];
1682 } 1713 }
1683 return NULL; 1714 return NULL;
1684} 1715}
@@ -1686,23 +1717,26 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1686static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) 1717static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
1687 __acquires(mrt_lock) 1718 __acquires(mrt_lock)
1688{ 1719{
1720 struct net *net = seq_file_net(seq);
1721
1689 read_lock(&mrt_lock); 1722 read_lock(&mrt_lock);
1690 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1) 1723 return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
1691 : SEQ_START_TOKEN; 1724 : SEQ_START_TOKEN;
1692} 1725}
1693 1726
1694static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1727static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1695{ 1728{
1696 struct ipmr_vif_iter *iter = seq->private; 1729 struct ipmr_vif_iter *iter = seq->private;
1730 struct net *net = seq_file_net(seq);
1697 1731
1698 ++*pos; 1732 ++*pos;
1699 if (v == SEQ_START_TOKEN) 1733 if (v == SEQ_START_TOKEN)
1700 return ipmr_vif_seq_idx(iter, 0); 1734 return ipmr_vif_seq_idx(net, iter, 0);
1701 1735
1702 while (++iter->ct < maxvif) { 1736 while (++iter->ct < net->ipv4.maxvif) {
1703 if (!VIF_EXISTS(iter->ct)) 1737 if (!VIF_EXISTS(net, iter->ct))
1704 continue; 1738 continue;
1705 return &vif_table[iter->ct]; 1739 return &net->ipv4.vif_table[iter->ct];
1706 } 1740 }
1707 return NULL; 1741 return NULL;
1708} 1742}
@@ -1715,6 +1749,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
1715 1749
1716static int ipmr_vif_seq_show(struct seq_file *seq, void *v) 1750static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1717{ 1751{
1752 struct net *net = seq_file_net(seq);
1753
1718 if (v == SEQ_START_TOKEN) { 1754 if (v == SEQ_START_TOKEN) {
1719 seq_puts(seq, 1755 seq_puts(seq,
1720 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); 1756 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
@@ -1724,7 +1760,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1724 1760
1725 seq_printf(seq, 1761 seq_printf(seq,
1726 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", 1762 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1727 vif - vif_table, 1763 vif - net->ipv4.vif_table,
1728 name, vif->bytes_in, vif->pkt_in, 1764 name, vif->bytes_in, vif->pkt_in,
1729 vif->bytes_out, vif->pkt_out, 1765 vif->bytes_out, vif->pkt_out,
1730 vif->flags, vif->local, vif->remote); 1766 vif->flags, vif->local, vif->remote);
@@ -1741,8 +1777,8 @@ static const struct seq_operations ipmr_vif_seq_ops = {
1741 1777
1742static int ipmr_vif_open(struct inode *inode, struct file *file) 1778static int ipmr_vif_open(struct inode *inode, struct file *file)
1743{ 1779{
1744 return seq_open_private(file, &ipmr_vif_seq_ops, 1780 return seq_open_net(inode, file, &ipmr_vif_seq_ops,
1745 sizeof(struct ipmr_vif_iter)); 1781 sizeof(struct ipmr_vif_iter));
1746} 1782}
1747 1783
1748static const struct file_operations ipmr_vif_fops = { 1784static const struct file_operations ipmr_vif_fops = {
@@ -1750,23 +1786,26 @@ static const struct file_operations ipmr_vif_fops = {
1750 .open = ipmr_vif_open, 1786 .open = ipmr_vif_open,
1751 .read = seq_read, 1787 .read = seq_read,
1752 .llseek = seq_lseek, 1788 .llseek = seq_lseek,
1753 .release = seq_release_private, 1789 .release = seq_release_net,
1754}; 1790};
1755 1791
1756struct ipmr_mfc_iter { 1792struct ipmr_mfc_iter {
1793 struct seq_net_private p;
1757 struct mfc_cache **cache; 1794 struct mfc_cache **cache;
1758 int ct; 1795 int ct;
1759}; 1796};
1760 1797
1761 1798
1762static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) 1799static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
1800 struct ipmr_mfc_iter *it, loff_t pos)
1763{ 1801{
1764 struct mfc_cache *mfc; 1802 struct mfc_cache *mfc;
1765 1803
1766 it->cache = mfc_cache_array; 1804 it->cache = net->ipv4.mfc_cache_array;
1767 read_lock(&mrt_lock); 1805 read_lock(&mrt_lock);
1768 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) 1806 for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
1769 for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next) 1807 for (mfc = net->ipv4.mfc_cache_array[it->ct];
1808 mfc; mfc = mfc->next)
1770 if (pos-- == 0) 1809 if (pos-- == 0)
1771 return mfc; 1810 return mfc;
1772 read_unlock(&mrt_lock); 1811 read_unlock(&mrt_lock);
@@ -1774,7 +1813,8 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1774 it->cache = &mfc_unres_queue; 1813 it->cache = &mfc_unres_queue;
1775 spin_lock_bh(&mfc_unres_lock); 1814 spin_lock_bh(&mfc_unres_lock);
1776 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) 1815 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
1777 if (pos-- == 0) 1816 if (net_eq(mfc_net(mfc), net) &&
1817 pos-- == 0)
1778 return mfc; 1818 return mfc;
1779 spin_unlock_bh(&mfc_unres_lock); 1819 spin_unlock_bh(&mfc_unres_lock);
1780 1820
@@ -1786,9 +1826,11 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1786static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) 1826static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1787{ 1827{
1788 struct ipmr_mfc_iter *it = seq->private; 1828 struct ipmr_mfc_iter *it = seq->private;
1829 struct net *net = seq_file_net(seq);
1830
1789 it->cache = NULL; 1831 it->cache = NULL;
1790 it->ct = 0; 1832 it->ct = 0;
1791 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1) 1833 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
1792 : SEQ_START_TOKEN; 1834 : SEQ_START_TOKEN;
1793} 1835}
1794 1836
@@ -1796,11 +1838,12 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1796{ 1838{
1797 struct mfc_cache *mfc = v; 1839 struct mfc_cache *mfc = v;
1798 struct ipmr_mfc_iter *it = seq->private; 1840 struct ipmr_mfc_iter *it = seq->private;
1841 struct net *net = seq_file_net(seq);
1799 1842
1800 ++*pos; 1843 ++*pos;
1801 1844
1802 if (v == SEQ_START_TOKEN) 1845 if (v == SEQ_START_TOKEN)
1803 return ipmr_mfc_seq_idx(seq->private, 0); 1846 return ipmr_mfc_seq_idx(net, seq->private, 0);
1804 1847
1805 if (mfc->next) 1848 if (mfc->next)
1806 return mfc->next; 1849 return mfc->next;
@@ -1808,10 +1851,10 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1808 if (it->cache == &mfc_unres_queue) 1851 if (it->cache == &mfc_unres_queue)
1809 goto end_of_list; 1852 goto end_of_list;
1810 1853
1811 BUG_ON(it->cache != mfc_cache_array); 1854 BUG_ON(it->cache != net->ipv4.mfc_cache_array);
1812 1855
1813 while (++it->ct < MFC_LINES) { 1856 while (++it->ct < MFC_LINES) {
1814 mfc = mfc_cache_array[it->ct]; 1857 mfc = net->ipv4.mfc_cache_array[it->ct];
1815 if (mfc) 1858 if (mfc)
1816 return mfc; 1859 return mfc;
1817 } 1860 }
@@ -1823,6 +1866,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1823 1866
1824 spin_lock_bh(&mfc_unres_lock); 1867 spin_lock_bh(&mfc_unres_lock);
1825 mfc = mfc_unres_queue; 1868 mfc = mfc_unres_queue;
1869 while (mfc && !net_eq(mfc_net(mfc), net))
1870 mfc = mfc->next;
1826 if (mfc) 1871 if (mfc)
1827 return mfc; 1872 return mfc;
1828 1873
@@ -1836,16 +1881,18 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1836static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) 1881static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1837{ 1882{
1838 struct ipmr_mfc_iter *it = seq->private; 1883 struct ipmr_mfc_iter *it = seq->private;
1884 struct net *net = seq_file_net(seq);
1839 1885
1840 if (it->cache == &mfc_unres_queue) 1886 if (it->cache == &mfc_unres_queue)
1841 spin_unlock_bh(&mfc_unres_lock); 1887 spin_unlock_bh(&mfc_unres_lock);
1842 else if (it->cache == mfc_cache_array) 1888 else if (it->cache == net->ipv4.mfc_cache_array)
1843 read_unlock(&mrt_lock); 1889 read_unlock(&mrt_lock);
1844} 1890}
1845 1891
1846static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) 1892static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1847{ 1893{
1848 int n; 1894 int n;
1895 struct net *net = seq_file_net(seq);
1849 1896
1850 if (v == SEQ_START_TOKEN) { 1897 if (v == SEQ_START_TOKEN) {
1851 seq_puts(seq, 1898 seq_puts(seq,
@@ -1866,9 +1913,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1866 mfc->mfc_un.res.wrong_if); 1913 mfc->mfc_un.res.wrong_if);
1867 for (n = mfc->mfc_un.res.minvif; 1914 for (n = mfc->mfc_un.res.minvif;
1868 n < mfc->mfc_un.res.maxvif; n++ ) { 1915 n < mfc->mfc_un.res.maxvif; n++ ) {
1869 if (VIF_EXISTS(n) 1916 if (VIF_EXISTS(net, n) &&
1870 && mfc->mfc_un.res.ttls[n] < 255) 1917 mfc->mfc_un.res.ttls[n] < 255)
1871 seq_printf(seq, 1918 seq_printf(seq,
1872 " %2d:%-3d", 1919 " %2d:%-3d",
1873 n, mfc->mfc_un.res.ttls[n]); 1920 n, mfc->mfc_un.res.ttls[n]);
1874 } 1921 }
@@ -1892,8 +1939,8 @@ static const struct seq_operations ipmr_mfc_seq_ops = {
1892 1939
1893static int ipmr_mfc_open(struct inode *inode, struct file *file) 1940static int ipmr_mfc_open(struct inode *inode, struct file *file)
1894{ 1941{
1895 return seq_open_private(file, &ipmr_mfc_seq_ops, 1942 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
1896 sizeof(struct ipmr_mfc_iter)); 1943 sizeof(struct ipmr_mfc_iter));
1897} 1944}
1898 1945
1899static const struct file_operations ipmr_mfc_fops = { 1946static const struct file_operations ipmr_mfc_fops = {
@@ -1901,7 +1948,7 @@ static const struct file_operations ipmr_mfc_fops = {
1901 .open = ipmr_mfc_open, 1948 .open = ipmr_mfc_open,
1902 .read = seq_read, 1949 .read = seq_read,
1903 .llseek = seq_lseek, 1950 .llseek = seq_lseek,
1904 .release = seq_release_private, 1951 .release = seq_release_net,
1905}; 1952};
1906#endif 1953#endif
1907 1954
@@ -1915,6 +1962,65 @@ static struct net_protocol pim_protocol = {
1915/* 1962/*
1916 * Setup for IP multicast routing 1963 * Setup for IP multicast routing
1917 */ 1964 */
1965static int __net_init ipmr_net_init(struct net *net)
1966{
1967 int err = 0;
1968
1969 net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
1970 GFP_KERNEL);
1971 if (!net->ipv4.vif_table) {
1972 err = -ENOMEM;
1973 goto fail;
1974 }
1975
1976 /* Forwarding cache */
1977 net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
1978 sizeof(struct mfc_cache *),
1979 GFP_KERNEL);
1980 if (!net->ipv4.mfc_cache_array) {
1981 err = -ENOMEM;
1982 goto fail_mfc_cache;
1983 }
1984
1985#ifdef CONFIG_IP_PIMSM
1986 net->ipv4.mroute_reg_vif_num = -1;
1987#endif
1988
1989#ifdef CONFIG_PROC_FS
1990 err = -ENOMEM;
1991 if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
1992 goto proc_vif_fail;
1993 if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
1994 goto proc_cache_fail;
1995#endif
1996 return 0;
1997
1998#ifdef CONFIG_PROC_FS
1999proc_cache_fail:
2000 proc_net_remove(net, "ip_mr_vif");
2001proc_vif_fail:
2002 kfree(net->ipv4.mfc_cache_array);
2003#endif
2004fail_mfc_cache:
2005 kfree(net->ipv4.vif_table);
2006fail:
2007 return err;
2008}
2009
2010static void __net_exit ipmr_net_exit(struct net *net)
2011{
2012#ifdef CONFIG_PROC_FS
2013 proc_net_remove(net, "ip_mr_cache");
2014 proc_net_remove(net, "ip_mr_vif");
2015#endif
2016 kfree(net->ipv4.mfc_cache_array);
2017 kfree(net->ipv4.vif_table);
2018}
2019
2020static struct pernet_operations ipmr_net_ops = {
2021 .init = ipmr_net_init,
2022 .exit = ipmr_net_exit,
2023};
1918 2024
1919int __init ip_mr_init(void) 2025int __init ip_mr_init(void)
1920{ 2026{
@@ -1927,26 +2033,20 @@ int __init ip_mr_init(void)
1927 if (!mrt_cachep) 2033 if (!mrt_cachep)
1928 return -ENOMEM; 2034 return -ENOMEM;
1929 2035
2036 err = register_pernet_subsys(&ipmr_net_ops);
2037 if (err)
2038 goto reg_pernet_fail;
2039
1930 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); 2040 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
1931 err = register_netdevice_notifier(&ip_mr_notifier); 2041 err = register_netdevice_notifier(&ip_mr_notifier);
1932 if (err) 2042 if (err)
1933 goto reg_notif_fail; 2043 goto reg_notif_fail;
1934#ifdef CONFIG_PROC_FS
1935 err = -ENOMEM;
1936 if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
1937 goto proc_vif_fail;
1938 if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
1939 goto proc_cache_fail;
1940#endif
1941 return 0; 2044 return 0;
1942#ifdef CONFIG_PROC_FS 2045
1943proc_cache_fail:
1944 proc_net_remove(&init_net, "ip_mr_vif");
1945proc_vif_fail:
1946 unregister_netdevice_notifier(&ip_mr_notifier);
1947#endif
1948reg_notif_fail: 2046reg_notif_fail:
1949 del_timer(&ipmr_expire_timer); 2047 del_timer(&ipmr_expire_timer);
2048 unregister_pernet_subsys(&ipmr_net_ops);
2049reg_pernet_fail:
1950 kmem_cache_destroy(mrt_cachep); 2050 kmem_cache_destroy(mrt_cachep);
1951 return err; 2051 return err;
1952} 2052}
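The ipmr.c changes above follow the usual netns conversion recipe: the old file-scope globals (vif_table, mfc_cache_array, maxvif, mroute_sk, the PIM state) move into struct netns_ipv4, the helpers grow a struct net argument, and per-namespace setup/teardown is hooked in through register_pernet_subsys(). A minimal sketch of that registration pattern is below; the "foo" names and the empty init/exit bodies are illustrative stand-ins, not code from this patch.

#include <linux/init.h>
#include <net/net_namespace.h>

static int __net_init foo_net_init(struct net *net)
{
	/* Allocate and initialise this namespace's private state here, the
	 * way ipmr_net_init() above sets up net->ipv4.vif_table and
	 * net->ipv4.mfc_cache_array.  Called once for every namespace. */
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* Undo foo_net_init(); called as each namespace is torn down. */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

static int __init foo_init(void)
{
	/* One registration makes .init run for the initial namespace and for
	 * every namespace created afterwards. */
	return register_pernet_subsys(&foo_net_ops);
}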
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 182f845de92..d9521f6f9ed 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1292,7 +1292,7 @@ static struct nf_conntrack_helper snmp_helper __read_mostly = {
1292 .expect_policy = &snmp_exp_policy, 1292 .expect_policy = &snmp_exp_policy,
1293 .name = "snmp", 1293 .name = "snmp",
1294 .tuple.src.l3num = AF_INET, 1294 .tuple.src.l3num = AF_INET,
1295 .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), 1295 .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
1296 .tuple.dst.protonum = IPPROTO_UDP, 1296 .tuple.dst.protonum = IPPROTO_UDP,
1297}; 1297};
1298 1298
@@ -1302,7 +1302,7 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
1302 .expect_policy = &snmp_exp_policy, 1302 .expect_policy = &snmp_exp_policy,
1303 .name = "snmp_trap", 1303 .name = "snmp_trap",
1304 .tuple.src.l3num = AF_INET, 1304 .tuple.src.l3num = AF_INET,
1305 .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), 1305 .tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT),
1306 .tuple.dst.protonum = IPPROTO_UDP, 1306 .tuple.dst.protonum = IPPROTO_UDP,
1307}; 1307};
1308 1308
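The two conntrack helper hunks above, like the dst_ops hunks in route.c and xfrm4_policy.c further down, swap __constant_htons() for cpu_to_be16(). The generic byteorder macros already collapse to compile-time constants when given constant arguments, so they are just as legal in static initializers and the __constant_ spelling is redundant. A small stand-alone illustration; struct demo_tuple and its two instances are made up, and 161 is simply the well-known SNMP port used as a sample constant.

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_tuple {
	__be16 port;			/* held in network byte order */
};

/* Both initializers produce identical object code; the second form is the
 * one the tree is being converted to. */
static const struct demo_tuple old_style = { .port = __constant_htons(161) };
static const struct demo_tuple new_style = { .port = cpu_to_be16(161) };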
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 97f71153584..5caee609be0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -151,7 +151,7 @@ static void rt_emergency_hash_rebuild(struct net *net);
151 151
152static struct dst_ops ipv4_dst_ops = { 152static struct dst_ops ipv4_dst_ops = {
153 .family = AF_INET, 153 .family = AF_INET,
154 .protocol = __constant_htons(ETH_P_IP), 154 .protocol = cpu_to_be16(ETH_P_IP),
155 .gc = rt_garbage_collect, 155 .gc = rt_garbage_collect,
156 .check = ipv4_dst_check, 156 .check = ipv4_dst_check,
157 .destroy = ipv4_dst_destroy, 157 .destroy = ipv4_dst_destroy,
@@ -2696,7 +2696,7 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2696 2696
2697static struct dst_ops ipv4_dst_blackhole_ops = { 2697static struct dst_ops ipv4_dst_blackhole_ops = {
2698 .family = AF_INET, 2698 .family = AF_INET,
2699 .protocol = __constant_htons(ETH_P_IP), 2699 .protocol = cpu_to_be16(ETH_P_IP),
2700 .destroy = ipv4_dst_destroy, 2700 .destroy = ipv4_dst_destroy,
2701 .check = ipv4_dst_check, 2701 .check = ipv4_dst_check,
2702 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2702 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
@@ -2779,7 +2779,8 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2779 return ip_route_output_flow(net, rp, flp, NULL, 0); 2779 return ip_route_output_flow(net, rp, flp, NULL, 0);
2780} 2780}
2781 2781
2782static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 2782static int rt_fill_info(struct net *net,
2783 struct sk_buff *skb, u32 pid, u32 seq, int event,
2783 int nowait, unsigned int flags) 2784 int nowait, unsigned int flags)
2784{ 2785{
2785 struct rtable *rt = skb->rtable; 2786 struct rtable *rt = skb->rtable;
@@ -2844,8 +2845,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2844 __be32 dst = rt->rt_dst; 2845 __be32 dst = rt->rt_dst;
2845 2846
2846 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && 2847 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2847 IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) { 2848 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2848 int err = ipmr_get_route(skb, r, nowait); 2849 int err = ipmr_get_route(net, skb, r, nowait);
2849 if (err <= 0) { 2850 if (err <= 0) {
2850 if (!nowait) { 2851 if (!nowait) {
2851 if (err == 0) 2852 if (err == 0)
@@ -2950,7 +2951,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2950 if (rtm->rtm_flags & RTM_F_NOTIFY) 2951 if (rtm->rtm_flags & RTM_F_NOTIFY)
2951 rt->rt_flags |= RTCF_NOTIFY; 2952 rt->rt_flags |= RTCF_NOTIFY;
2952 2953
2953 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 2954 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2954 RTM_NEWROUTE, 0, 0); 2955 RTM_NEWROUTE, 0, 0);
2955 if (err <= 0) 2956 if (err <= 0)
2956 goto errout_free; 2957 goto errout_free;
@@ -2988,7 +2989,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2988 if (rt_is_expired(rt)) 2989 if (rt_is_expired(rt))
2989 continue; 2990 continue;
2990 skb->dst = dst_clone(&rt->u.dst); 2991 skb->dst = dst_clone(&rt->u.dst);
2991 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 2992 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
2992 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 2993 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2993 1, NLM_F_MULTI) <= 0) { 2994 1, NLM_F_MULTI) <= 0) {
2994 dst_release(xchg(&skb->dst, NULL)); 2995 dst_release(xchg(&skb->dst, NULL));
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 76b148bcb0d..90b2f3c192f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2478,23 +2478,23 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2478 struct tcphdr *th2; 2478 struct tcphdr *th2;
2479 unsigned int thlen; 2479 unsigned int thlen;
2480 unsigned int flags; 2480 unsigned int flags;
2481 unsigned int total;
2482 unsigned int mss = 1; 2481 unsigned int mss = 1;
2483 int flush = 1; 2482 int flush = 1;
2483 int i;
2484 2484
2485 if (!pskb_may_pull(skb, sizeof(*th))) 2485 th = skb_gro_header(skb, sizeof(*th));
2486 if (unlikely(!th))
2486 goto out; 2487 goto out;
2487 2488
2488 th = tcp_hdr(skb);
2489 thlen = th->doff * 4; 2489 thlen = th->doff * 4;
2490 if (thlen < sizeof(*th)) 2490 if (thlen < sizeof(*th))
2491 goto out; 2491 goto out;
2492 2492
2493 if (!pskb_may_pull(skb, thlen)) 2493 th = skb_gro_header(skb, thlen);
2494 if (unlikely(!th))
2494 goto out; 2495 goto out;
2495 2496
2496 th = tcp_hdr(skb); 2497 skb_gro_pull(skb, thlen);
2497 __skb_pull(skb, thlen);
2498 2498
2499 flags = tcp_flag_word(th); 2499 flags = tcp_flag_word(th);
2500 2500
@@ -2504,7 +2504,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2504 2504
2505 th2 = tcp_hdr(p); 2505 th2 = tcp_hdr(p);
2506 2506
2507 if (th->source != th2->source || th->dest != th2->dest) { 2507 if ((th->source ^ th2->source) | (th->dest ^ th2->dest)) {
2508 NAPI_GRO_CB(p)->same_flow = 0; 2508 NAPI_GRO_CB(p)->same_flow = 0;
2509 continue; 2509 continue;
2510 } 2510 }
@@ -2519,14 +2519,15 @@ found:
2519 flush |= flags & TCP_FLAG_CWR; 2519 flush |= flags & TCP_FLAG_CWR;
2520 flush |= (flags ^ tcp_flag_word(th2)) & 2520 flush |= (flags ^ tcp_flag_word(th2)) &
2521 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); 2521 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
2522 flush |= th->ack_seq != th2->ack_seq || th->window != th2->window; 2522 flush |= (th->ack_seq ^ th2->ack_seq) | (th->window ^ th2->window);
2523 flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); 2523 for (i = sizeof(*th); !flush && i < thlen; i += 4)
2524 flush |= *(u32 *)((u8 *)th + i) ^
2525 *(u32 *)((u8 *)th2 + i);
2524 2526
2525 total = p->len;
2526 mss = skb_shinfo(p)->gso_size; 2527 mss = skb_shinfo(p)->gso_size;
2527 2528
2528 flush |= skb->len > mss || skb->len <= 0; 2529 flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb);
2529 flush |= ntohl(th2->seq) + total != ntohl(th->seq); 2530 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2530 2531
2531 if (flush || skb_gro_receive(head, skb)) { 2532 if (flush || skb_gro_receive(head, skb)) {
2532 mss = 1; 2533 mss = 1;
@@ -2538,7 +2539,7 @@ found:
2538 tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); 2539 tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
2539 2540
2540out_check_final: 2541out_check_final:
2541 flush = skb->len < mss; 2542 flush = skb_gro_len(skb) < mss;
2542 flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | 2543 flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
2543 TCP_FLAG_SYN | TCP_FLAG_FIN); 2544 TCP_FLAG_SYN | TCP_FLAG_FIN);
2544 2545
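Two ideas recur in the tcp_gro_receive() hunks above: header bytes are now fetched with skb_gro_header()/skb_gro_pull()/skb_gro_len(), so the GRO path can inspect headers that may still sit in a fragment rather than the linear area, and the flush tests are rewritten from short-circuiting comparisons into XOR/OR arithmetic, since (a ^ b) | (c ^ d) is non-zero exactly when some pair differs and needs no conditional branches. A stand-alone sketch of the latter in plain C; flow_mismatch() and the port numbers are toy values, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Non-zero iff any compared field differs: same truth value as
 * "s1 != s2 || d1 != d2", but computed branch-free. */
static uint32_t flow_mismatch(uint16_t s1, uint16_t d1, uint16_t s2, uint16_t d2)
{
	return (uint32_t)(s1 ^ s2) | (uint32_t)(d1 ^ d2);
}

int main(void)
{
	printf("%d\n", !!flow_mismatch(80, 5000, 80, 5000));	/* 0: same flow     */
	printf("%d\n", !!flow_mismatch(80, 5000, 443, 5000));	/* 1: ports differ  */
	return 0;
}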
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 19d7b429a26..f6b962f56ab 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2355,7 +2355,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2355 2355
2356 switch (skb->ip_summed) { 2356 switch (skb->ip_summed) {
2357 case CHECKSUM_COMPLETE: 2357 case CHECKSUM_COMPLETE:
2358 if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr, 2358 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2359 skb->csum)) { 2359 skb->csum)) {
2360 skb->ip_summed = CHECKSUM_UNNECESSARY; 2360 skb->ip_summed = CHECKSUM_UNNECESSARY;
2361 break; 2361 break;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 2ad24ba31f9..60d918c96a4 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -241,7 +241,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
241 241
242static struct dst_ops xfrm4_dst_ops = { 242static struct dst_ops xfrm4_dst_ops = {
243 .family = AF_INET, 243 .family = AF_INET,
244 .protocol = __constant_htons(ETH_P_IP), 244 .protocol = cpu_to_be16(ETH_P_IP),
245 .gc = xfrm4_garbage_collect, 245 .gc = xfrm4_garbage_collect,
246 .update_pmtu = xfrm4_update_pmtu, 246 .update_pmtu = xfrm4_update_pmtu,
247 .destroy = xfrm4_dst_destroy, 247 .destroy = xfrm4_dst_destroy,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f9afb452249..03e2a1ad71e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2224,10 +2224,24 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg)
2224 return err; 2224 return err;
2225} 2225}
2226 2226
2227static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2228 int plen, int scope)
2229{
2230 struct inet6_ifaddr *ifp;
2231
2232 ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
2233 if (!IS_ERR(ifp)) {
2234 spin_lock_bh(&ifp->lock);
2235 ifp->flags &= ~IFA_F_TENTATIVE;
2236 spin_unlock_bh(&ifp->lock);
2237 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2238 in6_ifa_put(ifp);
2239 }
2240}
2241
2227#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2242#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2228static void sit_add_v4_addrs(struct inet6_dev *idev) 2243static void sit_add_v4_addrs(struct inet6_dev *idev)
2229{ 2244{
2230 struct inet6_ifaddr * ifp;
2231 struct in6_addr addr; 2245 struct in6_addr addr;
2232 struct net_device *dev; 2246 struct net_device *dev;
2233 struct net *net = dev_net(idev->dev); 2247 struct net *net = dev_net(idev->dev);
@@ -2246,14 +2260,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2246 } 2260 }
2247 2261
2248 if (addr.s6_addr32[3]) { 2262 if (addr.s6_addr32[3]) {
2249 ifp = ipv6_add_addr(idev, &addr, 128, scope, IFA_F_PERMANENT); 2263 add_addr(idev, &addr, 128, scope);
2250 if (!IS_ERR(ifp)) {
2251 spin_lock_bh(&ifp->lock);
2252 ifp->flags &= ~IFA_F_TENTATIVE;
2253 spin_unlock_bh(&ifp->lock);
2254 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2255 in6_ifa_put(ifp);
2256 }
2257 return; 2264 return;
2258 } 2265 }
2259 2266
@@ -2281,15 +2288,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2281 else 2288 else
2282 plen = 96; 2289 plen = 96;
2283 2290
2284 ifp = ipv6_add_addr(idev, &addr, plen, flag, 2291 add_addr(idev, &addr, plen, flag);
2285 IFA_F_PERMANENT);
2286 if (!IS_ERR(ifp)) {
2287 spin_lock_bh(&ifp->lock);
2288 ifp->flags &= ~IFA_F_TENTATIVE;
2289 spin_unlock_bh(&ifp->lock);
2290 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2291 in6_ifa_put(ifp);
2292 }
2293 } 2292 }
2294 } 2293 }
2295 } 2294 }
@@ -2299,7 +2298,6 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2299static void init_loopback(struct net_device *dev) 2298static void init_loopback(struct net_device *dev)
2300{ 2299{
2301 struct inet6_dev *idev; 2300 struct inet6_dev *idev;
2302 struct inet6_ifaddr * ifp;
2303 2301
2304 /* ::1 */ 2302 /* ::1 */
2305 2303
@@ -2310,14 +2308,7 @@ static void init_loopback(struct net_device *dev)
2310 return; 2308 return;
2311 } 2309 }
2312 2310
2313 ifp = ipv6_add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFA_F_PERMANENT); 2311 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2314 if (!IS_ERR(ifp)) {
2315 spin_lock_bh(&ifp->lock);
2316 ifp->flags &= ~IFA_F_TENTATIVE;
2317 spin_unlock_bh(&ifp->lock);
2318 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2319 in6_ifa_put(ifp);
2320 }
2321} 2312}
2322 2313
2323static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr) 2314static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c802bc1658a..fa2ac7ee662 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -799,24 +799,34 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
799 int proto; 799 int proto;
800 __wsum csum; 800 __wsum csum;
801 801
802 if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) 802 iph = skb_gro_header(skb, sizeof(*iph));
803 if (unlikely(!iph))
803 goto out; 804 goto out;
804 805
805 iph = ipv6_hdr(skb); 806 skb_gro_pull(skb, sizeof(*iph));
806 __skb_pull(skb, sizeof(*iph)); 807 skb_set_transport_header(skb, skb_gro_offset(skb));
807 808
808 flush += ntohs(iph->payload_len) != skb->len; 809 flush += ntohs(iph->payload_len) != skb_gro_len(skb);
809 810
810 rcu_read_lock(); 811 rcu_read_lock();
811 proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); 812 proto = iph->nexthdr;
812 iph = ipv6_hdr(skb);
813 IPV6_GRO_CB(skb)->proto = proto;
814 ops = rcu_dereference(inet6_protos[proto]); 813 ops = rcu_dereference(inet6_protos[proto]);
815 if (!ops || !ops->gro_receive) 814 if (!ops || !ops->gro_receive) {
816 goto out_unlock; 815 __pskb_pull(skb, skb_gro_offset(skb));
816 proto = ipv6_gso_pull_exthdrs(skb, proto);
817 skb_gro_pull(skb, -skb_transport_offset(skb));
818 skb_reset_transport_header(skb);
819 __skb_push(skb, skb_gro_offset(skb));
820
821 if (!ops || !ops->gro_receive)
822 goto out_unlock;
823
824 iph = ipv6_hdr(skb);
825 }
826
827 IPV6_GRO_CB(skb)->proto = proto;
817 828
818 flush--; 829 flush--;
819 skb_reset_transport_header(skb);
820 nlen = skb_network_header_len(skb); 830 nlen = skb_network_header_len(skb);
821 831
822 for (p = *head; p; p = p->next) { 832 for (p = *head; p; p = p->next) {
@@ -880,7 +890,7 @@ out_unlock:
880} 890}
881 891
882static struct packet_type ipv6_packet_type = { 892static struct packet_type ipv6_packet_type = {
883 .type = __constant_htons(ETH_P_IPV6), 893 .type = cpu_to_be16(ETH_P_IPV6),
884 .func = ipv6_rcv, 894 .func = ipv6_rcv,
885 .gso_send_check = ipv6_gso_send_check, 895 .gso_send_check = ipv6_gso_send_check,
886 .gso_segment = ipv6_gso_segment, 896 .gso_segment = ipv6_gso_segment,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3e2970841bd..3cd83b85e9e 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1538,13 +1538,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1538 if (rt->rt6i_flags & RTF_GATEWAY) { 1538 if (rt->rt6i_flags & RTF_GATEWAY) {
1539 ND_PRINTK2(KERN_WARNING 1539 ND_PRINTK2(KERN_WARNING
1540 "ICMPv6 Redirect: destination is not a neighbour.\n"); 1540 "ICMPv6 Redirect: destination is not a neighbour.\n");
1541 dst_release(dst); 1541 goto release;
1542 return;
1543 }
1544 if (!xrlim_allow(dst, 1*HZ)) {
1545 dst_release(dst);
1546 return;
1547 } 1542 }
1543 if (!xrlim_allow(dst, 1*HZ))
1544 goto release;
1548 1545
1549 if (dev->addr_len) { 1546 if (dev->addr_len) {
1550 read_lock_bh(&neigh->lock); 1547 read_lock_bh(&neigh->lock);
@@ -1570,8 +1567,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1570 ND_PRINTK0(KERN_ERR 1567 ND_PRINTK0(KERN_ERR
1571 "ICMPv6 Redirect: %s() failed to allocate an skb.\n", 1568 "ICMPv6 Redirect: %s() failed to allocate an skb.\n",
1572 __func__); 1569 __func__);
1573 dst_release(dst); 1570 goto release;
1574 return;
1575 } 1571 }
1576 1572
1577 skb_reserve(buff, LL_RESERVED_SPACE(dev)); 1573 skb_reserve(buff, LL_RESERVED_SPACE(dev));
@@ -1631,6 +1627,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1631 1627
1632 if (likely(idev != NULL)) 1628 if (likely(idev != NULL))
1633 in6_dev_put(idev); 1629 in6_dev_put(idev);
1630 return;
1631
1632release:
1633 dst_release(dst);
1634} 1634}
1635 1635
1636static void pndisc_redo(struct sk_buff *skb) 1636static void pndisc_redo(struct sk_buff *skb)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9c574235c90..c3d486a3eda 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -98,7 +98,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
98 98
99static struct dst_ops ip6_dst_ops_template = { 99static struct dst_ops ip6_dst_ops_template = {
100 .family = AF_INET6, 100 .family = AF_INET6,
101 .protocol = __constant_htons(ETH_P_IPV6), 101 .protocol = cpu_to_be16(ETH_P_IPV6),
102 .gc = ip6_dst_gc, 102 .gc = ip6_dst_gc,
103 .gc_thresh = 1024, 103 .gc_thresh = 1024,
104 .check = ip6_dst_check, 104 .check = ip6_dst_check,
@@ -117,7 +117,7 @@ static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
117 117
118static struct dst_ops ip6_dst_blackhole_ops = { 118static struct dst_ops ip6_dst_blackhole_ops = {
119 .family = AF_INET6, 119 .family = AF_INET6,
120 .protocol = __constant_htons(ETH_P_IPV6), 120 .protocol = cpu_to_be16(ETH_P_IPV6),
121 .destroy = ip6_dst_destroy, 121 .destroy = ip6_dst_destroy,
122 .check = ip6_dst_check, 122 .check = ip6_dst_check,
123 .update_pmtu = ip6_rt_blackhole_update_pmtu, 123 .update_pmtu = ip6_rt_blackhole_update_pmtu,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e5b85d45bee..00f1269e11e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -948,7 +948,7 @@ struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
948 948
949 switch (skb->ip_summed) { 949 switch (skb->ip_summed) {
950 case CHECKSUM_COMPLETE: 950 case CHECKSUM_COMPLETE:
951 if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr, 951 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
952 skb->csum)) { 952 skb->csum)) {
953 skb->ip_summed = CHECKSUM_UNNECESSARY; 953 skb->ip_summed = CHECKSUM_UNNECESSARY;
954 break; 954 break;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 97ab068e8cc..b4b16a43f27 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -272,7 +272,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
272 272
273static struct dst_ops xfrm6_dst_ops = { 273static struct dst_ops xfrm6_dst_ops = {
274 .family = AF_INET6, 274 .family = AF_INET6,
275 .protocol = __constant_htons(ETH_P_IPV6), 275 .protocol = cpu_to_be16(ETH_P_IPV6),
276 .gc = xfrm6_garbage_collect, 276 .gc = xfrm6_garbage_collect,
277 .update_pmtu = xfrm6_update_pmtu, 277 .update_pmtu = xfrm6_update_pmtu,
278 .destroy = xfrm6_dst_destroy, 278 .destroy = xfrm6_dst_destroy,
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index b6e70f92e7f..43d0ffc6d56 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1959,12 +1959,12 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = {
1959SOCKOPS_WRAP(ipx_dgram, PF_IPX); 1959SOCKOPS_WRAP(ipx_dgram, PF_IPX);
1960 1960
1961static struct packet_type ipx_8023_packet_type = { 1961static struct packet_type ipx_8023_packet_type = {
1962 .type = __constant_htons(ETH_P_802_3), 1962 .type = cpu_to_be16(ETH_P_802_3),
1963 .func = ipx_rcv, 1963 .func = ipx_rcv,
1964}; 1964};
1965 1965
1966static struct packet_type ipx_dix_packet_type = { 1966static struct packet_type ipx_dix_packet_type = {
1967 .type = __constant_htons(ETH_P_IPX), 1967 .type = cpu_to_be16(ETH_P_IPX),
1968 .func = ipx_rcv, 1968 .func = ipx_rcv,
1969}; 1969};
1970 1970
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 4c487a88372..1bb607f2f5c 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(irda_debug);
56 * Tell the kernel how IrDA packets should be handled. 56 * Tell the kernel how IrDA packets should be handled.
57 */ 57 */
58static struct packet_type irda_packet_type = { 58static struct packet_type irda_packet_type = {
59 .type = __constant_htons(ETH_P_IRDA), 59 .type = cpu_to_be16(ETH_P_IRDA),
60 .func = irlap_driver_rcv, /* Packet type handler irlap_frame.c */ 60 .func = irlap_driver_rcv, /* Packet type handler irlap_frame.c */
61}; 61};
62 62
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 50d5b10e23a..a7fe1adc378 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -148,12 +148,12 @@ void llc_sap_close(struct llc_sap *sap)
148} 148}
149 149
150static struct packet_type llc_packet_type = { 150static struct packet_type llc_packet_type = {
151 .type = __constant_htons(ETH_P_802_2), 151 .type = cpu_to_be16(ETH_P_802_2),
152 .func = llc_rcv, 152 .func = llc_rcv,
153}; 153};
154 154
155static struct packet_type llc_tr_packet_type = { 155static struct packet_type llc_tr_packet_type = {
156 .type = __constant_htons(ETH_P_TR_802_2), 156 .type = cpu_to_be16(ETH_P_TR_802_2),
157 .func = llc_rcv, 157 .func = llc_rcv,
158}; 158};
159 159
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 7d4971aa443..3503a3d2131 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -8,13 +8,14 @@ mac80211-y := \
8 wep.o \ 8 wep.o \
9 wpa.o \ 9 wpa.o \
10 scan.o \ 10 scan.o \
11 ht.o \ 11 ht.o agg-tx.o agg-rx.o \
12 mlme.o \ 12 mlme.o \
13 iface.o \ 13 iface.o \
14 rate.o \ 14 rate.o \
15 michael.o \ 15 michael.o \
16 tkip.o \ 16 tkip.o \
17 aes_ccm.o \ 17 aes_ccm.o \
18 aes_cmac.o \
18 cfg.o \ 19 cfg.o \
19 rx.o \ 20 rx.o \
20 spectmgmt.o \ 21 spectmgmt.o \
@@ -37,6 +38,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
37 mesh_plink.o \ 38 mesh_plink.o \
38 mesh_hwmp.o 39 mesh_hwmp.o
39 40
41mac80211-$(CONFIG_PM) += pm.o
42
40# objects for PID algorithm 43# objects for PID algorithm
41rc80211_pid-y := rc80211_pid_algo.o 44rc80211_pid-y := rc80211_pid_algo.o
42rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o 45rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
new file mode 100644
index 00000000000..3d097b3d7b6
--- /dev/null
+++ b/net/mac80211/aes_cmac.c
@@ -0,0 +1,135 @@
1/*
2 * AES-128-CMAC with TLen 16 for IEEE 802.11w BIP
3 * Copyright 2008, Jouni Malinen <j@w1.fi>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/crypto.h>
13#include <linux/err.h>
14
15#include <net/mac80211.h>
16#include "key.h"
17#include "aes_cmac.h"
18
19#define AES_BLOCK_SIZE 16
20#define AES_CMAC_KEY_LEN 16
21#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
22#define AAD_LEN 20
23
24
25static void gf_mulx(u8 *pad)
26{
27 int i, carry;
28
29 carry = pad[0] & 0x80;
30 for (i = 0; i < AES_BLOCK_SIZE - 1; i++)
31 pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7);
32 pad[AES_BLOCK_SIZE - 1] <<= 1;
33 if (carry)
34 pad[AES_BLOCK_SIZE - 1] ^= 0x87;
35}
36
37
38static void aes_128_cmac_vector(struct crypto_cipher *tfm, u8 *scratch,
39 size_t num_elem,
40 const u8 *addr[], const size_t *len, u8 *mac)
41{
42 u8 *cbc, *pad;
43 const u8 *pos, *end;
44 size_t i, e, left, total_len;
45
46 cbc = scratch;
47 pad = scratch + AES_BLOCK_SIZE;
48
49 memset(cbc, 0, AES_BLOCK_SIZE);
50
51 total_len = 0;
52 for (e = 0; e < num_elem; e++)
53 total_len += len[e];
54 left = total_len;
55
56 e = 0;
57 pos = addr[0];
58 end = pos + len[0];
59
60 while (left >= AES_BLOCK_SIZE) {
61 for (i = 0; i < AES_BLOCK_SIZE; i++) {
62 cbc[i] ^= *pos++;
63 if (pos >= end) {
64 e++;
65 pos = addr[e];
66 end = pos + len[e];
67 }
68 }
69 if (left > AES_BLOCK_SIZE)
70 crypto_cipher_encrypt_one(tfm, cbc, cbc);
71 left -= AES_BLOCK_SIZE;
72 }
73
74 memset(pad, 0, AES_BLOCK_SIZE);
75 crypto_cipher_encrypt_one(tfm, pad, pad);
76 gf_mulx(pad);
77
78 if (left || total_len == 0) {
79 for (i = 0; i < left; i++) {
80 cbc[i] ^= *pos++;
81 if (pos >= end) {
82 e++;
83 pos = addr[e];
84 end = pos + len[e];
85 }
86 }
87 cbc[left] ^= 0x80;
88 gf_mulx(pad);
89 }
90
91 for (i = 0; i < AES_BLOCK_SIZE; i++)
92 pad[i] ^= cbc[i];
93 crypto_cipher_encrypt_one(tfm, pad, pad);
94 memcpy(mac, pad, CMAC_TLEN);
95}
96
97
98void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad,
99 const u8 *data, size_t data_len, u8 *mic)
100{
101 const u8 *addr[3];
102 size_t len[3];
103 u8 zero[CMAC_TLEN];
104
105 memset(zero, 0, CMAC_TLEN);
106 addr[0] = aad;
107 len[0] = AAD_LEN;
108 addr[1] = data;
109 len[1] = data_len - CMAC_TLEN;
110 addr[2] = zero;
111 len[2] = CMAC_TLEN;
112
113 aes_128_cmac_vector(tfm, scratch, 3, addr, len, mic);
114}
115
116
117struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
118{
119 struct crypto_cipher *tfm;
120
121 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
122 if (IS_ERR(tfm))
123 return NULL;
124
125 crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
126
127 return tfm;
128}
129
130
131void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
132{
133 if (tfm)
134 crypto_free_cipher(tfm);
135}
diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h
new file mode 100644
index 00000000000..0eb9a483150
--- /dev/null
+++ b/net/mac80211/aes_cmac.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright 2008, Jouni Malinen <j@w1.fi>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef AES_CMAC_H
10#define AES_CMAC_H
11
12#include <linux/crypto.h>
13
14struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]);
15void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad,
16 const u8 *data, size_t data_len, u8 *mic);
17void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
18
19#endif /* AES_CMAC_H */
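
For illustration only (not part of this patch): a minimal sketch of how a hypothetical caller might use the new AES-128-CMAC helpers to compute a BIP MIC. The function example_bip_mic and its arguments are invented for this sketch; the buffer sizes follow the constants in aes_cmac.c (a 32-byte scratch area holding two AES blocks, an 8-byte truncated MIC), and body_len is assumed to cover the frame body including the 8-octet MIC field, which ieee80211_aes_cmac() internally replaces with zeroes.

#include <linux/errno.h>
#include <linux/crypto.h>
#include "aes_cmac.h"

/* hypothetical caller; a real user is the 802.11w BIP code in mac80211 */
static int example_bip_mic(const u8 igtk[16], const u8 aad[20],
                           const u8 *body, size_t body_len, u8 mic[8])
{
        struct crypto_cipher *tfm;
        u8 scratch[32];   /* two AES blocks, as used by aes_128_cmac_vector() */

        tfm = ieee80211_aes_cmac_key_setup(igtk);
        if (!tfm)
                return -ENOMEM;

        /* body_len includes the 8-octet MIC field; it is zeroed internally */
        ieee80211_aes_cmac(tfm, scratch, aad, body, body_len, mic);
        ieee80211_aes_cmac_key_free(tfm);
        return 0;
}
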
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
new file mode 100644
index 00000000000..3112bfd441b
--- /dev/null
+++ b/net/mac80211/agg-rx.c
@@ -0,0 +1,302 @@
1/*
2 * HT handling
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2008, Intel Corporation
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/ieee80211.h>
17#include <net/mac80211.h>
18#include "ieee80211_i.h"
19
20void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
21 u16 initiator, u16 reason)
22{
23 struct ieee80211_local *local = sta->local;
24 struct ieee80211_hw *hw = &local->hw;
25 int i;
26
27 /* check if TID is in operational state */
28 spin_lock_bh(&sta->lock);
29 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) {
30 spin_unlock_bh(&sta->lock);
31 return;
32 }
33
34 sta->ampdu_mlme.tid_state_rx[tid] =
35 HT_AGG_STATE_REQ_STOP_BA_MSK |
36 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
37 spin_unlock_bh(&sta->lock);
38
39#ifdef CONFIG_MAC80211_HT_DEBUG
40 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
41 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */
43
44 if (local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
45 &sta->sta, tid, NULL))
46 printk(KERN_DEBUG "HW problem - can not stop rx "
47 "aggregation for tid %d\n", tid);
48
 49	/* stop was not triggered by the session timer, so it is still pending; delete it */
50 if (initiator != WLAN_BACK_TIMER)
51 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
52
53 /* check if this is a self generated aggregation halt */
54 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
55 ieee80211_send_delba(sta->sdata, sta->sta.addr,
56 tid, 0, reason);
57
58 /* free the reordering buffer */
59 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
60 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
61 /* release the reordered frames */
62 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
63 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
64 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
65 }
66 }
67
68 spin_lock_bh(&sta->lock);
69 /* free resources */
70 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
71
72 if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) {
73 kfree(sta->ampdu_mlme.tid_rx[tid]);
74 sta->ampdu_mlme.tid_rx[tid] = NULL;
75 }
76
77 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
78 spin_unlock_bh(&sta->lock);
79}
80
81void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
82 u16 initiator, u16 reason)
83{
84 struct ieee80211_local *local = sdata->local;
85 struct sta_info *sta;
86
87 /* stop HW Rx aggregation. ampdu_action existence
88 * already verified in session init so we add the BUG_ON */
89 BUG_ON(!local->ops->ampdu_action);
90
91 rcu_read_lock();
92
93 sta = sta_info_get(local, ra);
94 if (!sta) {
95 rcu_read_unlock();
96 return;
97 }
98
99 __ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
100
101 rcu_read_unlock();
102}
103
104/*
105 * After accepting the AddBA Request we activate a timer,
106 * resetting it after each frame that arrives from the originator.
107 * If this timer expires, ieee80211_sta_stop_rx_ba_session() is executed.
108 */
109static void sta_rx_agg_session_timer_expired(unsigned long data)
110{
111 /* not an elegant detour, but there is no choice as the timer passes
112 * only one argument, and the sta_info is needed here, so the init
113 * flow in sta_info_create gives the TID as data, while the timer_to_tid
114 * array gives the sta through container_of */
115 u8 *ptid = (u8 *)data;
116 u8 *timer_to_id = ptid - *ptid;
117 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
118 timer_to_tid[0]);
119
120#ifdef CONFIG_MAC80211_HT_DEBUG
121 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
122#endif
123 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
124 (u16)*ptid, WLAN_BACK_TIMER,
125 WLAN_REASON_QSTA_TIMEOUT);
126}
127
128static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
129 u8 dialog_token, u16 status, u16 policy,
130 u16 buf_size, u16 timeout)
131{
132 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
133 struct ieee80211_local *local = sdata->local;
134 struct sk_buff *skb;
135 struct ieee80211_mgmt *mgmt;
136 u16 capab;
137
138 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
139
140 if (!skb) {
141 printk(KERN_DEBUG "%s: failed to allocate buffer "
142 "for addba resp frame\n", sdata->dev->name);
143 return;
144 }
145
146 skb_reserve(skb, local->hw.extra_tx_headroom);
147 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
148 memset(mgmt, 0, 24);
149 memcpy(mgmt->da, da, ETH_ALEN);
150 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
151 if (sdata->vif.type == NL80211_IFTYPE_AP ||
152 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
153 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
154 else
155 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
156 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
157 IEEE80211_STYPE_ACTION);
158
159 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
160 mgmt->u.action.category = WLAN_CATEGORY_BACK;
161 mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
162 mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
163
164 capab = (u16)(policy << 1); /* bit 1 aggregation policy */
165 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
166 capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */
167
168 mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
169 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
170 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
171
172 ieee80211_tx_skb(sdata, skb, 1);
173}
174
175void ieee80211_process_addba_request(struct ieee80211_local *local,
176 struct sta_info *sta,
177 struct ieee80211_mgmt *mgmt,
178 size_t len)
179{
180 struct ieee80211_hw *hw = &local->hw;
181 struct ieee80211_conf *conf = &hw->conf;
182 struct tid_ampdu_rx *tid_agg_rx;
183 u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
184 u8 dialog_token;
185 int ret = -EOPNOTSUPP;
186
187 /* extract session parameters from addba request frame */
188 dialog_token = mgmt->u.action.u.addba_req.dialog_token;
189 timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
190 start_seq_num =
191 le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
192
193 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
194 ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
195 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
196 buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
197
198 status = WLAN_STATUS_REQUEST_DECLINED;
199
200 /* sanity check for incoming parameters:
201 * check if configuration can support the BA policy
202	 * and if the buffer size does not exceed the max value */
203 /* XXX: check own ht delayed BA capability?? */
204 if (((ba_policy != 1)
205 && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA)))
206 || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
207 status = WLAN_STATUS_INVALID_QOS_PARAM;
208#ifdef CONFIG_MAC80211_HT_DEBUG
209 if (net_ratelimit())
210 printk(KERN_DEBUG "AddBA Req with bad params from "
211 "%pM on tid %u. policy %d, buffer size %d\n",
212 mgmt->sa, tid, ba_policy,
213 buf_size);
214#endif /* CONFIG_MAC80211_HT_DEBUG */
215 goto end_no_lock;
216 }
217 /* determine default buffer size */
218 if (buf_size == 0) {
219 struct ieee80211_supported_band *sband;
220
221 sband = local->hw.wiphy->bands[conf->channel->band];
222 buf_size = IEEE80211_MIN_AMPDU_BUF;
223 buf_size = buf_size << sband->ht_cap.ampdu_factor;
224 }
225
226
227 /* examine state machine */
228 spin_lock_bh(&sta->lock);
229
230 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
231#ifdef CONFIG_MAC80211_HT_DEBUG
232 if (net_ratelimit())
233 printk(KERN_DEBUG "unexpected AddBA Req from "
234 "%pM on tid %u\n",
235 mgmt->sa, tid);
236#endif /* CONFIG_MAC80211_HT_DEBUG */
237 goto end;
238 }
239
240 /* prepare A-MPDU MLME for Rx aggregation */
241 sta->ampdu_mlme.tid_rx[tid] =
242 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
243 if (!sta->ampdu_mlme.tid_rx[tid]) {
244#ifdef CONFIG_MAC80211_HT_DEBUG
245 if (net_ratelimit())
246 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
247 tid);
248#endif
249 goto end;
250 }
251 /* rx timer */
252 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
253 sta_rx_agg_session_timer_expired;
254 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
255 (unsigned long)&sta->timer_to_tid[tid];
256 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
257
258 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
259
260 /* prepare reordering buffer */
261 tid_agg_rx->reorder_buf =
262 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
263 if (!tid_agg_rx->reorder_buf) {
264#ifdef CONFIG_MAC80211_HT_DEBUG
265 if (net_ratelimit())
266 printk(KERN_ERR "can not allocate reordering buffer "
267 "to tid %d\n", tid);
268#endif
269 kfree(sta->ampdu_mlme.tid_rx[tid]);
270 goto end;
271 }
272
273 if (local->ops->ampdu_action)
274 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
275 &sta->sta, tid, &start_seq_num);
276#ifdef CONFIG_MAC80211_HT_DEBUG
277 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
278#endif /* CONFIG_MAC80211_HT_DEBUG */
279
280 if (ret) {
281 kfree(tid_agg_rx->reorder_buf);
282 kfree(tid_agg_rx);
283 sta->ampdu_mlme.tid_rx[tid] = NULL;
284 goto end;
285 }
286
287 /* change state and send addba resp */
288 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
289 tid_agg_rx->dialog_token = dialog_token;
290 tid_agg_rx->ssn = start_seq_num;
291 tid_agg_rx->head_seq_num = start_seq_num;
292 tid_agg_rx->buf_size = buf_size;
293 tid_agg_rx->timeout = timeout;
294 tid_agg_rx->stored_mpdu_num = 0;
295 status = WLAN_STATUS_SUCCESS;
296end:
297 spin_unlock_bh(&sta->lock);
298
299end_no_lock:
300 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
301 dialog_token, status, 1, buf_size, timeout);
302}
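
For illustration only (not part of this patch): sketched Rx-side handling in a hypothetical driver's ampdu_action callback, matching the IEEE80211_AMPDU_RX_START / IEEE80211_AMPDU_RX_STOP calls made by the code above. The name example_rx_ampdu_action is invented; a real driver would program its hardware BA/reorder state here, and may return non-zero from RX_START to decline the session (which makes ieee80211_process_addba_request() send a declined AddBA response).

#include <linux/errno.h>
#include <net/mac80211.h>

static int example_rx_ampdu_action(struct ieee80211_hw *hw,
                                   enum ieee80211_ampdu_mlme_action action,
                                   struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                /* *ssn carries the starting sequence number from the AddBA;
                 * hardware setup would go here; return non-zero to decline */
                return 0;
        case IEEE80211_AMPDU_RX_STOP:
                /* tear down any hardware Rx aggregation state for this tid */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
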
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
new file mode 100644
index 00000000000..1232d9f01ca
--- /dev/null
+++ b/net/mac80211/agg-tx.c
@@ -0,0 +1,636 @@
1/*
2 * HT handling
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2009, Intel Corporation
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/ieee80211.h>
17#include <net/mac80211.h>
18#include "ieee80211_i.h"
19#include "wme.h"
20
21/**
22 * DOC: TX aggregation
23 *
24 * Aggregation on the TX side requires setting the hardware flag
25 * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues
26 * hardware parameter to the number of hardware AMPDU queues. If there are no
27 * hardware queues then the driver will (currently) have to do all frame
28 * buffering.
29 *
30 * When TX aggregation is started by some subsystem (usually the rate control
31 * algorithm would be appropriate) by calling the
32 * ieee80211_start_tx_ba_session() function, the driver will be notified via
33 * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action.
34 *
35 * In response to that, the driver is later required to call the
36 * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe())
37 * function, which will start the aggregation session.
38 *
39 * Similarly, when the aggregation session is stopped by
40 * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will
41 * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the
42 * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb()
43 * (or ieee80211_stop_tx_ba_cb_irqsafe()).
44 */
45
46static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
47 const u8 *da, u16 tid,
48 u8 dialog_token, u16 start_seq_num,
49 u16 agg_size, u16 timeout)
50{
51 struct ieee80211_local *local = sdata->local;
52 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
53 struct sk_buff *skb;
54 struct ieee80211_mgmt *mgmt;
55 u16 capab;
56
57 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
58
59 if (!skb) {
60 printk(KERN_ERR "%s: failed to allocate buffer "
61 "for addba request frame\n", sdata->dev->name);
62 return;
63 }
64 skb_reserve(skb, local->hw.extra_tx_headroom);
65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
66 memset(mgmt, 0, 24);
67 memcpy(mgmt->da, da, ETH_ALEN);
68 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
69 if (sdata->vif.type == NL80211_IFTYPE_AP ||
70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
71 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
72 else
73 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
74
75 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
76 IEEE80211_STYPE_ACTION);
77
78 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
79
80 mgmt->u.action.category = WLAN_CATEGORY_BACK;
81 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
82
83 mgmt->u.action.u.addba_req.dialog_token = dialog_token;
84 capab = (u16)(1 << 1); /* bit 1 aggregation policy */
85 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
 86	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */
87
88 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
89
90 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
91 mgmt->u.action.u.addba_req.start_seq_num =
92 cpu_to_le16(start_seq_num << 4);
93
94 ieee80211_tx_skb(sdata, skb, 1);
95}
96
97void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
98{
99 struct ieee80211_local *local = sdata->local;
100 struct sk_buff *skb;
101 struct ieee80211_bar *bar;
102 u16 bar_control = 0;
103
104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
105 if (!skb) {
106 printk(KERN_ERR "%s: failed to allocate buffer for "
107 "bar frame\n", sdata->dev->name);
108 return;
109 }
110 skb_reserve(skb, local->hw.extra_tx_headroom);
111 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
112 memset(bar, 0, sizeof(*bar));
113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
114 IEEE80211_STYPE_BACK_REQ);
115 memcpy(bar->ra, ra, ETH_ALEN);
116 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
119 bar_control |= (u16)(tid << 12);
120 bar->control = cpu_to_le16(bar_control);
121 bar->start_seq_num = cpu_to_le16(ssn);
122
123 ieee80211_tx_skb(sdata, skb, 0);
124}
125
126static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
127 enum ieee80211_back_parties initiator)
128{
129 struct ieee80211_local *local = sta->local;
130 int ret;
131 u8 *state;
132
133 state = &sta->ampdu_mlme.tid_state_tx[tid];
134
135 if (local->hw.ampdu_queues)
136 ieee80211_stop_queue(&local->hw, sta->tid_to_tx_q[tid]);
137
138 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
139 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
140
141 ret = local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_STOP,
142 &sta->sta, tid, NULL);
143
144 /* HW shall not deny going back to legacy */
145 if (WARN_ON(ret)) {
146 *state = HT_AGG_STATE_OPERATIONAL;
147 if (local->hw.ampdu_queues)
148 ieee80211_wake_queue(&local->hw, sta->tid_to_tx_q[tid]);
149 }
150
151 return ret;
152}
153
154/*
155 * After sending an add Block Ack request we activate a timer that runs
156 * until the add Block Ack response arrives from the recipient.
157 * If this timer expires, sta_addba_resp_timer_expired() is executed.
158 */
159static void sta_addba_resp_timer_expired(unsigned long data)
160{
161 /* not an elegant detour, but there is no choice as the timer passes
162 * only one argument, and both sta_info and TID are needed, so init
163 * flow in sta_info_create gives the TID as data, while the timer_to_tid
164 * array gives the sta through container_of */
165 u16 tid = *(u8 *)data;
166 struct sta_info *sta = container_of((void *)data,
167 struct sta_info, timer_to_tid[tid]);
168 u8 *state;
169
170 state = &sta->ampdu_mlme.tid_state_tx[tid];
171
172 /* check if the TID waits for addBA response */
173 spin_lock_bh(&sta->lock);
174 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
175 spin_unlock_bh(&sta->lock);
176 *state = HT_AGG_STATE_IDLE;
177#ifdef CONFIG_MAC80211_HT_DEBUG
178 printk(KERN_DEBUG "timer expired on tid %d but we are not "
179 "expecting addBA response there", tid);
180#endif
181 return;
182 }
183
184#ifdef CONFIG_MAC80211_HT_DEBUG
185 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
186#endif
187
188 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
189 spin_unlock_bh(&sta->lock);
190}
191
192int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
193{
194 struct ieee80211_local *local = hw_to_local(hw);
195 struct sta_info *sta;
196 struct ieee80211_sub_if_data *sdata;
197 u16 start_seq_num;
198 u8 *state;
199 int ret = 0;
200
201 if (WARN_ON(!local->ops->ampdu_action))
202 return -EINVAL;
203
204 if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
205 return -EINVAL;
206
207#ifdef CONFIG_MAC80211_HT_DEBUG
208 printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
209 ra, tid);
210#endif /* CONFIG_MAC80211_HT_DEBUG */
211
212 rcu_read_lock();
213
214 sta = sta_info_get(local, ra);
215 if (!sta) {
216#ifdef CONFIG_MAC80211_HT_DEBUG
217 printk(KERN_DEBUG "Could not find the station\n");
218#endif
219 ret = -ENOENT;
220 goto exit;
221 }
222
223 /*
224 * The aggregation code is not prepared to handle
225 * anything but STA/AP due to the BSSID handling.
226 * IBSS could work in the code but isn't supported
227 * by drivers or the standard.
228 */
229 if (sta->sdata->vif.type != NL80211_IFTYPE_STATION &&
230 sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
231 sta->sdata->vif.type != NL80211_IFTYPE_AP) {
232 ret = -EINVAL;
233 goto exit;
234 }
235
236 spin_lock_bh(&sta->lock);
237
238 /* we have tried too many times, receiver does not want A-MPDU */
239 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
240 ret = -EBUSY;
241 goto err_unlock_sta;
242 }
243
244 state = &sta->ampdu_mlme.tid_state_tx[tid];
245 /* check if the TID is not in aggregation flow already */
246 if (*state != HT_AGG_STATE_IDLE) {
247#ifdef CONFIG_MAC80211_HT_DEBUG
248 printk(KERN_DEBUG "BA request denied - session is not "
249 "idle on tid %u\n", tid);
250#endif /* CONFIG_MAC80211_HT_DEBUG */
251 ret = -EAGAIN;
252 goto err_unlock_sta;
253 }
254
255 /* prepare A-MPDU MLME for Tx aggregation */
256 sta->ampdu_mlme.tid_tx[tid] =
257 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
258 if (!sta->ampdu_mlme.tid_tx[tid]) {
259#ifdef CONFIG_MAC80211_HT_DEBUG
260 if (net_ratelimit())
261 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
262 tid);
263#endif
264 ret = -ENOMEM;
265 goto err_unlock_sta;
266 }
267 /* Tx timer */
268 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
269 sta_addba_resp_timer_expired;
270 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
271 (unsigned long)&sta->timer_to_tid[tid];
272 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
273
274 if (hw->ampdu_queues) {
275 /* create a new queue for this aggregation */
276 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
277
278		/* in case no queue is available for aggregation,
279		 * don't switch to aggregation */
280 if (ret) {
281#ifdef CONFIG_MAC80211_HT_DEBUG
282 printk(KERN_DEBUG "BA request denied - "
283 "queue unavailable for tid %d\n", tid);
284#endif /* CONFIG_MAC80211_HT_DEBUG */
285 goto err_unlock_queue;
286 }
287 }
288 sdata = sta->sdata;
289
290 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
291	 * callback right away, it must see that the flow has begun */
292 *state |= HT_ADDBA_REQUESTED_MSK;
293
294 /* This is slightly racy because the queue isn't stopped */
295 start_seq_num = sta->tid_seq[tid];
296
297 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
298 &sta->sta, tid, &start_seq_num);
299
300 if (ret) {
301 /* No need to requeue the packets in the agg queue, since we
302 * held the tx lock: no packet could be enqueued to the newly
303 * allocated queue */
304 if (hw->ampdu_queues)
305 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
306#ifdef CONFIG_MAC80211_HT_DEBUG
307 printk(KERN_DEBUG "BA request denied - HW unavailable for"
308 " tid %d\n", tid);
309#endif /* CONFIG_MAC80211_HT_DEBUG */
310 *state = HT_AGG_STATE_IDLE;
311 goto err_unlock_queue;
312 }
313
314 /* Will put all the packets in the new SW queue */
315 if (hw->ampdu_queues)
316 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
317 spin_unlock_bh(&sta->lock);
318
319 /* send an addBA request */
320 sta->ampdu_mlme.dialog_token_allocator++;
321 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
322 sta->ampdu_mlme.dialog_token_allocator;
323 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
324
325
326 ieee80211_send_addba_request(sta->sdata, ra, tid,
327 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
328 sta->ampdu_mlme.tid_tx[tid]->ssn,
329 0x40, 5000);
330 /* activate the timer for the recipient's addBA response */
331 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
332 jiffies + ADDBA_RESP_INTERVAL;
333 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
334#ifdef CONFIG_MAC80211_HT_DEBUG
335 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
336#endif
337 goto exit;
338
339err_unlock_queue:
340 kfree(sta->ampdu_mlme.tid_tx[tid]);
341 sta->ampdu_mlme.tid_tx[tid] = NULL;
342 ret = -EBUSY;
343err_unlock_sta:
344 spin_unlock_bh(&sta->lock);
345exit:
346 rcu_read_unlock();
347 return ret;
348}
349EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
350
351void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
352{
353 struct ieee80211_local *local = hw_to_local(hw);
354 struct sta_info *sta;
355 u8 *state;
356
357 if (tid >= STA_TID_NUM) {
358#ifdef CONFIG_MAC80211_HT_DEBUG
359 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
360 tid, STA_TID_NUM);
361#endif
362 return;
363 }
364
365 rcu_read_lock();
366 sta = sta_info_get(local, ra);
367 if (!sta) {
368 rcu_read_unlock();
369#ifdef CONFIG_MAC80211_HT_DEBUG
370 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
371#endif
372 return;
373 }
374
375 state = &sta->ampdu_mlme.tid_state_tx[tid];
376 spin_lock_bh(&sta->lock);
377
378 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
379#ifdef CONFIG_MAC80211_HT_DEBUG
380 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
381 *state);
382#endif
383 spin_unlock_bh(&sta->lock);
384 rcu_read_unlock();
385 return;
386 }
387
388 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
389
390 *state |= HT_ADDBA_DRV_READY_MSK;
391
392 if (*state == HT_AGG_STATE_OPERATIONAL) {
393#ifdef CONFIG_MAC80211_HT_DEBUG
394 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
395#endif
396 if (hw->ampdu_queues)
397 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
398 }
399 spin_unlock_bh(&sta->lock);
400 rcu_read_unlock();
401}
402EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
403
404void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
405 const u8 *ra, u16 tid)
406{
407 struct ieee80211_local *local = hw_to_local(hw);
408 struct ieee80211_ra_tid *ra_tid;
409 struct sk_buff *skb = dev_alloc_skb(0);
410
411 if (unlikely(!skb)) {
412#ifdef CONFIG_MAC80211_HT_DEBUG
413 if (net_ratelimit())
414 printk(KERN_WARNING "%s: Not enough memory, "
415 "dropping start BA session", skb->dev->name);
416#endif
417 return;
418 }
419 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
420 memcpy(&ra_tid->ra, ra, ETH_ALEN);
421 ra_tid->tid = tid;
422
423 skb->pkt_type = IEEE80211_ADDBA_MSG;
424 skb_queue_tail(&local->skb_queue, skb);
425 tasklet_schedule(&local->tasklet);
426}
427EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
428
429int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
430 enum ieee80211_back_parties initiator)
431{
432 u8 *state;
433 int ret;
434
435 /* check if the TID is in aggregation */
436 state = &sta->ampdu_mlme.tid_state_tx[tid];
437 spin_lock_bh(&sta->lock);
438
439 if (*state != HT_AGG_STATE_OPERATIONAL) {
440 ret = -ENOENT;
441 goto unlock;
442 }
443
444#ifdef CONFIG_MAC80211_HT_DEBUG
445 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
446 sta->sta.addr, tid);
447#endif /* CONFIG_MAC80211_HT_DEBUG */
448
449 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
450
451 unlock:
452 spin_unlock_bh(&sta->lock);
453 return ret;
454}
455
456int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
457 u8 *ra, u16 tid,
458 enum ieee80211_back_parties initiator)
459{
460 struct ieee80211_local *local = hw_to_local(hw);
461 struct sta_info *sta;
462 int ret = 0;
463
464 if (WARN_ON(!local->ops->ampdu_action))
465 return -EINVAL;
466
467 if (tid >= STA_TID_NUM)
468 return -EINVAL;
469
470 rcu_read_lock();
471 sta = sta_info_get(local, ra);
472 if (!sta) {
473 rcu_read_unlock();
474 return -ENOENT;
475 }
476
477 ret = __ieee80211_stop_tx_ba_session(sta, tid, initiator);
478 rcu_read_unlock();
479 return ret;
480}
481EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
482
483void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
484{
485 struct ieee80211_local *local = hw_to_local(hw);
486 struct sta_info *sta;
487 u8 *state;
488 int agg_queue;
489
490 if (tid >= STA_TID_NUM) {
491#ifdef CONFIG_MAC80211_HT_DEBUG
492 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
493 tid, STA_TID_NUM);
494#endif
495 return;
496 }
497
498#ifdef CONFIG_MAC80211_HT_DEBUG
499 printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
500 ra, tid);
501#endif /* CONFIG_MAC80211_HT_DEBUG */
502
503 rcu_read_lock();
504 sta = sta_info_get(local, ra);
505 if (!sta) {
506#ifdef CONFIG_MAC80211_HT_DEBUG
507 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
508#endif
509 rcu_read_unlock();
510 return;
511 }
512 state = &sta->ampdu_mlme.tid_state_tx[tid];
513
514 /* NOTE: no need to use sta->lock in this state check, as
515 * ieee80211_stop_tx_ba_session will let only one stop call to
516 * pass through per sta/tid
517 */
518 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
519#ifdef CONFIG_MAC80211_HT_DEBUG
520 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
521#endif
522 rcu_read_unlock();
523 return;
524 }
525
526 if (*state & HT_AGG_STATE_INITIATOR_MSK)
527 ieee80211_send_delba(sta->sdata, ra, tid,
528 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
529
530 if (hw->ampdu_queues) {
531 agg_queue = sta->tid_to_tx_q[tid];
532 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
533
534		/* We just requeued all the frames that were in the
535 * removed queue, and since we might miss a softirq we do
536 * netif_schedule_queue. ieee80211_wake_queue is not used
537 * here as this queue is not necessarily stopped
538 */
539 netif_schedule_queue(netdev_get_tx_queue(local->mdev,
540 agg_queue));
541 }
542 spin_lock_bh(&sta->lock);
543 *state = HT_AGG_STATE_IDLE;
544 sta->ampdu_mlme.addba_req_num[tid] = 0;
545 kfree(sta->ampdu_mlme.tid_tx[tid]);
546 sta->ampdu_mlme.tid_tx[tid] = NULL;
547 spin_unlock_bh(&sta->lock);
548
549 rcu_read_unlock();
550}
551EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
552
553void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
554 const u8 *ra, u16 tid)
555{
556 struct ieee80211_local *local = hw_to_local(hw);
557 struct ieee80211_ra_tid *ra_tid;
558 struct sk_buff *skb = dev_alloc_skb(0);
559
560 if (unlikely(!skb)) {
561#ifdef CONFIG_MAC80211_HT_DEBUG
562 if (net_ratelimit())
563 printk(KERN_WARNING "%s: Not enough memory, "
564 "dropping stop BA session", skb->dev->name);
565#endif
566 return;
567 }
568 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
569 memcpy(&ra_tid->ra, ra, ETH_ALEN);
570 ra_tid->tid = tid;
571
572 skb->pkt_type = IEEE80211_DELBA_MSG;
573 skb_queue_tail(&local->skb_queue, skb);
574 tasklet_schedule(&local->tasklet);
575}
576EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
577
578
579void ieee80211_process_addba_resp(struct ieee80211_local *local,
580 struct sta_info *sta,
581 struct ieee80211_mgmt *mgmt,
582 size_t len)
583{
584 struct ieee80211_hw *hw = &local->hw;
585 u16 capab;
586 u16 tid, start_seq_num;
587 u8 *state;
588
589 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
590 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
591
592 state = &sta->ampdu_mlme.tid_state_tx[tid];
593
594 spin_lock_bh(&sta->lock);
595
596 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
597 spin_unlock_bh(&sta->lock);
598 return;
599 }
600
601 if (mgmt->u.action.u.addba_resp.dialog_token !=
602 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
603 spin_unlock_bh(&sta->lock);
604#ifdef CONFIG_MAC80211_HT_DEBUG
605 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
606#endif /* CONFIG_MAC80211_HT_DEBUG */
607 return;
608 }
609
610 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
611#ifdef CONFIG_MAC80211_HT_DEBUG
612 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
613#endif /* CONFIG_MAC80211_HT_DEBUG */
614 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
615 == WLAN_STATUS_SUCCESS) {
616 *state |= HT_ADDBA_RECEIVED_MSK;
617 sta->ampdu_mlme.addba_req_num[tid] = 0;
618
619 if (*state == HT_AGG_STATE_OPERATIONAL &&
620 local->hw.ampdu_queues)
621 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
622
623 if (local->ops->ampdu_action) {
624 (void)local->ops->ampdu_action(hw,
625 IEEE80211_AMPDU_TX_RESUME,
626 &sta->sta, tid, &start_seq_num);
627 }
628#ifdef CONFIG_MAC80211_HT_DEBUG
629 printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid);
630#endif /* CONFIG_MAC80211_HT_DEBUG */
631 } else {
632 sta->ampdu_mlme.addba_req_num[tid]++;
633 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
634 }
635 spin_unlock_bh(&sta->lock);
636}
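
For illustration only (not part of this patch): the Tx-side counterpart for a hypothetical driver, following the flow described in the "DOC: TX aggregation" comment near the top of agg-tx.c. The name example_tx_ampdu_action is invented; on TX_START the driver is assumed to prepare its queues and then call ieee80211_start_tx_ba_cb_irqsafe(), and on TX_STOP (which must not fail) it acknowledges the teardown with ieee80211_stop_tx_ba_cb_irqsafe().

#include <linux/errno.h>
#include <net/mac80211.h>

static int example_tx_ampdu_action(struct ieee80211_hw *hw,
                                   enum ieee80211_ampdu_mlme_action action,
                                   struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
                /* prepare hardware queues, then let mac80211 send the AddBA */
                ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
                return 0;
        case IEEE80211_AMPDU_TX_STOP:
                /* must not fail: tear down and acknowledge the stop */
                ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
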
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9d4e4d846ec..c8d969be440 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -133,6 +133,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
133 case WLAN_CIPHER_SUITE_CCMP: 133 case WLAN_CIPHER_SUITE_CCMP:
134 alg = ALG_CCMP; 134 alg = ALG_CCMP;
135 break; 135 break;
136 case WLAN_CIPHER_SUITE_AES_CMAC:
137 alg = ALG_AES_CMAC;
138 break;
136 default: 139 default:
137 return -EINVAL; 140 return -EINVAL;
138 } 141 }
@@ -275,6 +278,17 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
275 else 278 else
276 params.cipher = WLAN_CIPHER_SUITE_WEP104; 279 params.cipher = WLAN_CIPHER_SUITE_WEP104;
277 break; 280 break;
281 case ALG_AES_CMAC:
282 params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
283 seq[0] = key->u.aes_cmac.tx_pn[5];
284 seq[1] = key->u.aes_cmac.tx_pn[4];
285 seq[2] = key->u.aes_cmac.tx_pn[3];
286 seq[3] = key->u.aes_cmac.tx_pn[2];
287 seq[4] = key->u.aes_cmac.tx_pn[1];
288 seq[5] = key->u.aes_cmac.tx_pn[0];
289 params.seq = seq;
290 params.seq_len = 6;
291 break;
278 } 292 }
279 293
280 params.key = key->conf.key; 294 params.key = key->conf.key;
@@ -304,6 +318,22 @@ static int ieee80211_config_default_key(struct wiphy *wiphy,
304 return 0; 318 return 0;
305} 319}
306 320
321static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
322 struct net_device *dev,
323 u8 key_idx)
324{
325 struct ieee80211_sub_if_data *sdata;
326
327 rcu_read_lock();
328
329 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
330 ieee80211_set_default_mgmt_key(sdata, key_idx);
331
332 rcu_read_unlock();
333
334 return 0;
335}
336
307static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 337static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
308{ 338{
309 struct ieee80211_sub_if_data *sdata = sta->sdata; 339 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -493,7 +523,8 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
493 523
494 kfree(old); 524 kfree(old);
495 525
496 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); 526 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON |
527 IEEE80211_IFCC_BEACON_ENABLED);
497} 528}
498 529
499static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, 530static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -553,7 +584,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
553 synchronize_rcu(); 584 synchronize_rcu();
554 kfree(old); 585 kfree(old);
555 586
556 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); 587 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON_ENABLED);
557} 588}
558 589
559/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ 590/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
@@ -630,6 +661,10 @@ static void sta_apply_parameters(struct ieee80211_local *local,
630 sta->flags &= ~WLAN_STA_WME; 661 sta->flags &= ~WLAN_STA_WME;
631 if (params->station_flags & STATION_FLAG_WME) 662 if (params->station_flags & STATION_FLAG_WME)
632 sta->flags |= WLAN_STA_WME; 663 sta->flags |= WLAN_STA_WME;
664
665 sta->flags &= ~WLAN_STA_MFP;
666 if (params->station_flags & STATION_FLAG_MFP)
667 sta->flags |= WLAN_STA_MFP;
633 spin_unlock_bh(&sta->lock); 668 spin_unlock_bh(&sta->lock);
634 } 669 }
635 670
@@ -1141,6 +1176,126 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
1141 return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 1176 return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
1142} 1177}
1143 1178
1179static int set_mgmt_extra_ie_sta(struct ieee80211_sub_if_data *sdata,
1180 u8 subtype, u8 *ies, size_t ies_len)
1181{
1182 struct ieee80211_local *local = sdata->local;
1183 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1184
1185 switch (subtype) {
1186 case IEEE80211_STYPE_PROBE_REQ >> 4:
1187 if (local->ops->hw_scan)
1188 break;
1189 kfree(ifsta->ie_probereq);
1190 ifsta->ie_probereq = ies;
1191 ifsta->ie_probereq_len = ies_len;
1192 return 0;
1193 case IEEE80211_STYPE_PROBE_RESP >> 4:
1194 kfree(ifsta->ie_proberesp);
1195 ifsta->ie_proberesp = ies;
1196 ifsta->ie_proberesp_len = ies_len;
1197 return 0;
1198 case IEEE80211_STYPE_AUTH >> 4:
1199 kfree(ifsta->ie_auth);
1200 ifsta->ie_auth = ies;
1201 ifsta->ie_auth_len = ies_len;
1202 return 0;
1203 case IEEE80211_STYPE_ASSOC_REQ >> 4:
1204 kfree(ifsta->ie_assocreq);
1205 ifsta->ie_assocreq = ies;
1206 ifsta->ie_assocreq_len = ies_len;
1207 return 0;
1208 case IEEE80211_STYPE_REASSOC_REQ >> 4:
1209 kfree(ifsta->ie_reassocreq);
1210 ifsta->ie_reassocreq = ies;
1211 ifsta->ie_reassocreq_len = ies_len;
1212 return 0;
1213 case IEEE80211_STYPE_DEAUTH >> 4:
1214 kfree(ifsta->ie_deauth);
1215 ifsta->ie_deauth = ies;
1216 ifsta->ie_deauth_len = ies_len;
1217 return 0;
1218 case IEEE80211_STYPE_DISASSOC >> 4:
1219 kfree(ifsta->ie_disassoc);
1220 ifsta->ie_disassoc = ies;
1221 ifsta->ie_disassoc_len = ies_len;
1222 return 0;
1223 }
1224
1225 return -EOPNOTSUPP;
1226}
1227
1228static int ieee80211_set_mgmt_extra_ie(struct wiphy *wiphy,
1229 struct net_device *dev,
1230 struct mgmt_extra_ie_params *params)
1231{
1232 struct ieee80211_sub_if_data *sdata;
1233 u8 *ies;
1234 size_t ies_len;
1235 int ret = -EOPNOTSUPP;
1236
1237 if (params->ies) {
1238 ies = kmemdup(params->ies, params->ies_len, GFP_KERNEL);
1239 if (ies == NULL)
1240 return -ENOMEM;
1241 ies_len = params->ies_len;
1242 } else {
1243 ies = NULL;
1244 ies_len = 0;
1245 }
1246
1247 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1248
1249 switch (sdata->vif.type) {
1250 case NL80211_IFTYPE_STATION:
1251 case NL80211_IFTYPE_ADHOC:
1252 ret = set_mgmt_extra_ie_sta(sdata, params->subtype,
1253 ies, ies_len);
1254 break;
1255 default:
1256 ret = -EOPNOTSUPP;
1257 break;
1258 }
1259
1260 if (ret)
1261 kfree(ies);
1262 return ret;
1263}
1264
1265#ifdef CONFIG_PM
1266static int ieee80211_suspend(struct wiphy *wiphy)
1267{
1268 return __ieee80211_suspend(wiphy_priv(wiphy));
1269}
1270
1271static int ieee80211_resume(struct wiphy *wiphy)
1272{
1273 return __ieee80211_resume(wiphy_priv(wiphy));
1274}
1275#else
1276#define ieee80211_suspend NULL
1277#define ieee80211_resume NULL
1278#endif
1279
1280static int ieee80211_scan(struct wiphy *wiphy,
1281 struct net_device *dev,
1282 struct cfg80211_scan_request *req)
1283{
1284 struct ieee80211_sub_if_data *sdata;
1285
1286 if (!netif_running(dev))
1287 return -ENETDOWN;
1288
1289 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1290
1291 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1292 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1293 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
1294 return -EOPNOTSUPP;
1295
1296 return ieee80211_request_scan(sdata, req);
1297}
1298
1144struct cfg80211_ops mac80211_config_ops = { 1299struct cfg80211_ops mac80211_config_ops = {
1145 .add_virtual_intf = ieee80211_add_iface, 1300 .add_virtual_intf = ieee80211_add_iface,
1146 .del_virtual_intf = ieee80211_del_iface, 1301 .del_virtual_intf = ieee80211_del_iface,
@@ -1149,6 +1304,7 @@ struct cfg80211_ops mac80211_config_ops = {
1149 .del_key = ieee80211_del_key, 1304 .del_key = ieee80211_del_key,
1150 .get_key = ieee80211_get_key, 1305 .get_key = ieee80211_get_key,
1151 .set_default_key = ieee80211_config_default_key, 1306 .set_default_key = ieee80211_config_default_key,
1307 .set_default_mgmt_key = ieee80211_config_default_mgmt_key,
1152 .add_beacon = ieee80211_add_beacon, 1308 .add_beacon = ieee80211_add_beacon,
1153 .set_beacon = ieee80211_set_beacon, 1309 .set_beacon = ieee80211_set_beacon,
1154 .del_beacon = ieee80211_del_beacon, 1310 .del_beacon = ieee80211_del_beacon,
@@ -1169,4 +1325,8 @@ struct cfg80211_ops mac80211_config_ops = {
1169 .change_bss = ieee80211_change_bss, 1325 .change_bss = ieee80211_change_bss,
1170 .set_txq_params = ieee80211_set_txq_params, 1326 .set_txq_params = ieee80211_set_txq_params,
1171 .set_channel = ieee80211_set_channel, 1327 .set_channel = ieee80211_set_channel,
1328 .set_mgmt_extra_ie = ieee80211_set_mgmt_extra_ie,
1329 .suspend = ieee80211_suspend,
1330 .resume = ieee80211_resume,
1331 .scan = ieee80211_scan,
1172}; 1332};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 2697a2fe608..e37f557de3f 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -57,11 +57,62 @@ DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
57 local->hw.conf.long_frame_max_tx_count); 57 local->hw.conf.long_frame_max_tx_count);
58DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", 58DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
59 local->total_ps_buffered); 59 local->total_ps_buffered);
60DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x", 60DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
61 local->wep_iv & 0xffffff); 61 local->wep_iv & 0xffffff);
62DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", 62DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
63 local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>"); 63 local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>");
64 64
65static ssize_t tsf_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 struct ieee80211_local *local = file->private_data;
69 u64 tsf = 0;
70 char buf[100];
71
72 if (local->ops->get_tsf)
73 tsf = local->ops->get_tsf(local_to_hw(local));
74
75 snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf);
76
77 return simple_read_from_buffer(user_buf, count, ppos, buf, 19);
78}
79
80static ssize_t tsf_write(struct file *file,
81 const char __user *user_buf,
82 size_t count, loff_t *ppos)
83{
84 struct ieee80211_local *local = file->private_data;
85 unsigned long long tsf;
86 char buf[100];
87 size_t len;
88
89 len = min(count, sizeof(buf) - 1);
90 if (copy_from_user(buf, user_buf, len))
91 return -EFAULT;
92 buf[len] = '\0';
93
94 if (strncmp(buf, "reset", 5) == 0) {
95 if (local->ops->reset_tsf) {
96 local->ops->reset_tsf(local_to_hw(local));
97 printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy));
98 }
99 } else {
100 tsf = simple_strtoul(buf, NULL, 0);
101 if (local->ops->set_tsf) {
102 local->ops->set_tsf(local_to_hw(local), tsf);
103 printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf);
104 }
105 }
106
107 return count;
108}
109
110static const struct file_operations tsf_ops = {
111 .read = tsf_read,
112 .write = tsf_write,
113 .open = mac80211_open_file_generic
114};
115
65/* statistics stuff */ 116/* statistics stuff */
66 117
67#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ 118#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
@@ -136,8 +187,6 @@ DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u",
136 local->dot11MulticastReceivedFrameCount); 187 local->dot11MulticastReceivedFrameCount);
137DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u", 188DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u",
138 local->dot11TransmittedFrameCount); 189 local->dot11TransmittedFrameCount);
139DEBUGFS_STATS_FILE(wep_undecryptable_count, 20, "%u",
140 local->dot11WEPUndecryptableCount);
141#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 190#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
142DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u", 191DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u",
143 local->tx_handlers_drop); 192 local->tx_handlers_drop);
@@ -204,6 +253,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
204 DEBUGFS_ADD(long_retry_limit); 253 DEBUGFS_ADD(long_retry_limit);
205 DEBUGFS_ADD(total_ps_buffered); 254 DEBUGFS_ADD(total_ps_buffered);
206 DEBUGFS_ADD(wep_iv); 255 DEBUGFS_ADD(wep_iv);
256 DEBUGFS_ADD(tsf);
207 257
208 statsd = debugfs_create_dir("statistics", phyd); 258 statsd = debugfs_create_dir("statistics", phyd);
209 local->debugfs.statistics = statsd; 259 local->debugfs.statistics = statsd;
@@ -221,7 +271,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
221 DEBUGFS_STATS_ADD(received_fragment_count); 271 DEBUGFS_STATS_ADD(received_fragment_count);
222 DEBUGFS_STATS_ADD(multicast_received_frame_count); 272 DEBUGFS_STATS_ADD(multicast_received_frame_count);
223 DEBUGFS_STATS_ADD(transmitted_frame_count); 273 DEBUGFS_STATS_ADD(transmitted_frame_count);
224 DEBUGFS_STATS_ADD(wep_undecryptable_count);
225#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 274#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
226 DEBUGFS_STATS_ADD(tx_handlers_drop); 275 DEBUGFS_STATS_ADD(tx_handlers_drop);
227 DEBUGFS_STATS_ADD(tx_handlers_queued); 276 DEBUGFS_STATS_ADD(tx_handlers_queued);
@@ -258,6 +307,7 @@ void debugfs_hw_del(struct ieee80211_local *local)
258 DEBUGFS_DEL(long_retry_limit); 307 DEBUGFS_DEL(long_retry_limit);
259 DEBUGFS_DEL(total_ps_buffered); 308 DEBUGFS_DEL(total_ps_buffered);
260 DEBUGFS_DEL(wep_iv); 309 DEBUGFS_DEL(wep_iv);
310 DEBUGFS_DEL(tsf);
261 311
262 DEBUGFS_STATS_DEL(transmitted_fragment_count); 312 DEBUGFS_STATS_DEL(transmitted_fragment_count);
263 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); 313 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
@@ -268,7 +318,6 @@ void debugfs_hw_del(struct ieee80211_local *local)
268 DEBUGFS_STATS_DEL(received_fragment_count); 318 DEBUGFS_STATS_DEL(received_fragment_count);
269 DEBUGFS_STATS_DEL(multicast_received_frame_count); 319 DEBUGFS_STATS_DEL(multicast_received_frame_count);
270 DEBUGFS_STATS_DEL(transmitted_frame_count); 320 DEBUGFS_STATS_DEL(transmitted_frame_count);
271 DEBUGFS_STATS_DEL(wep_undecryptable_count);
272 DEBUGFS_STATS_DEL(num_scans); 321 DEBUGFS_STATS_DEL(num_scans);
273#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 322#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
274 DEBUGFS_STATS_DEL(tx_handlers_drop); 323 DEBUGFS_STATS_DEL(tx_handlers_drop);
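The tsf handlers added to debugfs.c above follow the standard debugfs read/write pattern: format the value into a small stack buffer and hand it to simple_read_from_buffer() on reads, copy and parse the user string on writes, and wire both through a file_operations table that DEBUGFS_ADD()/debugfs_create_file() registers. The fragment below is a self-contained sketch of that pattern, not part of this patch; the file name, the global counter and the minimal fops are assumptions for illustration (the real tsf file additionally sets .open = mac80211_open_file_generic so the handlers can reach the ieee80211_local through file->private_data).

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>

static u64 example_value;		/* hypothetical value exposed via debugfs */

static ssize_t example_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "0x%016llx\n",
			    (unsigned long long)example_value);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t example_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t len = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	example_value = simple_strtoull(buf, NULL, 0);
	return count;
}

static const struct file_operations example_ops = {
	.read	= example_read,
	.write	= example_write,
};

static void example_debugfs_init(struct dentry *parent)
{
	/* creates <debugfs>/<parent>/example_value backed by the fops above */
	debugfs_create_file("example_value", 0600, parent, NULL, &example_ops);
}

Accepting a keyword such as "reset" in addition to a numeric value, as tsf_write() does, is only an extra strncmp() branch on top of this skeleton.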
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 6424ac565ae..99c752588b3 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -76,6 +76,9 @@ static ssize_t key_algorithm_read(struct file *file,
76 case ALG_CCMP: 76 case ALG_CCMP:
77 alg = "CCMP\n"; 77 alg = "CCMP\n";
78 break; 78 break;
79 case ALG_AES_CMAC:
80 alg = "AES-128-CMAC\n";
81 break;
79 default: 82 default:
80 return 0; 83 return 0;
81 } 84 }
@@ -105,6 +108,12 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
105 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", 108 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
106 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]); 109 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]);
107 break; 110 break;
111 case ALG_AES_CMAC:
112 tpn = key->u.aes_cmac.tx_pn;
113 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
114 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4],
115 tpn[5]);
116 break;
108 default: 117 default:
109 return 0; 118 return 0;
110 } 119 }
@@ -142,6 +151,14 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
142 } 151 }
143 len = p - buf; 152 len = p - buf;
144 break; 153 break;
154 case ALG_AES_CMAC:
155 rpn = key->u.aes_cmac.rx_pn;
156 p += scnprintf(p, sizeof(buf)+buf-p,
157 "%02x%02x%02x%02x%02x%02x\n",
158 rpn[0], rpn[1], rpn[2],
159 rpn[3], rpn[4], rpn[5]);
160 len = p - buf;
161 break;
145 default: 162 default:
146 return 0; 163 return 0;
147 } 164 }
@@ -156,13 +173,40 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf,
156 char buf[20]; 173 char buf[20];
157 int len; 174 int len;
158 175
159 if (key->conf.alg != ALG_CCMP) 176 switch (key->conf.alg) {
177 case ALG_CCMP:
178 len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
179 break;
180 case ALG_AES_CMAC:
181 len = scnprintf(buf, sizeof(buf), "%u\n",
182 key->u.aes_cmac.replays);
183 break;
184 default:
160 return 0; 185 return 0;
161 len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); 186 }
162 return simple_read_from_buffer(userbuf, count, ppos, buf, len); 187 return simple_read_from_buffer(userbuf, count, ppos, buf, len);
163} 188}
164KEY_OPS(replays); 189KEY_OPS(replays);
165 190
191static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
192 size_t count, loff_t *ppos)
193{
194 struct ieee80211_key *key = file->private_data;
195 char buf[20];
196 int len;
197
198 switch (key->conf.alg) {
199 case ALG_AES_CMAC:
200 len = scnprintf(buf, sizeof(buf), "%u\n",
201 key->u.aes_cmac.icverrors);
202 break;
203 default:
204 return 0;
205 }
206 return simple_read_from_buffer(userbuf, count, ppos, buf, len);
207}
208KEY_OPS(icverrors);
209
166static ssize_t key_key_read(struct file *file, char __user *userbuf, 210static ssize_t key_key_read(struct file *file, char __user *userbuf,
167 size_t count, loff_t *ppos) 211 size_t count, loff_t *ppos)
168{ 212{
@@ -222,6 +266,7 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
222 DEBUGFS_ADD(tx_spec); 266 DEBUGFS_ADD(tx_spec);
223 DEBUGFS_ADD(rx_spec); 267 DEBUGFS_ADD(rx_spec);
224 DEBUGFS_ADD(replays); 268 DEBUGFS_ADD(replays);
269 DEBUGFS_ADD(icverrors);
225 DEBUGFS_ADD(key); 270 DEBUGFS_ADD(key);
226 DEBUGFS_ADD(ifindex); 271 DEBUGFS_ADD(ifindex);
227}; 272};
@@ -243,6 +288,7 @@ void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
243 DEBUGFS_DEL(tx_spec); 288 DEBUGFS_DEL(tx_spec);
244 DEBUGFS_DEL(rx_spec); 289 DEBUGFS_DEL(rx_spec);
245 DEBUGFS_DEL(replays); 290 DEBUGFS_DEL(replays);
291 DEBUGFS_DEL(icverrors);
246 DEBUGFS_DEL(key); 292 DEBUGFS_DEL(key);
247 DEBUGFS_DEL(ifindex); 293 DEBUGFS_DEL(ifindex);
248 294
@@ -280,6 +326,35 @@ void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata)
280 sdata->common_debugfs.default_key = NULL; 326 sdata->common_debugfs.default_key = NULL;
281} 327}
282 328
329void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
330{
331 char buf[50];
332 struct ieee80211_key *key;
333
334 if (!sdata->debugfsdir)
335 return;
336
337 /* this is running under the key lock */
338
339 key = sdata->default_mgmt_key;
340 if (key) {
341 sprintf(buf, "../keys/%d", key->debugfs.cnt);
342 sdata->common_debugfs.default_mgmt_key =
343 debugfs_create_symlink("default_mgmt_key",
344 sdata->debugfsdir, buf);
345 } else
346 ieee80211_debugfs_key_remove_mgmt_default(sdata);
347}
348
349void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata)
350{
351 if (!sdata)
352 return;
353
354 debugfs_remove(sdata->common_debugfs.default_mgmt_key);
355 sdata->common_debugfs.default_mgmt_key = NULL;
356}
357
283void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 358void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
284 struct sta_info *sta) 359 struct sta_info *sta)
285{ 360{
diff --git a/net/mac80211/debugfs_key.h b/net/mac80211/debugfs_key.h
index b1a3754ee24..54717b4e137 100644
--- a/net/mac80211/debugfs_key.h
+++ b/net/mac80211/debugfs_key.h
@@ -6,6 +6,10 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key);
6void ieee80211_debugfs_key_remove(struct ieee80211_key *key); 6void ieee80211_debugfs_key_remove(struct ieee80211_key *key);
7void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata); 7void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata);
8void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata); 8void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata);
9void ieee80211_debugfs_key_add_mgmt_default(
10 struct ieee80211_sub_if_data *sdata);
11void ieee80211_debugfs_key_remove_mgmt_default(
12 struct ieee80211_sub_if_data *sdata);
9void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 13void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
10 struct sta_info *sta); 14 struct sta_info *sta);
11#else 15#else
@@ -19,6 +23,12 @@ static inline void ieee80211_debugfs_key_add_default(
19static inline void ieee80211_debugfs_key_remove_default( 23static inline void ieee80211_debugfs_key_remove_default(
20 struct ieee80211_sub_if_data *sdata) 24 struct ieee80211_sub_if_data *sdata)
21{} 25{}
26static inline void ieee80211_debugfs_key_add_mgmt_default(
27 struct ieee80211_sub_if_data *sdata)
28{}
29static inline void ieee80211_debugfs_key_remove_mgmt_default(
30 struct ieee80211_sub_if_data *sdata)
31{}
22static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 32static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
23 struct sta_info *sta) 33 struct sta_info *sta)
24{} 34{}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a2fbe013131..90230c718b5 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -67,14 +67,15 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
67 char buf[100]; 67 char buf[100];
68 struct sta_info *sta = file->private_data; 68 struct sta_info *sta = file->private_data;
69 u32 staflags = get_sta_flags(sta); 69 u32 staflags = get_sta_flags(sta);
70 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s", 70 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s",
71 staflags & WLAN_STA_AUTH ? "AUTH\n" : "", 71 staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
72 staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", 72 staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
73 staflags & WLAN_STA_PS ? "PS\n" : "", 73 staflags & WLAN_STA_PS ? "PS\n" : "",
74 staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", 74 staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
75 staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", 75 staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
76 staflags & WLAN_STA_WME ? "WME\n" : "", 76 staflags & WLAN_STA_WME ? "WME\n" : "",
77 staflags & WLAN_STA_WDS ? "WDS\n" : ""); 77 staflags & WLAN_STA_WDS ? "WDS\n" : "",
78 staflags & WLAN_STA_MFP ? "MFP\n" : "");
78 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 79 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
79} 80}
80STA_OPS(flags); 81STA_OPS(flags);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index c5c0c527109..82ea0b63a38 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -17,8 +17,6 @@
17#include <net/wireless.h> 17#include <net/wireless.h>
18#include <net/mac80211.h> 18#include <net/mac80211.h>
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "sta_info.h"
21#include "wme.h"
22 20
23void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, 21void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
24 struct ieee80211_ht_cap *ht_cap_ie, 22 struct ieee80211_ht_cap *ht_cap_ie,
@@ -130,14 +128,15 @@ u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
130 } 128 }
131 } 129 }
132 130
133 ht_changed = local->hw.conf.ht.enabled != enable_ht || 131 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
134 channel_type != local->hw.conf.ht.channel_type; 132 channel_type != local->hw.conf.channel_type;
135 133
136 local->oper_channel_type = channel_type; 134 local->oper_channel_type = channel_type;
137 local->hw.conf.ht.enabled = enable_ht;
138 135
139 if (ht_changed) 136 if (ht_changed) {
140 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_HT); 137 /* channel_type change automatically detected */
138 ieee80211_hw_config(local, 0);
139 }
141 140
142 /* disable HT */ 141 /* disable HT */
143 if (!enable_ht) 142 if (!enable_ht)
@@ -154,105 +153,20 @@ u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
154 return changed; 153 return changed;
155} 154}
156 155
157static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, 156void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
158 const u8 *da, u16 tid,
159 u8 dialog_token, u16 start_seq_num,
160 u16 agg_size, u16 timeout)
161{ 157{
162 struct ieee80211_local *local = sdata->local; 158 int i;
163 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
164 struct sk_buff *skb;
165 struct ieee80211_mgmt *mgmt;
166 u16 capab;
167
168 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
169
170 if (!skb) {
171 printk(KERN_ERR "%s: failed to allocate buffer "
172 "for addba request frame\n", sdata->dev->name);
173 return;
174 }
175 skb_reserve(skb, local->hw.extra_tx_headroom);
176 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
177 memset(mgmt, 0, 24);
178 memcpy(mgmt->da, da, ETH_ALEN);
179 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
180 if (sdata->vif.type == NL80211_IFTYPE_AP)
181 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
182 else
183 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
184
185 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
186 IEEE80211_STYPE_ACTION);
187
188 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
189
190 mgmt->u.action.category = WLAN_CATEGORY_BACK;
191 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
192
193 mgmt->u.action.u.addba_req.dialog_token = dialog_token;
194 capab = (u16)(1 << 1); /* bit 1 aggregation policy */
195 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
196 capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */
197
198 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
199
200 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
201 mgmt->u.action.u.addba_req.start_seq_num =
202 cpu_to_le16(start_seq_num << 4);
203
204 ieee80211_tx_skb(sdata, skb, 0);
205}
206
207static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
208 u8 dialog_token, u16 status, u16 policy,
209 u16 buf_size, u16 timeout)
210{
211 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
212 struct ieee80211_local *local = sdata->local;
213 struct sk_buff *skb;
214 struct ieee80211_mgmt *mgmt;
215 u16 capab;
216
217 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
218 159
219 if (!skb) { 160 for (i = 0; i < STA_TID_NUM; i++) {
220 printk(KERN_DEBUG "%s: failed to allocate buffer " 161 __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR);
221 "for addba resp frame\n", sdata->dev->name); 162 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
222 return; 163 WLAN_REASON_QSTA_LEAVE_QBSS);
223 } 164 }
224
225 skb_reserve(skb, local->hw.extra_tx_headroom);
226 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
227 memset(mgmt, 0, 24);
228 memcpy(mgmt->da, da, ETH_ALEN);
229 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
230 if (sdata->vif.type == NL80211_IFTYPE_AP)
231 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
232 else
233 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
234 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
235 IEEE80211_STYPE_ACTION);
236
237 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
238 mgmt->u.action.category = WLAN_CATEGORY_BACK;
239 mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
240 mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
241
242 capab = (u16)(policy << 1); /* bit 1 aggregation policy */
243 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
244 capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */
245
246 mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
247 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
248 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
249
250 ieee80211_tx_skb(sdata, skb, 0);
251} 165}
252 166
253static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 167void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
254 const u8 *da, u16 tid, 168 const u8 *da, u16 tid,
255 u16 initiator, u16 reason_code) 169 u16 initiator, u16 reason_code)
256{ 170{
257 struct ieee80211_local *local = sdata->local; 171 struct ieee80211_local *local = sdata->local;
258 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 172 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
@@ -273,7 +187,8 @@ static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
273 memset(mgmt, 0, 24); 187 memset(mgmt, 0, 24);
274 memcpy(mgmt->da, da, ETH_ALEN); 188 memcpy(mgmt->da, da, ETH_ALEN);
275 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 189 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
276 if (sdata->vif.type == NL80211_IFTYPE_AP) 190 if (sdata->vif.type == NL80211_IFTYPE_AP ||
191 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
277 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 192 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
278 else 193 else
279 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 194 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
@@ -290,770 +205,7 @@ static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
290 mgmt->u.action.u.delba.params = cpu_to_le16(params); 205 mgmt->u.action.u.delba.params = cpu_to_le16(params);
291 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); 206 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
292 207
293 ieee80211_tx_skb(sdata, skb, 0); 208 ieee80211_tx_skb(sdata, skb, 1);
294}
295
296void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
297{
298 struct ieee80211_local *local = sdata->local;
299 struct sk_buff *skb;
300 struct ieee80211_bar *bar;
301 u16 bar_control = 0;
302
303 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
304 if (!skb) {
305 printk(KERN_ERR "%s: failed to allocate buffer for "
306 "bar frame\n", sdata->dev->name);
307 return;
308 }
309 skb_reserve(skb, local->hw.extra_tx_headroom);
310 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
311 memset(bar, 0, sizeof(*bar));
312 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
313 IEEE80211_STYPE_BACK_REQ);
314 memcpy(bar->ra, ra, ETH_ALEN);
315 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
316 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
317 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
318 bar_control |= (u16)(tid << 12);
319 bar->control = cpu_to_le16(bar_control);
320 bar->start_seq_num = cpu_to_le16(ssn);
321
322 ieee80211_tx_skb(sdata, skb, 0);
323}
324
325void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
326 u16 initiator, u16 reason)
327{
328 struct ieee80211_local *local = sdata->local;
329 struct ieee80211_hw *hw = &local->hw;
330 struct sta_info *sta;
331 int ret, i;
332
333 rcu_read_lock();
334
335 sta = sta_info_get(local, ra);
336 if (!sta) {
337 rcu_read_unlock();
338 return;
339 }
340
341 /* check if TID is in operational state */
342 spin_lock_bh(&sta->lock);
343 if (sta->ampdu_mlme.tid_state_rx[tid]
344 != HT_AGG_STATE_OPERATIONAL) {
345 spin_unlock_bh(&sta->lock);
346 rcu_read_unlock();
347 return;
348 }
349 sta->ampdu_mlme.tid_state_rx[tid] =
350 HT_AGG_STATE_REQ_STOP_BA_MSK |
351 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
352 spin_unlock_bh(&sta->lock);
353
354 /* stop HW Rx aggregation. ampdu_action existence
355 * already verified in session init so we add the BUG_ON */
356 BUG_ON(!local->ops->ampdu_action);
357
358#ifdef CONFIG_MAC80211_HT_DEBUG
359 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
360 ra, tid);
361#endif /* CONFIG_MAC80211_HT_DEBUG */
362
363 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
364 &sta->sta, tid, NULL);
365 if (ret)
366 printk(KERN_DEBUG "HW problem - can not stop rx "
367 "aggregation for tid %d\n", tid);
368
369 /* shutdown timer has not expired */
370 if (initiator != WLAN_BACK_TIMER)
371 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
372
373 /* check if this is a self generated aggregation halt */
374 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
375 ieee80211_send_delba(sdata, ra, tid, 0, reason);
376
377 /* free the reordering buffer */
378 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
379 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
380 /* release the reordered frames */
381 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
382 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
383 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
384 }
385 }
386 /* free resources */
387 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
388 kfree(sta->ampdu_mlme.tid_rx[tid]);
389 sta->ampdu_mlme.tid_rx[tid] = NULL;
390 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
391
392 rcu_read_unlock();
393}
394
395
396/*
397 * After sending add Block Ack request we activated a timer until
398 * add Block Ack response will arrive from the recipient.
399 * If this timer expires sta_addba_resp_timer_expired will be executed.
400 */
401static void sta_addba_resp_timer_expired(unsigned long data)
402{
403 /* not an elegant detour, but there is no choice as the timer passes
404 * only one argument, and both sta_info and TID are needed, so init
405 * flow in sta_info_create gives the TID as data, while the timer_to_id
406 * array gives the sta through container_of */
407 u16 tid = *(u8 *)data;
408 struct sta_info *temp_sta = container_of((void *)data,
409 struct sta_info, timer_to_tid[tid]);
410
411 struct ieee80211_local *local = temp_sta->local;
412 struct ieee80211_hw *hw = &local->hw;
413 struct sta_info *sta;
414 u8 *state;
415
416 rcu_read_lock();
417
418 sta = sta_info_get(local, temp_sta->sta.addr);
419 if (!sta) {
420 rcu_read_unlock();
421 return;
422 }
423
424 state = &sta->ampdu_mlme.tid_state_tx[tid];
425 /* check if the TID waits for addBA response */
426 spin_lock_bh(&sta->lock);
427 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
428 spin_unlock_bh(&sta->lock);
429 *state = HT_AGG_STATE_IDLE;
430#ifdef CONFIG_MAC80211_HT_DEBUG
431 printk(KERN_DEBUG "timer expired on tid %d but we are not "
432 "expecting addBA response there", tid);
433#endif
434 goto timer_expired_exit;
435 }
436
437#ifdef CONFIG_MAC80211_HT_DEBUG
438 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
439#endif
440
441 /* go through the state check in stop_BA_session */
442 *state = HT_AGG_STATE_OPERATIONAL;
443 spin_unlock_bh(&sta->lock);
444 ieee80211_stop_tx_ba_session(hw, temp_sta->sta.addr, tid,
445 WLAN_BACK_INITIATOR);
446
447timer_expired_exit:
448 rcu_read_unlock();
449}
450
451void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr)
452{
453 struct ieee80211_local *local = sdata->local;
454 int i;
455
456 for (i = 0; i < STA_TID_NUM; i++) {
457 ieee80211_stop_tx_ba_session(&local->hw, addr, i,
458 WLAN_BACK_INITIATOR);
459 ieee80211_sta_stop_rx_ba_session(sdata, addr, i,
460 WLAN_BACK_RECIPIENT,
461 WLAN_REASON_QSTA_LEAVE_QBSS);
462 }
463}
464
465int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
466{
467 struct ieee80211_local *local = hw_to_local(hw);
468 struct sta_info *sta;
469 struct ieee80211_sub_if_data *sdata;
470 u16 start_seq_num;
471 u8 *state;
472 int ret = 0;
473
474 if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
475 return -EINVAL;
476
477#ifdef CONFIG_MAC80211_HT_DEBUG
478 printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
479 ra, tid);
480#endif /* CONFIG_MAC80211_HT_DEBUG */
481
482 rcu_read_lock();
483
484 sta = sta_info_get(local, ra);
485 if (!sta) {
486#ifdef CONFIG_MAC80211_HT_DEBUG
487 printk(KERN_DEBUG "Could not find the station\n");
488#endif
489 ret = -ENOENT;
490 goto exit;
491 }
492
493 spin_lock_bh(&sta->lock);
494
495 /* we have tried too many times, receiver does not want A-MPDU */
496 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
497 ret = -EBUSY;
498 goto err_unlock_sta;
499 }
500
501 state = &sta->ampdu_mlme.tid_state_tx[tid];
502 /* check if the TID is not in aggregation flow already */
503 if (*state != HT_AGG_STATE_IDLE) {
504#ifdef CONFIG_MAC80211_HT_DEBUG
505 printk(KERN_DEBUG "BA request denied - session is not "
506 "idle on tid %u\n", tid);
507#endif /* CONFIG_MAC80211_HT_DEBUG */
508 ret = -EAGAIN;
509 goto err_unlock_sta;
510 }
511
512 /* prepare A-MPDU MLME for Tx aggregation */
513 sta->ampdu_mlme.tid_tx[tid] =
514 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
515 if (!sta->ampdu_mlme.tid_tx[tid]) {
516#ifdef CONFIG_MAC80211_HT_DEBUG
517 if (net_ratelimit())
518 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
519 tid);
520#endif
521 ret = -ENOMEM;
522 goto err_unlock_sta;
523 }
524 /* Tx timer */
525 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
526 sta_addba_resp_timer_expired;
527 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
528 (unsigned long)&sta->timer_to_tid[tid];
529 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
530
531 if (hw->ampdu_queues) {
532 /* create a new queue for this aggregation */
533 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
534
535 /* case no queue is available to aggregation
536 * don't switch to aggregation */
537 if (ret) {
538#ifdef CONFIG_MAC80211_HT_DEBUG
539 printk(KERN_DEBUG "BA request denied - "
540 "queue unavailable for tid %d\n", tid);
541#endif /* CONFIG_MAC80211_HT_DEBUG */
542 goto err_unlock_queue;
543 }
544 }
545 sdata = sta->sdata;
546
547 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
548 * call back right away, it must see that the flow has begun */
549 *state |= HT_ADDBA_REQUESTED_MSK;
550
551 /* This is slightly racy because the queue isn't stopped */
552 start_seq_num = sta->tid_seq[tid];
553
554 if (local->ops->ampdu_action)
555 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
556 &sta->sta, tid, &start_seq_num);
557
558 if (ret) {
559 /* No need to requeue the packets in the agg queue, since we
560 * held the tx lock: no packet could be enqueued to the newly
561 * allocated queue */
562 if (hw->ampdu_queues)
563 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
564#ifdef CONFIG_MAC80211_HT_DEBUG
565 printk(KERN_DEBUG "BA request denied - HW unavailable for"
566 " tid %d\n", tid);
567#endif /* CONFIG_MAC80211_HT_DEBUG */
568 *state = HT_AGG_STATE_IDLE;
569 goto err_unlock_queue;
570 }
571
572 /* Will put all the packets in the new SW queue */
573 if (hw->ampdu_queues)
574 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
575 spin_unlock_bh(&sta->lock);
576
577 /* send an addBA request */
578 sta->ampdu_mlme.dialog_token_allocator++;
579 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
580 sta->ampdu_mlme.dialog_token_allocator;
581 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
582
583
584 ieee80211_send_addba_request(sta->sdata, ra, tid,
585 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
586 sta->ampdu_mlme.tid_tx[tid]->ssn,
587 0x40, 5000);
588 /* activate the timer for the recipient's addBA response */
589 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
590 jiffies + ADDBA_RESP_INTERVAL;
591 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
592#ifdef CONFIG_MAC80211_HT_DEBUG
593 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
594#endif
595 goto exit;
596
597err_unlock_queue:
598 kfree(sta->ampdu_mlme.tid_tx[tid]);
599 sta->ampdu_mlme.tid_tx[tid] = NULL;
600 ret = -EBUSY;
601err_unlock_sta:
602 spin_unlock_bh(&sta->lock);
603exit:
604 rcu_read_unlock();
605 return ret;
606}
607EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
608
609int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
610 u8 *ra, u16 tid,
611 enum ieee80211_back_parties initiator)
612{
613 struct ieee80211_local *local = hw_to_local(hw);
614 struct sta_info *sta;
615 u8 *state;
616 int ret = 0;
617
618 if (tid >= STA_TID_NUM)
619 return -EINVAL;
620
621 rcu_read_lock();
622 sta = sta_info_get(local, ra);
623 if (!sta) {
624 rcu_read_unlock();
625 return -ENOENT;
626 }
627
628 /* check if the TID is in aggregation */
629 state = &sta->ampdu_mlme.tid_state_tx[tid];
630 spin_lock_bh(&sta->lock);
631
632 if (*state != HT_AGG_STATE_OPERATIONAL) {
633 ret = -ENOENT;
634 goto stop_BA_exit;
635 }
636
637#ifdef CONFIG_MAC80211_HT_DEBUG
638 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
639 ra, tid);
640#endif /* CONFIG_MAC80211_HT_DEBUG */
641
642 if (hw->ampdu_queues)
643 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
644
645 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
646 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
647
648 if (local->ops->ampdu_action)
649 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
650 &sta->sta, tid, NULL);
651
652 /* case HW denied going back to legacy */
653 if (ret) {
654 WARN_ON(ret != -EBUSY);
655 *state = HT_AGG_STATE_OPERATIONAL;
656 if (hw->ampdu_queues)
657 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
658 goto stop_BA_exit;
659 }
660
661stop_BA_exit:
662 spin_unlock_bh(&sta->lock);
663 rcu_read_unlock();
664 return ret;
665}
666EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
667
668void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
669{
670 struct ieee80211_local *local = hw_to_local(hw);
671 struct sta_info *sta;
672 u8 *state;
673
674 if (tid >= STA_TID_NUM) {
675#ifdef CONFIG_MAC80211_HT_DEBUG
676 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
677 tid, STA_TID_NUM);
678#endif
679 return;
680 }
681
682 rcu_read_lock();
683 sta = sta_info_get(local, ra);
684 if (!sta) {
685 rcu_read_unlock();
686#ifdef CONFIG_MAC80211_HT_DEBUG
687 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
688#endif
689 return;
690 }
691
692 state = &sta->ampdu_mlme.tid_state_tx[tid];
693 spin_lock_bh(&sta->lock);
694
695 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
696#ifdef CONFIG_MAC80211_HT_DEBUG
697 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
698 *state);
699#endif
700 spin_unlock_bh(&sta->lock);
701 rcu_read_unlock();
702 return;
703 }
704
705 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
706
707 *state |= HT_ADDBA_DRV_READY_MSK;
708
709 if (*state == HT_AGG_STATE_OPERATIONAL) {
710#ifdef CONFIG_MAC80211_HT_DEBUG
711 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
712#endif
713 if (hw->ampdu_queues)
714 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
715 }
716 spin_unlock_bh(&sta->lock);
717 rcu_read_unlock();
718}
719EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
720
721void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
722{
723 struct ieee80211_local *local = hw_to_local(hw);
724 struct sta_info *sta;
725 u8 *state;
726 int agg_queue;
727
728 if (tid >= STA_TID_NUM) {
729#ifdef CONFIG_MAC80211_HT_DEBUG
730 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
731 tid, STA_TID_NUM);
732#endif
733 return;
734 }
735
736#ifdef CONFIG_MAC80211_HT_DEBUG
737 printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
738 ra, tid);
739#endif /* CONFIG_MAC80211_HT_DEBUG */
740
741 rcu_read_lock();
742 sta = sta_info_get(local, ra);
743 if (!sta) {
744#ifdef CONFIG_MAC80211_HT_DEBUG
745 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
746#endif
747 rcu_read_unlock();
748 return;
749 }
750 state = &sta->ampdu_mlme.tid_state_tx[tid];
751
752 /* NOTE: no need to use sta->lock in this state check, as
753 * ieee80211_stop_tx_ba_session will let only one stop call to
754 * pass through per sta/tid
755 */
756 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
757#ifdef CONFIG_MAC80211_HT_DEBUG
758 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
759#endif
760 rcu_read_unlock();
761 return;
762 }
763
764 if (*state & HT_AGG_STATE_INITIATOR_MSK)
765 ieee80211_send_delba(sta->sdata, ra, tid,
766 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
767
768 if (hw->ampdu_queues) {
769 agg_queue = sta->tid_to_tx_q[tid];
770 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
771
772 /* We just requeued the all the frames that were in the
773 * removed queue, and since we might miss a softirq we do
774 * netif_schedule_queue. ieee80211_wake_queue is not used
775 * here as this queue is not necessarily stopped
776 */
777 netif_schedule_queue(netdev_get_tx_queue(local->mdev,
778 agg_queue));
779 }
780 spin_lock_bh(&sta->lock);
781 *state = HT_AGG_STATE_IDLE;
782 sta->ampdu_mlme.addba_req_num[tid] = 0;
783 kfree(sta->ampdu_mlme.tid_tx[tid]);
784 sta->ampdu_mlme.tid_tx[tid] = NULL;
785 spin_unlock_bh(&sta->lock);
786
787 rcu_read_unlock();
788}
789EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
790
791void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
792 const u8 *ra, u16 tid)
793{
794 struct ieee80211_local *local = hw_to_local(hw);
795 struct ieee80211_ra_tid *ra_tid;
796 struct sk_buff *skb = dev_alloc_skb(0);
797
798 if (unlikely(!skb)) {
799#ifdef CONFIG_MAC80211_HT_DEBUG
800 if (net_ratelimit())
801 printk(KERN_WARNING "%s: Not enough memory, "
802 "dropping start BA session", skb->dev->name);
803#endif
804 return;
805 }
806 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
807 memcpy(&ra_tid->ra, ra, ETH_ALEN);
808 ra_tid->tid = tid;
809
810 skb->pkt_type = IEEE80211_ADDBA_MSG;
811 skb_queue_tail(&local->skb_queue, skb);
812 tasklet_schedule(&local->tasklet);
813}
814EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
815
816void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
817 const u8 *ra, u16 tid)
818{
819 struct ieee80211_local *local = hw_to_local(hw);
820 struct ieee80211_ra_tid *ra_tid;
821 struct sk_buff *skb = dev_alloc_skb(0);
822
823 if (unlikely(!skb)) {
824#ifdef CONFIG_MAC80211_HT_DEBUG
825 if (net_ratelimit())
826 printk(KERN_WARNING "%s: Not enough memory, "
827 "dropping stop BA session", skb->dev->name);
828#endif
829 return;
830 }
831 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
832 memcpy(&ra_tid->ra, ra, ETH_ALEN);
833 ra_tid->tid = tid;
834
835 skb->pkt_type = IEEE80211_DELBA_MSG;
836 skb_queue_tail(&local->skb_queue, skb);
837 tasklet_schedule(&local->tasklet);
838}
839EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
840
841/*
842 * After accepting the AddBA Request we activated a timer,
843 * resetting it after each frame that arrives from the originator.
844 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
845 */
846static void sta_rx_agg_session_timer_expired(unsigned long data)
847{
848 /* not an elegant detour, but there is no choice as the timer passes
849 * only one argument, and various sta_info are needed here, so init
850 * flow in sta_info_create gives the TID as data, while the timer_to_id
851 * array gives the sta through container_of */
852 u8 *ptid = (u8 *)data;
853 u8 *timer_to_id = ptid - *ptid;
854 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
855 timer_to_tid[0]);
856
857#ifdef CONFIG_MAC80211_HT_DEBUG
858 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
859#endif
860 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
861 (u16)*ptid, WLAN_BACK_TIMER,
862 WLAN_REASON_QSTA_TIMEOUT);
863}
864
865void ieee80211_process_addba_request(struct ieee80211_local *local,
866 struct sta_info *sta,
867 struct ieee80211_mgmt *mgmt,
868 size_t len)
869{
870 struct ieee80211_hw *hw = &local->hw;
871 struct ieee80211_conf *conf = &hw->conf;
872 struct tid_ampdu_rx *tid_agg_rx;
873 u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
874 u8 dialog_token;
875 int ret = -EOPNOTSUPP;
876
877 /* extract session parameters from addba request frame */
878 dialog_token = mgmt->u.action.u.addba_req.dialog_token;
879 timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
880 start_seq_num =
881 le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
882
883 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
884 ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
885 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
886 buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
887
888 status = WLAN_STATUS_REQUEST_DECLINED;
889
890 /* sanity check for incoming parameters:
891 * check if configuration can support the BA policy
892 * and if buffer size does not exceeds max value */
893 /* XXX: check own ht delayed BA capability?? */
894 if (((ba_policy != 1)
895 && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA)))
896 || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
897 status = WLAN_STATUS_INVALID_QOS_PARAM;
898#ifdef CONFIG_MAC80211_HT_DEBUG
899 if (net_ratelimit())
900 printk(KERN_DEBUG "AddBA Req with bad params from "
901 "%pM on tid %u. policy %d, buffer size %d\n",
902 mgmt->sa, tid, ba_policy,
903 buf_size);
904#endif /* CONFIG_MAC80211_HT_DEBUG */
905 goto end_no_lock;
906 }
907 /* determine default buffer size */
908 if (buf_size == 0) {
909 struct ieee80211_supported_band *sband;
910
911 sband = local->hw.wiphy->bands[conf->channel->band];
912 buf_size = IEEE80211_MIN_AMPDU_BUF;
913 buf_size = buf_size << sband->ht_cap.ampdu_factor;
914 }
915
916
917 /* examine state machine */
918 spin_lock_bh(&sta->lock);
919
920 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
921#ifdef CONFIG_MAC80211_HT_DEBUG
922 if (net_ratelimit())
923 printk(KERN_DEBUG "unexpected AddBA Req from "
924 "%pM on tid %u\n",
925 mgmt->sa, tid);
926#endif /* CONFIG_MAC80211_HT_DEBUG */
927 goto end;
928 }
929
930 /* prepare A-MPDU MLME for Rx aggregation */
931 sta->ampdu_mlme.tid_rx[tid] =
932 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
933 if (!sta->ampdu_mlme.tid_rx[tid]) {
934#ifdef CONFIG_MAC80211_HT_DEBUG
935 if (net_ratelimit())
936 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
937 tid);
938#endif
939 goto end;
940 }
941 /* rx timer */
942 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
943 sta_rx_agg_session_timer_expired;
944 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
945 (unsigned long)&sta->timer_to_tid[tid];
946 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
947
948 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
949
950 /* prepare reordering buffer */
951 tid_agg_rx->reorder_buf =
952 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
953 if (!tid_agg_rx->reorder_buf) {
954#ifdef CONFIG_MAC80211_HT_DEBUG
955 if (net_ratelimit())
956 printk(KERN_ERR "can not allocate reordering buffer "
957 "to tid %d\n", tid);
958#endif
959 kfree(sta->ampdu_mlme.tid_rx[tid]);
960 goto end;
961 }
962 memset(tid_agg_rx->reorder_buf, 0,
963 buf_size * sizeof(struct sk_buff *));
964
965 if (local->ops->ampdu_action)
966 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
967 &sta->sta, tid, &start_seq_num);
968#ifdef CONFIG_MAC80211_HT_DEBUG
969 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
970#endif /* CONFIG_MAC80211_HT_DEBUG */
971
972 if (ret) {
973 kfree(tid_agg_rx->reorder_buf);
974 kfree(tid_agg_rx);
975 sta->ampdu_mlme.tid_rx[tid] = NULL;
976 goto end;
977 }
978
979 /* change state and send addba resp */
980 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
981 tid_agg_rx->dialog_token = dialog_token;
982 tid_agg_rx->ssn = start_seq_num;
983 tid_agg_rx->head_seq_num = start_seq_num;
984 tid_agg_rx->buf_size = buf_size;
985 tid_agg_rx->timeout = timeout;
986 tid_agg_rx->stored_mpdu_num = 0;
987 status = WLAN_STATUS_SUCCESS;
988end:
989 spin_unlock_bh(&sta->lock);
990
991end_no_lock:
992 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
993 dialog_token, status, 1, buf_size, timeout);
994}
995
996void ieee80211_process_addba_resp(struct ieee80211_local *local,
997 struct sta_info *sta,
998 struct ieee80211_mgmt *mgmt,
999 size_t len)
1000{
1001 struct ieee80211_hw *hw = &local->hw;
1002 u16 capab;
1003 u16 tid, start_seq_num;
1004 u8 *state;
1005
1006 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
1007 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
1008
1009 state = &sta->ampdu_mlme.tid_state_tx[tid];
1010
1011 spin_lock_bh(&sta->lock);
1012
1013 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1014 spin_unlock_bh(&sta->lock);
1015 return;
1016 }
1017
1018 if (mgmt->u.action.u.addba_resp.dialog_token !=
1019 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
1020 spin_unlock_bh(&sta->lock);
1021#ifdef CONFIG_MAC80211_HT_DEBUG
1022 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1023#endif /* CONFIG_MAC80211_HT_DEBUG */
1024 return;
1025 }
1026
1027 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
1028#ifdef CONFIG_MAC80211_HT_DEBUG
1029 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
1030#endif /* CONFIG_MAC80211_HT_DEBUG */
1031 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
1032 == WLAN_STATUS_SUCCESS) {
1033 *state |= HT_ADDBA_RECEIVED_MSK;
1034 sta->ampdu_mlme.addba_req_num[tid] = 0;
1035
1036 if (*state == HT_AGG_STATE_OPERATIONAL &&
1037 local->hw.ampdu_queues)
1038 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
1039
1040 if (local->ops->ampdu_action) {
1041 (void)local->ops->ampdu_action(hw,
1042 IEEE80211_AMPDU_TX_RESUME,
1043 &sta->sta, tid, &start_seq_num);
1044 }
1045#ifdef CONFIG_MAC80211_HT_DEBUG
1046 printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid);
1047#endif /* CONFIG_MAC80211_HT_DEBUG */
1048 spin_unlock_bh(&sta->lock);
1049 } else {
1050 sta->ampdu_mlme.addba_req_num[tid]++;
1051 /* this will allow the state check in stop_BA_session */
1052 *state = HT_AGG_STATE_OPERATIONAL;
1053 spin_unlock_bh(&sta->lock);
1054 ieee80211_stop_tx_ba_session(hw, sta->sta.addr, tid,
1055 WLAN_BACK_INITIATOR);
1056 }
1057} 209}
1058 210
1059void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, 211void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
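The removed timer callbacks above (sta_addba_resp_timer_expired() and sta_rx_agg_session_timer_expired()) recover both the station and the TID from the timer's single unsigned long argument, as their comments describe: every slot of the per-station timer_to_tid[] array stores its own index, so the callback reads the TID through the pointer and then steps back to the containing sta_info with container_of(). A minimal, self-contained sketch of that idiom follows; the structure and function names are invented for illustration and are not mac80211 code.

#include <linux/kernel.h>	/* container_of() */
#include <linux/types.h>

#define EXAMPLE_TID_NUM 16

struct example_sta {
	/* slot i holds the value i, so a pointer to a slot encodes the TID */
	u8 timer_to_tid[EXAMPLE_TID_NUM];
	/* ... remaining per-station state ... */
};

static void example_timer_expired(unsigned long data)
{
	u8 *ptid = (u8 *)data;		/* points at timer_to_tid[tid] */
	u8 tid = *ptid;			/* the slot stores its own index */
	struct example_sta *sta =
		container_of(ptid - tid, struct example_sta, timer_to_tid[0]);

	/* both the owning structure and the TID are now available */
	(void)sta;
	(void)tid;
}

The arming side is what makes this work: the removed code sets the timer's .data to (unsigned long)&sta->timer_to_tid[tid] when the session is set up, so the expiry callback always receives a pointer into that array.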
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f3eec989662..2cb743ed9f9 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -43,7 +43,7 @@ struct ieee80211_local;
43 43
44/* Required encryption head and tailroom */ 44/* Required encryption head and tailroom */
45#define IEEE80211_ENCRYPT_HEADROOM 8 45#define IEEE80211_ENCRYPT_HEADROOM 8
46#define IEEE80211_ENCRYPT_TAILROOM 12 46#define IEEE80211_ENCRYPT_TAILROOM 18
47 47
48/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent 48/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
49 * reception of at least three fragmented frames. This limit can be increased 49 * reception of at least three fragmented frames. This limit can be increased
@@ -57,6 +57,8 @@ struct ieee80211_local;
57 */ 57 */
58#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) 58#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
59 59
60#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
61
60struct ieee80211_fragment_entry { 62struct ieee80211_fragment_entry {
61 unsigned long first_frag_time; 63 unsigned long first_frag_time;
62 unsigned int seq; 64 unsigned int seq;
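The TU_TO_EXP_TIME() macro added above turns an 802.11 time-unit count into an absolute jiffies expiry; one TU is 1024 microseconds, so for example 100 TU is 102400 us, roughly 102 ms. A short usage sketch under those assumptions, with the conversion repeated locally and an invented timer so the fragment stands alone:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* same conversion as the TU_TO_EXP_TIME() helper added above */
#define EXAMPLE_TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))

static struct timer_list example_timer;		/* illustrative only */

static void example_arm_for_100_tu(void)
{
	/* 100 TU = 102400 us; the timer fires about 102 ms from now */
	mod_timer(&example_timer, EXAMPLE_TU_TO_EXP_TIME(100));
}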
@@ -70,43 +72,36 @@ struct ieee80211_fragment_entry {
70 72
71 73
72struct ieee80211_bss { 74struct ieee80211_bss {
73 struct list_head list; 75 /* Yes, this is a hack */
74 struct ieee80211_bss *hnext; 76 struct cfg80211_bss cbss;
75 size_t ssid_len;
76 77
77 atomic_t users; 78 /* don't want to look up all the time */
78 79 size_t ssid_len;
79 u8 bssid[ETH_ALEN];
80 u8 ssid[IEEE80211_MAX_SSID_LEN]; 80 u8 ssid[IEEE80211_MAX_SSID_LEN];
81
81 u8 dtim_period; 82 u8 dtim_period;
82 u16 capability; /* host byte order */ 83
83 enum ieee80211_band band;
84 int freq;
85 int signal, noise, qual;
86 u8 *ies; /* all information elements from the last Beacon or Probe
87 * Response frames; note Beacon frame is not allowed to
88 * override values from Probe Response */
89 size_t ies_len;
90 bool wmm_used; 84 bool wmm_used;
85
86 unsigned long last_probe_resp;
87
91#ifdef CONFIG_MAC80211_MESH 88#ifdef CONFIG_MAC80211_MESH
92 u8 *mesh_id; 89 u8 *mesh_id;
93 size_t mesh_id_len; 90 size_t mesh_id_len;
94 u8 *mesh_cfg; 91 u8 *mesh_cfg;
95#endif 92#endif
93
96#define IEEE80211_MAX_SUPP_RATES 32 94#define IEEE80211_MAX_SUPP_RATES 32
97 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 95 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
98 size_t supp_rates_len; 96 size_t supp_rates_len;
99 u64 timestamp;
100 int beacon_int;
101 97
102 unsigned long last_probe_resp; 98 /*
103 unsigned long last_update; 99 * During assocation, we save an ERP value from a probe response so
104
105 /* during assocation, we save an ERP value from a probe response so
106 * that we can feed ERP info to the driver when handling the 100 * that we can feed ERP info to the driver when handling the
107 * association completes. these fields probably won't be up-to-date 101 * association completes. these fields probably won't be up-to-date
108 * otherwise, you probably don't want to use them. */ 102 * otherwise, you probably don't want to use them.
109 int has_erp_value; 103 */
104 bool has_erp_value;
110 u8 erp_value; 105 u8 erp_value;
111}; 106};
112 107
@@ -258,6 +253,9 @@ struct mesh_preq_queue {
258#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) 253#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
259#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 254#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
260#define IEEE80211_STA_PRIVACY_INVOKED BIT(13) 255#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
256#define IEEE80211_STA_TKIP_WEP_USED BIT(14)
257#define IEEE80211_STA_CSA_RECEIVED BIT(15)
258#define IEEE80211_STA_MFP_ENABLED BIT(16)
261/* flags for MLME request */ 259/* flags for MLME request */
262#define IEEE80211_STA_REQ_SCAN 0 260#define IEEE80211_STA_REQ_SCAN 0
263#define IEEE80211_STA_REQ_DIRECT_PROBE 1 261#define IEEE80211_STA_REQ_DIRECT_PROBE 1
@@ -282,13 +280,13 @@ enum ieee80211_sta_mlme_state {
282 280
283struct ieee80211_if_sta { 281struct ieee80211_if_sta {
284 struct timer_list timer; 282 struct timer_list timer;
283 struct timer_list chswitch_timer;
285 struct work_struct work; 284 struct work_struct work;
285 struct work_struct chswitch_work;
286 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 286 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
287 u8 ssid[IEEE80211_MAX_SSID_LEN]; 287 u8 ssid[IEEE80211_MAX_SSID_LEN];
288 enum ieee80211_sta_mlme_state state; 288 enum ieee80211_sta_mlme_state state;
289 size_t ssid_len; 289 size_t ssid_len;
290 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
291 size_t scan_ssid_len;
292 u16 aid; 290 u16 aid;
293 u16 ap_capab, capab; 291 u16 ap_capab, capab;
294 u8 *extra_ie; /* to be added to the end of AssocReq */ 292 u8 *extra_ie; /* to be added to the end of AssocReq */
@@ -315,11 +313,33 @@ struct ieee80211_if_sta {
315 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ 313 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
316 int auth_transaction; 314 int auth_transaction;
317 315
316 enum {
317 IEEE80211_MFP_DISABLED,
318 IEEE80211_MFP_OPTIONAL,
319 IEEE80211_MFP_REQUIRED
320 } mfp; /* management frame protection */
321
318 unsigned long ibss_join_req; 322 unsigned long ibss_join_req;
319 struct sk_buff *probe_resp; /* ProbeResp template for IBSS */ 323 struct sk_buff *probe_resp; /* ProbeResp template for IBSS */
320 u32 supp_rates_bits[IEEE80211_NUM_BANDS]; 324 u32 supp_rates_bits[IEEE80211_NUM_BANDS];
321 325
322 int wmm_last_param_set; 326 int wmm_last_param_set;
327
328 /* Extra IE data for management frames */
329 u8 *ie_probereq;
330 size_t ie_probereq_len;
331 u8 *ie_proberesp;
332 size_t ie_proberesp_len;
333 u8 *ie_auth;
334 size_t ie_auth_len;
335 u8 *ie_assocreq;
336 size_t ie_assocreq_len;
337 u8 *ie_reassocreq;
338 size_t ie_reassocreq_len;
339 u8 *ie_deauth;
340 size_t ie_deauth_len;
341 u8 *ie_disassoc;
342 size_t ie_disassoc_len;
323}; 343};
324 344
325struct ieee80211_if_mesh { 345struct ieee80211_if_mesh {
@@ -404,8 +424,10 @@ struct ieee80211_sub_if_data {
404 unsigned int fragment_next; 424 unsigned int fragment_next;
405 425
406#define NUM_DEFAULT_KEYS 4 426#define NUM_DEFAULT_KEYS 4
407 struct ieee80211_key *keys[NUM_DEFAULT_KEYS]; 427#define NUM_DEFAULT_MGMT_KEYS 2
428 struct ieee80211_key *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
408 struct ieee80211_key *default_key; 429 struct ieee80211_key *default_key;
430 struct ieee80211_key *default_mgmt_key;
409 431
410 u16 sequence_number; 432 u16 sequence_number;
411 433
@@ -477,6 +499,7 @@ struct ieee80211_sub_if_data {
477 } debugfs; 499 } debugfs;
478 struct { 500 struct {
479 struct dentry *default_key; 501 struct dentry *default_key;
502 struct dentry *default_mgmt_key;
480 } common_debugfs; 503 } common_debugfs;
481 504
482#ifdef CONFIG_MAC80211_MESH 505#ifdef CONFIG_MAC80211_MESH
@@ -541,6 +564,7 @@ enum {
541enum queue_stop_reason { 564enum queue_stop_reason {
542 IEEE80211_QUEUE_STOP_REASON_DRIVER, 565 IEEE80211_QUEUE_STOP_REASON_DRIVER,
543 IEEE80211_QUEUE_STOP_REASON_PS, 566 IEEE80211_QUEUE_STOP_REASON_PS,
567 IEEE80211_QUEUE_STOP_REASON_CSA
544}; 568};
545 569
546/* maximum number of hardware queues we support. */ 570/* maximum number of hardware queues we support. */
@@ -568,7 +592,6 @@ struct ieee80211_local {
568 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss; 592 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss;
569 unsigned int filter_flags; /* FIF_* */ 593 unsigned int filter_flags; /* FIF_* */
570 struct iw_statistics wstats; 594 struct iw_statistics wstats;
571 u8 wstats_flags;
572 bool tim_in_locked_section; /* see ieee80211_beacon_get() */ 595 bool tim_in_locked_section; /* see ieee80211_beacon_get() */
573 int tx_headroom; /* required headroom for hardware/radiotap */ 596 int tx_headroom; /* required headroom for hardware/radiotap */
574 597
@@ -612,7 +635,9 @@ struct ieee80211_local {
612 struct crypto_blkcipher *wep_rx_tfm; 635 struct crypto_blkcipher *wep_rx_tfm;
613 u32 wep_iv; 636 u32 wep_iv;
614 637
638 /* see iface.c */
615 struct list_head interfaces; 639 struct list_head interfaces;
640 struct mutex iflist_mtx;
616 641
617 /* 642 /*
618 * Key lock, protects sdata's key_list and sta_info's 643 * Key lock, protects sdata's key_list and sta_info's
@@ -623,20 +648,18 @@ struct ieee80211_local {
623 648
624 /* Scanning and BSS list */ 649 /* Scanning and BSS list */
625 bool sw_scanning, hw_scanning; 650 bool sw_scanning, hw_scanning;
651 struct cfg80211_ssid scan_ssid;
652 struct cfg80211_scan_request int_scan_req;
653 struct cfg80211_scan_request *scan_req;
654 struct ieee80211_channel *scan_channel;
626 int scan_channel_idx; 655 int scan_channel_idx;
627 enum ieee80211_band scan_band;
628 656
629 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; 657 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
630 unsigned long last_scan_completed; 658 unsigned long last_scan_completed;
631 struct delayed_work scan_work; 659 struct delayed_work scan_work;
632 struct ieee80211_sub_if_data *scan_sdata; 660 struct ieee80211_sub_if_data *scan_sdata;
633 struct ieee80211_channel *oper_channel, *scan_channel;
634 enum nl80211_channel_type oper_channel_type; 661 enum nl80211_channel_type oper_channel_type;
635 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 662 struct ieee80211_channel *oper_channel, *csa_channel;
636 size_t scan_ssid_len;
637 struct list_head bss_list;
638 struct ieee80211_bss *bss_hash[STA_HASH_SIZE];
639 spinlock_t bss_lock;
640 663
641 /* SNMP counters */ 664 /* SNMP counters */
642 /* dot11CountersTable */ 665 /* dot11CountersTable */
@@ -649,7 +672,6 @@ struct ieee80211_local {
649 u32 dot11ReceivedFragmentCount; 672 u32 dot11ReceivedFragmentCount;
650 u32 dot11MulticastReceivedFrameCount; 673 u32 dot11MulticastReceivedFrameCount;
651 u32 dot11TransmittedFrameCount; 674 u32 dot11TransmittedFrameCount;
652 u32 dot11WEPUndecryptableCount;
653 675
654#ifdef CONFIG_MAC80211_LEDS 676#ifdef CONFIG_MAC80211_LEDS
655 int tx_led_counter, rx_led_counter; 677 int tx_led_counter, rx_led_counter;
@@ -696,11 +718,14 @@ struct ieee80211_local {
696 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 718 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
697 719
698 bool powersave; 720 bool powersave;
699 int dynamic_ps_timeout; 721 bool pspolling;
700 struct work_struct dynamic_ps_enable_work; 722 struct work_struct dynamic_ps_enable_work;
701 struct work_struct dynamic_ps_disable_work; 723 struct work_struct dynamic_ps_disable_work;
702 struct timer_list dynamic_ps_timer; 724 struct timer_list dynamic_ps_timer;
703 725
726 int user_power_level; /* in dBm */
727 int power_constr_level; /* in dBm */
728
704#ifdef CONFIG_MAC80211_DEBUGFS 729#ifdef CONFIG_MAC80211_DEBUGFS
705 struct local_debugfsdentries { 730 struct local_debugfsdentries {
706 struct dentry *rcdir; 731 struct dentry *rcdir;
@@ -712,6 +737,7 @@ struct ieee80211_local {
712 struct dentry *long_retry_limit; 737 struct dentry *long_retry_limit;
713 struct dentry *total_ps_buffered; 738 struct dentry *total_ps_buffered;
714 struct dentry *wep_iv; 739 struct dentry *wep_iv;
740 struct dentry *tsf;
715 struct dentry *statistics; 741 struct dentry *statistics;
716 struct local_debugfsdentries_statsdentries { 742 struct local_debugfsdentries_statsdentries {
717 struct dentry *transmitted_fragment_count; 743 struct dentry *transmitted_fragment_count;
@@ -805,6 +831,7 @@ struct ieee802_11_elems {
805 u8 *country_elem; 831 u8 *country_elem;
806 u8 *pwr_constr_elem; 832 u8 *pwr_constr_elem;
807 u8 *quiet_elem; /* first quite element */ 833 u8 *quiet_elem; /* first quite element */
834 u8 *timeout_int;
808 835
809 /* length of them, respectively */ 836 /* length of them, respectively */
810 u8 ssid_len; 837 u8 ssid_len;
@@ -832,6 +859,7 @@ struct ieee802_11_elems {
832 u8 pwr_constr_elem_len; 859 u8 pwr_constr_elem_len;
833 u8 quiet_elem_len; 860 u8 quiet_elem_len;
834 u8 num_of_quiet_elem; /* can be more the one */ 861 u8 num_of_quiet_elem; /* can be more the one */
862 u8 timeout_int_len;
835}; 863};
836 864
837static inline struct ieee80211_local *hw_to_local( 865static inline struct ieee80211_local *hw_to_local(
@@ -875,19 +903,21 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
875void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata, 903void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
876 struct ieee80211_if_sta *ifsta); 904 struct ieee80211_if_sta *ifsta);
877struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 905struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
878 u8 *bssid, u8 *addr, u64 supp_rates); 906 u8 *bssid, u8 *addr, u32 supp_rates);
879int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason); 907int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason);
880int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason); 908int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
881u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); 909u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
882u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 910u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
883 struct ieee802_11_elems *elems, 911 struct ieee802_11_elems *elems,
884 enum ieee80211_band band); 912 enum ieee80211_band band);
885void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 913void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
886 u8 *ssid, size_t ssid_len); 914 u8 *ssid, size_t ssid_len);
915void ieee80211_send_pspoll(struct ieee80211_local *local,
916 struct ieee80211_sub_if_data *sdata);
887 917
888/* scan/BSS handling */ 918/* scan/BSS handling */
889int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 919int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
890 u8 *ssid, size_t ssid_len); 920 struct cfg80211_scan_request *req);
891int ieee80211_scan_results(struct ieee80211_local *local, 921int ieee80211_scan_results(struct ieee80211_local *local,
892 struct iw_request_info *info, 922 struct iw_request_info *info,
893 char *buf, size_t len); 923 char *buf, size_t len);
@@ -895,29 +925,27 @@ ieee80211_rx_result
895ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, 925ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
896 struct sk_buff *skb, 926 struct sk_buff *skb,
897 struct ieee80211_rx_status *rx_status); 927 struct ieee80211_rx_status *rx_status);
898void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
899void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
900int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, 928int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
901 char *ie, size_t len); 929 char *ie, size_t len);
902 930
903void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); 931void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
904int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, 932int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
905 u8 *ssid, size_t ssid_len); 933 struct cfg80211_scan_request *req);
906struct ieee80211_bss * 934struct ieee80211_bss *
907ieee80211_bss_info_update(struct ieee80211_local *local, 935ieee80211_bss_info_update(struct ieee80211_local *local,
908 struct ieee80211_rx_status *rx_status, 936 struct ieee80211_rx_status *rx_status,
909 struct ieee80211_mgmt *mgmt, 937 struct ieee80211_mgmt *mgmt,
910 size_t len, 938 size_t len,
911 struct ieee802_11_elems *elems, 939 struct ieee802_11_elems *elems,
912 int freq, bool beacon); 940 struct ieee80211_channel *channel,
913struct ieee80211_bss * 941 bool beacon);
914ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
915 u8 *ssid, u8 ssid_len);
916struct ieee80211_bss * 942struct ieee80211_bss *
917ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, 943ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
918 u8 *ssid, u8 ssid_len); 944 u8 *ssid, u8 ssid_len);
919void ieee80211_rx_bss_put(struct ieee80211_local *local, 945void ieee80211_rx_bss_put(struct ieee80211_local *local,
920 struct ieee80211_bss *bss); 946 struct ieee80211_bss *bss);
947void ieee80211_rx_bss_remove(struct ieee80211_sub_if_data *sdata, u8 *bssid,
948 int freq, u8 *ssid, u8 ssid_len);
921 949
922/* interface handling */ 950/* interface handling */
923int ieee80211_if_add(struct ieee80211_local *local, const char *name, 951int ieee80211_if_add(struct ieee80211_local *local, const char *name,
@@ -943,10 +971,15 @@ u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
943 struct ieee80211_ht_info *hti, 971 struct ieee80211_ht_info *hti,
944 u16 ap_ht_cap_flags); 972 u16 ap_ht_cap_flags);
945void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn); 973void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
974void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
975 const u8 *da, u16 tid,
976 u16 initiator, u16 reason_code);
946 977
947void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, 978void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
948 u16 tid, u16 initiator, u16 reason); 979 u16 tid, u16 initiator, u16 reason);
949void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr); 980void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
981 u16 initiator, u16 reason);
982void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
950void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, 983void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
951 struct sta_info *sta, 984 struct sta_info *sta,
952 struct ieee80211_mgmt *mgmt, size_t len); 985 struct ieee80211_mgmt *mgmt, size_t len);
@@ -959,10 +992,25 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
959 struct ieee80211_mgmt *mgmt, 992 struct ieee80211_mgmt *mgmt,
960 size_t len); 993 size_t len);
961 994
995int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
996 enum ieee80211_back_parties initiator);
997
962/* Spectrum management */ 998/* Spectrum management */
963void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 999void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
964 struct ieee80211_mgmt *mgmt, 1000 struct ieee80211_mgmt *mgmt,
965 size_t len); 1001 size_t len);
1002void ieee80211_chswitch_timer(unsigned long data);
1003void ieee80211_chswitch_work(struct work_struct *work);
1004void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1005 struct ieee80211_channel_sw_ie *sw_elem,
1006 struct ieee80211_bss *bss);
1007void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
1008 u16 capab_info, u8 *pwr_constr_elem,
1009 u8 pwr_constr_elem_len);
1010
1011/* Suspend/resume */
1012int __ieee80211_suspend(struct ieee80211_hw *hw);
1013int __ieee80211_resume(struct ieee80211_hw *hw);
966 1014
967/* utility functions/constants */ 1015/* utility functions/constants */
968extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1016extern void *mac80211_wiphy_privid; /* for wiphy privid */
@@ -980,12 +1028,15 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
980void ieee802_11_parse_elems(u8 *start, size_t len, 1028void ieee802_11_parse_elems(u8 *start, size_t len,
981 struct ieee802_11_elems *elems); 1029 struct ieee802_11_elems *elems);
982int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq); 1030int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
983u64 ieee80211_mandatory_rates(struct ieee80211_local *local, 1031u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
984 enum ieee80211_band band); 1032 enum ieee80211_band band);
985 1033
986void ieee80211_dynamic_ps_enable_work(struct work_struct *work); 1034void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
987void ieee80211_dynamic_ps_disable_work(struct work_struct *work); 1035void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
988void ieee80211_dynamic_ps_timer(unsigned long data); 1036void ieee80211_dynamic_ps_timer(unsigned long data);
1037void ieee80211_send_nullfunc(struct ieee80211_local *local,
1038 struct ieee80211_sub_if_data *sdata,
1039 int powersave);
989 1040
990void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1041void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
991 enum queue_stop_reason reason); 1042 enum queue_stop_reason reason);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b9074824862..df94b936526 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -21,6 +21,23 @@
21#include "mesh.h" 21#include "mesh.h"
22#include "led.h" 22#include "led.h"
23 23
24/**
25 * DOC: Interface list locking
26 *
27 * The interface list in each struct ieee80211_local is protected
28 * three-fold:
29 *
30 * (1) modifications may only be done under the RTNL
31 * (2) modifications and readers are protected against each other by
32 * the iflist_mtx.
33 * (3) modifications are done in an RCU manner so atomic readers
34 * can traverse the list in RCU-safe blocks.
35 *
36 * As a consequence, reads (traversals) of the list can be protected
37 * by either the RTNL, the iflist_mtx or RCU.
38 */
39
40
24static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) 41static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
25{ 42{
26 int meshhdrlen; 43 int meshhdrlen;
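
The "Interface list locking" documentation added above can be illustrated with a short kernel-style sketch of the three protection mechanisms working together. This is an illustrative fragment only, not part of the patch; the my_local/my_sdata types and the two helper functions are hypothetical stand-ins for the real mac80211 structures.

/* Illustrative sketch of the three-fold iflist protection described above.
 * Not part of this patch; "my_local"/"my_sdata" are placeholder types.
 */
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>

struct my_sdata {
	struct list_head list;
};

struct my_local {
	struct mutex iflist_mtx;
	struct list_head interfaces;
};

/* writer: runs under the RTNL, takes iflist_mtx, modifies in an RCU manner */
static void my_if_add(struct my_local *local, struct my_sdata *sdata)
{
	ASSERT_RTNL();
	mutex_lock(&local->iflist_mtx);
	list_add_tail_rcu(&sdata->list, &local->interfaces);
	mutex_unlock(&local->iflist_mtx);
}

/* atomic reader: RCU protection alone is enough for traversal */
static int my_count_interfaces(struct my_local *local)
{
	struct my_sdata *sdata;
	int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list)
		n++;
	rcu_read_unlock();
	return n;
}

A sleeping reader would instead take either the RTNL or iflist_mtx, which is why ieee80211_remove_interfaces() below only holds the mutex around the list_del() itself.
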
@@ -345,8 +362,7 @@ static int ieee80211_stop(struct net_device *dev)
345 362
346 list_for_each_entry_rcu(sta, &local->sta_list, list) { 363 list_for_each_entry_rcu(sta, &local->sta_list, list) {
347 if (sta->sdata == sdata) 364 if (sta->sdata == sdata)
348 ieee80211_sta_tear_down_BA_sessions(sdata, 365 ieee80211_sta_tear_down_BA_sessions(sta);
349 sta->sta.addr);
350 } 366 }
351 367
352 rcu_read_unlock(); 368 rcu_read_unlock();
@@ -383,6 +399,8 @@ static int ieee80211_stop(struct net_device *dev)
383 atomic_dec(&local->iff_promiscs); 399 atomic_dec(&local->iff_promiscs);
384 400
385 dev_mc_unsync(local->mdev, dev); 401 dev_mc_unsync(local->mdev, dev);
402 del_timer_sync(&local->dynamic_ps_timer);
403 cancel_work_sync(&local->dynamic_ps_enable_work);
386 404
387 /* APs need special treatment */ 405 /* APs need special treatment */
388 if (sdata->vif.type == NL80211_IFTYPE_AP) { 406 if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -441,6 +459,7 @@ static int ieee80211_stop(struct net_device *dev)
441 WLAN_REASON_DEAUTH_LEAVING); 459 WLAN_REASON_DEAUTH_LEAVING);
442 460
443 memset(sdata->u.sta.bssid, 0, ETH_ALEN); 461 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
462 del_timer_sync(&sdata->u.sta.chswitch_timer);
444 del_timer_sync(&sdata->u.sta.timer); 463 del_timer_sync(&sdata->u.sta.timer);
445 /* 464 /*
446 * If the timer fired while we waited for it, it will have 465 * If the timer fired while we waited for it, it will have
@@ -450,6 +469,7 @@ static int ieee80211_stop(struct net_device *dev)
450 * it no longer is. 469 * it no longer is.
451 */ 470 */
452 cancel_work_sync(&sdata->u.sta.work); 471 cancel_work_sync(&sdata->u.sta.work);
472 cancel_work_sync(&sdata->u.sta.chswitch_work);
453 /* 473 /*
454 * When we get here, the interface is marked down. 474 * When we get here, the interface is marked down.
455 * Call synchronize_rcu() to wait for the RX path 475 * Call synchronize_rcu() to wait for the RX path
@@ -459,7 +479,8 @@ static int ieee80211_stop(struct net_device *dev)
459 synchronize_rcu(); 479 synchronize_rcu();
460 skb_queue_purge(&sdata->u.sta.skb_queue); 480 skb_queue_purge(&sdata->u.sta.skb_queue);
461 481
462 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; 482 sdata->u.sta.flags &= ~(IEEE80211_STA_PRIVACY_INVOKED |
483 IEEE80211_STA_TKIP_WEP_USED);
463 kfree(sdata->u.sta.extra_ie); 484 kfree(sdata->u.sta.extra_ie);
464 sdata->u.sta.extra_ie = NULL; 485 sdata->u.sta.extra_ie = NULL;
465 sdata->u.sta.extra_ie_len = 0; 486 sdata->u.sta.extra_ie_len = 0;
@@ -501,7 +522,7 @@ static int ieee80211_stop(struct net_device *dev)
501 * scan event to userspace -- the scan is incomplete. 522 * scan event to userspace -- the scan is incomplete.
502 */ 523 */
503 if (local->sw_scanning) 524 if (local->sw_scanning)
504 ieee80211_scan_completed(&local->hw); 525 ieee80211_scan_completed(&local->hw, true);
505 } 526 }
506 527
507 conf.vif = &sdata->vif; 528 conf.vif = &sdata->vif;
@@ -569,19 +590,6 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
569 dev_mc_sync(local->mdev, dev); 590 dev_mc_sync(local->mdev, dev);
570} 591}
571 592
572static void ieee80211_if_setup(struct net_device *dev)
573{
574 ether_setup(dev);
575 dev->hard_start_xmit = ieee80211_subif_start_xmit;
576 dev->wireless_handlers = &ieee80211_iw_handler_def;
577 dev->set_multicast_list = ieee80211_set_multicast_list;
578 dev->change_mtu = ieee80211_change_mtu;
579 dev->open = ieee80211_open;
580 dev->stop = ieee80211_stop;
581 dev->destructor = free_netdev;
582 /* we will validate the address ourselves in ->open */
583 dev->validate_addr = NULL;
584}
585/* 593/*
586 * Called when the netdev is removed or, by the code below, before 594 * Called when the netdev is removed or, by the code below, before
587 * the interface type changes. 595 * the interface type changes.
@@ -627,6 +635,13 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
627 kfree(sdata->u.sta.assocreq_ies); 635 kfree(sdata->u.sta.assocreq_ies);
628 kfree(sdata->u.sta.assocresp_ies); 636 kfree(sdata->u.sta.assocresp_ies);
629 kfree_skb(sdata->u.sta.probe_resp); 637 kfree_skb(sdata->u.sta.probe_resp);
638 kfree(sdata->u.sta.ie_probereq);
639 kfree(sdata->u.sta.ie_proberesp);
640 kfree(sdata->u.sta.ie_auth);
641 kfree(sdata->u.sta.ie_assocreq);
642 kfree(sdata->u.sta.ie_reassocreq);
643 kfree(sdata->u.sta.ie_deauth);
644 kfree(sdata->u.sta.ie_disassoc);
630 break; 645 break;
631 case NL80211_IFTYPE_WDS: 646 case NL80211_IFTYPE_WDS:
632 case NL80211_IFTYPE_AP_VLAN: 647 case NL80211_IFTYPE_AP_VLAN:
@@ -642,6 +657,34 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
642 WARN_ON(flushed); 657 WARN_ON(flushed);
643} 658}
644 659
660static const struct net_device_ops ieee80211_dataif_ops = {
661 .ndo_open = ieee80211_open,
662 .ndo_stop = ieee80211_stop,
663 .ndo_uninit = ieee80211_teardown_sdata,
664 .ndo_start_xmit = ieee80211_subif_start_xmit,
665 .ndo_set_multicast_list = ieee80211_set_multicast_list,
666 .ndo_change_mtu = ieee80211_change_mtu,
667 .ndo_set_mac_address = eth_mac_addr,
668};
669
670static const struct net_device_ops ieee80211_monitorif_ops = {
671 .ndo_open = ieee80211_open,
672 .ndo_stop = ieee80211_stop,
673 .ndo_uninit = ieee80211_teardown_sdata,
674 .ndo_start_xmit = ieee80211_monitor_start_xmit,
675 .ndo_set_multicast_list = ieee80211_set_multicast_list,
676 .ndo_change_mtu = ieee80211_change_mtu,
677 .ndo_set_mac_address = eth_mac_addr,
678};
679
680static void ieee80211_if_setup(struct net_device *dev)
681{
682 ether_setup(dev);
683 dev->netdev_ops = &ieee80211_dataif_ops;
684 dev->wireless_handlers = &ieee80211_iw_handler_def;
685 dev->destructor = free_netdev;
686}
687
645/* 688/*
646 * Helper function to initialise an interface to a specific type. 689 * Helper function to initialise an interface to a specific type.
647 */ 690 */
@@ -653,7 +696,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
653 696
654 /* and set some type-dependent values */ 697 /* and set some type-dependent values */
655 sdata->vif.type = type; 698 sdata->vif.type = type;
656 sdata->dev->hard_start_xmit = ieee80211_subif_start_xmit; 699 sdata->dev->netdev_ops = &ieee80211_dataif_ops;
657 sdata->wdev.iftype = type; 700 sdata->wdev.iftype = type;
658 701
659 /* only monitor differs */ 702 /* only monitor differs */
@@ -674,7 +717,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
674 break; 717 break;
675 case NL80211_IFTYPE_MONITOR: 718 case NL80211_IFTYPE_MONITOR:
676 sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; 719 sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
677 sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; 720 sdata->dev->netdev_ops = &ieee80211_monitorif_ops;
678 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | 721 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
679 MONITOR_FLAG_OTHER_BSS; 722 MONITOR_FLAG_OTHER_BSS;
680 break; 723 break;
@@ -780,15 +823,15 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
780 if (ret) 823 if (ret)
781 goto fail; 824 goto fail;
782 825
783 ndev->uninit = ieee80211_teardown_sdata;
784
785 if (ieee80211_vif_is_mesh(&sdata->vif) && 826 if (ieee80211_vif_is_mesh(&sdata->vif) &&
786 params && params->mesh_id_len) 827 params && params->mesh_id_len)
787 ieee80211_sdata_set_mesh_id(sdata, 828 ieee80211_sdata_set_mesh_id(sdata,
788 params->mesh_id_len, 829 params->mesh_id_len,
789 params->mesh_id); 830 params->mesh_id);
790 831
832 mutex_lock(&local->iflist_mtx);
791 list_add_tail_rcu(&sdata->list, &local->interfaces); 833 list_add_tail_rcu(&sdata->list, &local->interfaces);
834 mutex_unlock(&local->iflist_mtx);
792 835
793 if (new_dev) 836 if (new_dev)
794 *new_dev = ndev; 837 *new_dev = ndev;
@@ -804,7 +847,10 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
804{ 847{
805 ASSERT_RTNL(); 848 ASSERT_RTNL();
806 849
850 mutex_lock(&sdata->local->iflist_mtx);
807 list_del_rcu(&sdata->list); 851 list_del_rcu(&sdata->list);
852 mutex_unlock(&sdata->local->iflist_mtx);
853
808 synchronize_rcu(); 854 synchronize_rcu();
809 unregister_netdevice(sdata->dev); 855 unregister_netdevice(sdata->dev);
810} 856}
@@ -820,7 +866,16 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
820 ASSERT_RTNL(); 866 ASSERT_RTNL();
821 867
822 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 868 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
869 /*
870 * we cannot hold the iflist_mtx across unregister_netdevice,
871 * but we only need to hold it for list modifications to lock
872 * out readers since we're under the RTNL here as all other
873 * writers.
874 */
875 mutex_lock(&local->iflist_mtx);
823 list_del(&sdata->list); 876 list_del(&sdata->list);
877 mutex_unlock(&local->iflist_mtx);
878
824 unregister_netdevice(sdata->dev); 879 unregister_netdevice(sdata->dev);
825 } 880 }
826} 881}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 999f7aa4232..19b480de4bb 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -18,6 +18,7 @@
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "debugfs_key.h" 19#include "debugfs_key.h"
20#include "aes_ccm.h" 20#include "aes_ccm.h"
21#include "aes_cmac.h"
21 22
22 23
23/** 24/**
@@ -47,7 +48,6 @@
47 */ 48 */
48 49
49static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 50static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
50static const u8 zero_addr[ETH_ALEN];
51 51
52/* key mutex: used to synchronise todo runners */ 52/* key mutex: used to synchronise todo runners */
53static DEFINE_MUTEX(key_mutex); 53static DEFINE_MUTEX(key_mutex);
@@ -108,29 +108,18 @@ static void assert_key_lock(void)
108 WARN_ON(!mutex_is_locked(&key_mutex)); 108 WARN_ON(!mutex_is_locked(&key_mutex));
109} 109}
110 110
111static const u8 *get_mac_for_key(struct ieee80211_key *key) 111static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
112{ 112{
113 const u8 *addr = bcast_addr;
114
115 /*
116 * If we're an AP we won't ever receive frames with a non-WEP
117 * group key so we tell the driver that by using the zero MAC
118 * address to indicate a transmit-only key.
119 */
120 if (key->conf.alg != ALG_WEP &&
121 (key->sdata->vif.type == NL80211_IFTYPE_AP ||
122 key->sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
123 addr = zero_addr;
124
125 if (key->sta) 113 if (key->sta)
126 addr = key->sta->sta.addr; 114 return &key->sta->sta;
127 115
128 return addr; 116 return NULL;
129} 117}
130 118
131static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) 119static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
132{ 120{
133 const u8 *addr; 121 struct ieee80211_sub_if_data *sdata;
122 struct ieee80211_sta *sta;
134 int ret; 123 int ret;
135 124
136 assert_key_lock(); 125 assert_key_lock();
@@ -139,11 +128,16 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
139 if (!key->local->ops->set_key) 128 if (!key->local->ops->set_key)
140 return; 129 return;
141 130
142 addr = get_mac_for_key(key); 131 sta = get_sta_for_key(key);
132
133 sdata = key->sdata;
134 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
135 sdata = container_of(sdata->bss,
136 struct ieee80211_sub_if_data,
137 u.ap);
143 138
144 ret = key->local->ops->set_key(local_to_hw(key->local), SET_KEY, 139 ret = key->local->ops->set_key(local_to_hw(key->local), SET_KEY,
145 key->sdata->dev->dev_addr, addr, 140 &sdata->vif, sta, &key->conf);
146 &key->conf);
147 141
148 if (!ret) { 142 if (!ret) {
149 spin_lock(&todo_lock); 143 spin_lock(&todo_lock);
@@ -155,12 +149,13 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
155 printk(KERN_ERR "mac80211-%s: failed to set key " 149 printk(KERN_ERR "mac80211-%s: failed to set key "
156 "(%d, %pM) to hardware (%d)\n", 150 "(%d, %pM) to hardware (%d)\n",
157 wiphy_name(key->local->hw.wiphy), 151 wiphy_name(key->local->hw.wiphy),
158 key->conf.keyidx, addr, ret); 152 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
159} 153}
160 154
161static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) 155static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
162{ 156{
163 const u8 *addr; 157 struct ieee80211_sub_if_data *sdata;
158 struct ieee80211_sta *sta;
164 int ret; 159 int ret;
165 160
166 assert_key_lock(); 161 assert_key_lock();
@@ -176,17 +171,22 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
176 } 171 }
177 spin_unlock(&todo_lock); 172 spin_unlock(&todo_lock);
178 173
179 addr = get_mac_for_key(key); 174 sta = get_sta_for_key(key);
175 sdata = key->sdata;
176
177 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
178 sdata = container_of(sdata->bss,
179 struct ieee80211_sub_if_data,
180 u.ap);
180 181
181 ret = key->local->ops->set_key(local_to_hw(key->local), DISABLE_KEY, 182 ret = key->local->ops->set_key(local_to_hw(key->local), DISABLE_KEY,
182 key->sdata->dev->dev_addr, addr, 183 &sdata->vif, sta, &key->conf);
183 &key->conf);
184 184
185 if (ret) 185 if (ret)
186 printk(KERN_ERR "mac80211-%s: failed to remove key " 186 printk(KERN_ERR "mac80211-%s: failed to remove key "
187 "(%d, %pM) from hardware (%d)\n", 187 "(%d, %pM) from hardware (%d)\n",
188 wiphy_name(key->local->hw.wiphy), 188 wiphy_name(key->local->hw.wiphy),
189 key->conf.keyidx, addr, ret); 189 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
190 190
191 spin_lock(&todo_lock); 191 spin_lock(&todo_lock);
192 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 192 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
@@ -216,13 +216,38 @@ void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx)
216 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 216 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
217} 217}
218 218
219static void
220__ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx)
221{
222 struct ieee80211_key *key = NULL;
223
224 if (idx >= NUM_DEFAULT_KEYS &&
225 idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
226 key = sdata->keys[idx];
227
228 rcu_assign_pointer(sdata->default_mgmt_key, key);
229
230 if (key)
231 add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY);
232}
233
234void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
235 int idx)
236{
237 unsigned long flags;
238
239 spin_lock_irqsave(&sdata->local->key_lock, flags);
240 __ieee80211_set_default_mgmt_key(sdata, idx);
241 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
242}
243
219 244
220static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, 245static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
221 struct sta_info *sta, 246 struct sta_info *sta,
222 struct ieee80211_key *old, 247 struct ieee80211_key *old,
223 struct ieee80211_key *new) 248 struct ieee80211_key *new)
224{ 249{
225 int idx, defkey; 250 int idx, defkey, defmgmtkey;
226 251
227 if (new) 252 if (new)
228 list_add(&new->list, &sdata->key_list); 253 list_add(&new->list, &sdata->key_list);
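
The new __ieee80211_set_default_mgmt_key() only accepts indices in the management-key range, i.e. the slots following the ordinary default keys. A tiny standalone program mirroring that range check is shown below; the constants (4 data-key slots, 2 management-key slots) are the usual mac80211 values but are assumptions here, not taken from this diff.

/* Standalone sketch of the key index partitioning used above (assumed values). */
#include <stdbool.h>
#include <stdio.h>

#define NUM_DEFAULT_KEYS	4	/* WEP/TKIP/CCMP default key slots */
#define NUM_DEFAULT_MGMT_KEYS	2	/* BIP (AES-CMAC) IGTK slots */

static bool is_mgmt_key_idx(int idx)
{
	return idx >= NUM_DEFAULT_KEYS &&
	       idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
}

int main(void)
{
	for (int idx = 0; idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; idx++)
		printf("key index %d: %s\n", idx,
		       is_mgmt_key_idx(idx) ? "management (IGTK)" : "data");
	return 0;
}
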
@@ -238,13 +263,19 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
238 idx = new->conf.keyidx; 263 idx = new->conf.keyidx;
239 264
240 defkey = old && sdata->default_key == old; 265 defkey = old && sdata->default_key == old;
266 defmgmtkey = old && sdata->default_mgmt_key == old;
241 267
242 if (defkey && !new) 268 if (defkey && !new)
243 __ieee80211_set_default_key(sdata, -1); 269 __ieee80211_set_default_key(sdata, -1);
270 if (defmgmtkey && !new)
271 __ieee80211_set_default_mgmt_key(sdata, -1);
244 272
245 rcu_assign_pointer(sdata->keys[idx], new); 273 rcu_assign_pointer(sdata->keys[idx], new);
246 if (defkey && new) 274 if (defkey && new)
247 __ieee80211_set_default_key(sdata, new->conf.keyidx); 275 __ieee80211_set_default_key(sdata, new->conf.keyidx);
276 if (defmgmtkey && new)
277 __ieee80211_set_default_mgmt_key(sdata,
278 new->conf.keyidx);
248 } 279 }
249 280
250 if (old) { 281 if (old) {
@@ -263,7 +294,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
263{ 294{
264 struct ieee80211_key *key; 295 struct ieee80211_key *key;
265 296
266 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS); 297 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
267 298
268 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); 299 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
269 if (!key) 300 if (!key)
@@ -292,6 +323,10 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
292 key->conf.iv_len = CCMP_HDR_LEN; 323 key->conf.iv_len = CCMP_HDR_LEN;
293 key->conf.icv_len = CCMP_MIC_LEN; 324 key->conf.icv_len = CCMP_MIC_LEN;
294 break; 325 break;
326 case ALG_AES_CMAC:
327 key->conf.iv_len = 0;
328 key->conf.icv_len = sizeof(struct ieee80211_mmie);
329 break;
295 } 330 }
296 memcpy(key->conf.key, key_data, key_len); 331 memcpy(key->conf.key, key_data, key_len);
297 INIT_LIST_HEAD(&key->list); 332 INIT_LIST_HEAD(&key->list);
@@ -309,6 +344,19 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
309 } 344 }
310 } 345 }
311 346
347 if (alg == ALG_AES_CMAC) {
348 /*
349 * Initialize AES key state here as an optimization so that
350 * it does not need to be initialized for every packet.
351 */
352 key->u.aes_cmac.tfm =
353 ieee80211_aes_cmac_key_setup(key_data);
354 if (!key->u.aes_cmac.tfm) {
355 kfree(key);
356 return NULL;
357 }
358 }
359
312 return key; 360 return key;
313} 361}
314 362
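
The comment above about initializing the AES key state at key-allocation time describes a precompute-and-reuse pattern: the expensive key schedule is built once, then every protected frame only pays for the per-frame work. The sketch below shows the shape of that optimization with a dummy "schedule"; it is illustrative only and does not implement AES-CMAC or any real cipher.

/* Setup-once / use-many sketch of the per-key transform caching above.
 * The "schedule" is a dummy expansion, not real AES-CMAC.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_cmac_ctx {
	uint8_t schedule[32];	/* stands in for the crypto_cipher state */
};

/* done once, at key install time */
static void toy_cmac_key_setup(struct toy_cmac_ctx *ctx, const uint8_t key[16])
{
	for (int i = 0; i < 32; i++)
		ctx->schedule[i] = key[i % 16] ^ (uint8_t)i;
}

/* done per frame, reusing the precomputed state */
static uint8_t toy_cmac_tag(const struct toy_cmac_ctx *ctx,
			    const uint8_t *frame, size_t len)
{
	uint8_t tag = 0;

	for (size_t i = 0; i < len; i++)
		tag ^= frame[i] ^ ctx->schedule[i % 32];
	return tag;
}

int main(void)
{
	const uint8_t key[16] = { 0 };
	const uint8_t frame[] = "beacon";
	struct toy_cmac_ctx ctx;

	toy_cmac_key_setup(&ctx, key);		/* once per key */
	printf("tag=%02x\n", toy_cmac_tag(&ctx, frame, sizeof(frame)));
	return 0;
}
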
@@ -462,6 +510,8 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
462 510
463 if (key->conf.alg == ALG_CCMP) 511 if (key->conf.alg == ALG_CCMP)
464 ieee80211_aes_key_free(key->u.ccmp.tfm); 512 ieee80211_aes_key_free(key->u.ccmp.tfm);
513 if (key->conf.alg == ALG_AES_CMAC)
514 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
465 ieee80211_debugfs_key_remove(key); 515 ieee80211_debugfs_key_remove(key);
466 516
467 kfree(key); 517 kfree(key);
@@ -484,6 +534,7 @@ static void __ieee80211_key_todo(void)
484 list_del_init(&key->todo); 534 list_del_init(&key->todo);
485 todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS | 535 todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS |
486 KEY_FLAG_TODO_DEFKEY | 536 KEY_FLAG_TODO_DEFKEY |
537 KEY_FLAG_TODO_DEFMGMTKEY |
487 KEY_FLAG_TODO_HWACCEL_ADD | 538 KEY_FLAG_TODO_HWACCEL_ADD |
488 KEY_FLAG_TODO_HWACCEL_REMOVE | 539 KEY_FLAG_TODO_HWACCEL_REMOVE |
489 KEY_FLAG_TODO_DELETE); 540 KEY_FLAG_TODO_DELETE);
@@ -501,6 +552,11 @@ static void __ieee80211_key_todo(void)
501 ieee80211_debugfs_key_add_default(key->sdata); 552 ieee80211_debugfs_key_add_default(key->sdata);
502 work_done = true; 553 work_done = true;
503 } 554 }
555 if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) {
556 ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
557 ieee80211_debugfs_key_add_mgmt_default(key->sdata);
558 work_done = true;
559 }
504 if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) { 560 if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) {
505 ieee80211_key_enable_hw_accel(key); 561 ieee80211_key_enable_hw_accel(key);
506 work_done = true; 562 work_done = true;
@@ -536,6 +592,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
536 ieee80211_key_lock(); 592 ieee80211_key_lock();
537 593
538 ieee80211_debugfs_key_remove_default(sdata); 594 ieee80211_debugfs_key_remove_default(sdata);
595 ieee80211_debugfs_key_remove_mgmt_default(sdata);
539 596
540 spin_lock_irqsave(&sdata->local->key_lock, flags); 597 spin_lock_irqsave(&sdata->local->key_lock, flags);
541 list_for_each_entry_safe(key, tmp, &sdata->key_list, list) 598 list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 425816e0996..215d3ef42a4 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -46,6 +46,8 @@ struct sta_info;
46 * acceleration. 46 * acceleration.
47 * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated. 47 * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated.
48 * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs. 48 * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs.
49 * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs
50 * to be updated.
49 */ 51 */
50enum ieee80211_internal_key_flags { 52enum ieee80211_internal_key_flags {
51 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), 53 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
@@ -54,6 +56,7 @@ enum ieee80211_internal_key_flags {
54 KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3), 56 KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3),
55 KEY_FLAG_TODO_DEFKEY = BIT(4), 57 KEY_FLAG_TODO_DEFKEY = BIT(4),
56 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), 58 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5),
59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
57}; 60};
58 61
59struct tkip_ctx { 62struct tkip_ctx {
@@ -96,6 +99,16 @@ struct ieee80211_key {
96 u8 tx_crypto_buf[6 * AES_BLOCK_LEN]; 99 u8 tx_crypto_buf[6 * AES_BLOCK_LEN];
97 u8 rx_crypto_buf[6 * AES_BLOCK_LEN]; 100 u8 rx_crypto_buf[6 * AES_BLOCK_LEN];
98 } ccmp; 101 } ccmp;
102 struct {
103 u8 tx_pn[6];
104 u8 rx_pn[6];
105 struct crypto_cipher *tfm;
106 u32 replays; /* dot11RSNAStatsCMACReplays */
107 u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
108 /* scratch buffers for virt_to_page() (crypto API) */
109 u8 tx_crypto_buf[2 * AES_BLOCK_LEN];
110 u8 rx_crypto_buf[2 * AES_BLOCK_LEN];
111 } aes_cmac;
99 } u; 112 } u;
100 113
101 /* number of times this key has been used */ 114 /* number of times this key has been used */
@@ -114,6 +127,7 @@ struct ieee80211_key {
114 struct dentry *tx_spec; 127 struct dentry *tx_spec;
115 struct dentry *rx_spec; 128 struct dentry *rx_spec;
116 struct dentry *replays; 129 struct dentry *replays;
130 struct dentry *icverrors;
117 struct dentry *key; 131 struct dentry *key;
118 struct dentry *ifindex; 132 struct dentry *ifindex;
119 int cnt; 133 int cnt;
@@ -140,6 +154,8 @@ void ieee80211_key_link(struct ieee80211_key *key,
140 struct sta_info *sta); 154 struct sta_info *sta);
141void ieee80211_key_free(struct ieee80211_key *key); 155void ieee80211_key_free(struct ieee80211_key *key);
142void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); 156void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
157void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
158 int idx);
143void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); 159void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
144void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 160void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
145void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); 161void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 24b14363d6e..5667f4e8067 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -168,7 +168,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
168 return 0; 168 return 0;
169 169
170 memset(&conf, 0, sizeof(conf)); 170 memset(&conf, 0, sizeof(conf));
171 conf.changed = changed;
172 171
173 if (sdata->vif.type == NL80211_IFTYPE_STATION || 172 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
174 sdata->vif.type == NL80211_IFTYPE_ADHOC) 173 sdata->vif.type == NL80211_IFTYPE_ADHOC)
@@ -176,16 +175,59 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
176 else if (sdata->vif.type == NL80211_IFTYPE_AP) 175 else if (sdata->vif.type == NL80211_IFTYPE_AP)
177 conf.bssid = sdata->dev->dev_addr; 176 conf.bssid = sdata->dev->dev_addr;
178 else if (ieee80211_vif_is_mesh(&sdata->vif)) { 177 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
179 u8 zero[ETH_ALEN] = { 0 }; 178 static const u8 zero[ETH_ALEN] = { 0 };
180 conf.bssid = zero; 179 conf.bssid = zero;
181 } else { 180 } else {
182 WARN_ON(1); 181 WARN_ON(1);
183 return -EINVAL; 182 return -EINVAL;
184 } 183 }
185 184
185 switch (sdata->vif.type) {
186 case NL80211_IFTYPE_AP:
187 case NL80211_IFTYPE_ADHOC:
188 case NL80211_IFTYPE_MESH_POINT:
189 break;
190 default:
191 /* do not warn to simplify caller in scan.c */
192 changed &= ~IEEE80211_IFCC_BEACON_ENABLED;
193 if (WARN_ON(changed & IEEE80211_IFCC_BEACON))
194 return -EINVAL;
195 changed &= ~IEEE80211_IFCC_BEACON;
196 break;
197 }
198
199 if (changed & IEEE80211_IFCC_BEACON_ENABLED) {
200 if (local->sw_scanning) {
201 conf.enable_beacon = false;
202 } else {
203 /*
204 * Beacon should be enabled, but AP mode must
205 * check whether there is a beacon configured.
206 */
207 switch (sdata->vif.type) {
208 case NL80211_IFTYPE_AP:
209 conf.enable_beacon =
210 !!rcu_dereference(sdata->u.ap.beacon);
211 break;
212 case NL80211_IFTYPE_ADHOC:
213 conf.enable_beacon = !!sdata->u.sta.probe_resp;
214 break;
215 case NL80211_IFTYPE_MESH_POINT:
216 conf.enable_beacon = true;
217 break;
218 default:
219 /* not reached */
220 WARN_ON(1);
221 break;
222 }
223 }
224 }
225
186 if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID))) 226 if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
187 return -EINVAL; 227 return -EINVAL;
188 228
229 conf.changed = changed;
230
189 return local->ops->config_interface(local_to_hw(local), 231 return local->ops->config_interface(local_to_hw(local),
190 &sdata->vif, &conf); 232 &sdata->vif, &conf);
191} 233}
@@ -208,26 +250,22 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
208 } 250 }
209 251
210 if (chan != local->hw.conf.channel || 252 if (chan != local->hw.conf.channel ||
211 channel_type != local->hw.conf.ht.channel_type) { 253 channel_type != local->hw.conf.channel_type) {
212 local->hw.conf.channel = chan; 254 local->hw.conf.channel = chan;
213 local->hw.conf.ht.channel_type = channel_type; 255 local->hw.conf.channel_type = channel_type;
214 switch (channel_type) {
215 case NL80211_CHAN_NO_HT:
216 local->hw.conf.ht.enabled = false;
217 break;
218 case NL80211_CHAN_HT20:
219 case NL80211_CHAN_HT40MINUS:
220 case NL80211_CHAN_HT40PLUS:
221 local->hw.conf.ht.enabled = true;
222 break;
223 }
224 changed |= IEEE80211_CONF_CHANGE_CHANNEL; 256 changed |= IEEE80211_CONF_CHANGE_CHANNEL;
225 } 257 }
226 258
227 if (!local->hw.conf.power_level) 259 if (local->sw_scanning)
228 power = chan->max_power; 260 power = chan->max_power;
229 else 261 else
230 power = min(chan->max_power, local->hw.conf.power_level); 262 power = local->power_constr_level ?
263 (chan->max_power - local->power_constr_level) :
264 chan->max_power;
265
266 if (local->user_power_level)
267 power = min(power, local->user_power_level);
268
231 if (local->hw.conf.power_level != power) { 269 if (local->hw.conf.power_level != power) {
232 changed |= IEEE80211_CONF_CHANGE_POWER; 270 changed |= IEEE80211_CONF_CHANGE_POWER;
233 local->hw.conf.power_level = power; 271 local->hw.conf.power_level = power;
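
The hunk above changes how the transmit power level is chosen: full channel power while software scanning, otherwise the channel maximum reduced by any 802.11h power constraint, and finally capped by a user-requested level if one is set. A minimal standalone rendering of that selection logic (values below are made-up examples):

/* Standalone sketch of the TX power selection in the hunk above. */
#include <stdio.h>

static int effective_power(int chan_max_power, int power_constr_level,
			   int user_power_level, int sw_scanning)
{
	int power;

	if (sw_scanning)
		power = chan_max_power;
	else
		power = power_constr_level ?
			(chan_max_power - power_constr_level) :
			chan_max_power;

	/* a user-set level of 0 means "no user limit" */
	if (user_power_level && user_power_level < power)
		power = user_power_level;

	return power;
}

int main(void)
{
	/* 20 dBm channel limit, 3 dB 802.11h constraint, user cap 15 dBm */
	printf("%d dBm\n", effective_power(20, 3, 15, 0));
	return 0;
}
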
@@ -695,6 +733,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
695 return NULL; 733 return NULL;
696 734
697 wiphy->privid = mac80211_wiphy_privid; 735 wiphy->privid = mac80211_wiphy_privid;
736 wiphy->max_scan_ssids = 4;
737 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
738 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
739 sizeof(struct cfg80211_bss);
698 740
699 local = wiphy_priv(wiphy); 741 local = wiphy_priv(wiphy);
700 local->hw.wiphy = wiphy; 742 local->hw.wiphy = wiphy;
@@ -722,6 +764,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
722 local->hw.conf.radio_enabled = true; 764 local->hw.conf.radio_enabled = true;
723 765
724 INIT_LIST_HEAD(&local->interfaces); 766 INIT_LIST_HEAD(&local->interfaces);
767 mutex_init(&local->iflist_mtx);
725 768
726 spin_lock_init(&local->key_lock); 769 spin_lock_init(&local->key_lock);
727 770
@@ -754,6 +797,23 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
754} 797}
755EXPORT_SYMBOL(ieee80211_alloc_hw); 798EXPORT_SYMBOL(ieee80211_alloc_hw);
756 799
800static const struct net_device_ops ieee80211_master_ops = {
801 .ndo_start_xmit = ieee80211_master_start_xmit,
802 .ndo_open = ieee80211_master_open,
803 .ndo_stop = ieee80211_master_stop,
804 .ndo_set_multicast_list = ieee80211_master_set_multicast_list,
805 .ndo_select_queue = ieee80211_select_queue,
806};
807
808static void ieee80211_master_setup(struct net_device *mdev)
809{
810 mdev->type = ARPHRD_IEEE80211;
811 mdev->netdev_ops = &ieee80211_master_ops;
812 mdev->header_ops = &ieee80211_header_ops;
813 mdev->tx_queue_len = 1000;
814 mdev->addr_len = ETH_ALEN;
815}
816
757int ieee80211_register_hw(struct ieee80211_hw *hw) 817int ieee80211_register_hw(struct ieee80211_hw *hw)
758{ 818{
759 struct ieee80211_local *local = hw_to_local(hw); 819 struct ieee80211_local *local = hw_to_local(hw);
@@ -761,25 +821,33 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
761 enum ieee80211_band band; 821 enum ieee80211_band band;
762 struct net_device *mdev; 822 struct net_device *mdev;
763 struct ieee80211_master_priv *mpriv; 823 struct ieee80211_master_priv *mpriv;
824 int channels, i, j;
764 825
765 /* 826 /*
766 * generic code guarantees at least one band, 827 * generic code guarantees at least one band,
767 * set this very early because much code assumes 828 * set this very early because much code assumes
768 * that hw.conf.channel is assigned 829 * that hw.conf.channel is assigned
769 */ 830 */
831 channels = 0;
770 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 832 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
771 struct ieee80211_supported_band *sband; 833 struct ieee80211_supported_band *sband;
772 834
773 sband = local->hw.wiphy->bands[band]; 835 sband = local->hw.wiphy->bands[band];
774 if (sband) { 836 if (sband && !local->oper_channel) {
775 /* init channel we're on */ 837 /* init channel we're on */
776 local->hw.conf.channel = 838 local->hw.conf.channel =
777 local->oper_channel = 839 local->oper_channel =
778 local->scan_channel = &sband->channels[0]; 840 local->scan_channel = &sband->channels[0];
779 break;
780 } 841 }
842 if (sband)
843 channels += sband->n_channels;
781 } 844 }
782 845
846 local->int_scan_req.n_channels = channels;
847 local->int_scan_req.channels = kzalloc(sizeof(void *) * channels, GFP_KERNEL);
848 if (!local->int_scan_req.channels)
849 return -ENOMEM;
850
783 /* if low-level driver supports AP, we also support VLAN */ 851 /* if low-level driver supports AP, we also support VLAN */
784 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) 852 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP))
785 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); 853 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
@@ -789,7 +857,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
789 857
790 result = wiphy_register(local->hw.wiphy); 858 result = wiphy_register(local->hw.wiphy);
791 if (result < 0) 859 if (result < 0)
792 return result; 860 goto fail_wiphy_register;
793 861
794 /* 862 /*
795 * We use the number of queues for feature tests (QoS, HT) internally 863 * We use the number of queues for feature tests (QoS, HT) internally
@@ -803,7 +871,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
803 hw->ampdu_queues = 0; 871 hw->ampdu_queues = 0;
804 872
805 mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv), 873 mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
806 "wmaster%d", ether_setup, 874 "wmaster%d", ieee80211_master_setup,
807 ieee80211_num_queues(hw)); 875 ieee80211_num_queues(hw));
808 if (!mdev) 876 if (!mdev)
809 goto fail_mdev_alloc; 877 goto fail_mdev_alloc;
@@ -812,17 +880,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
812 mpriv->local = local; 880 mpriv->local = local;
813 local->mdev = mdev; 881 local->mdev = mdev;
814 882
815 ieee80211_rx_bss_list_init(local);
816
817 mdev->hard_start_xmit = ieee80211_master_start_xmit;
818 mdev->open = ieee80211_master_open;
819 mdev->stop = ieee80211_master_stop;
820 mdev->type = ARPHRD_IEEE80211;
821 mdev->header_ops = &ieee80211_header_ops;
822 mdev->set_multicast_list = ieee80211_master_set_multicast_list;
823
824 local->hw.workqueue = 883 local->hw.workqueue =
825 create_freezeable_workqueue(wiphy_name(local->hw.wiphy)); 884 create_singlethread_workqueue(wiphy_name(local->hw.wiphy));
826 if (!local->hw.workqueue) { 885 if (!local->hw.workqueue) {
827 result = -ENOMEM; 886 result = -ENOMEM;
828 goto fail_workqueue; 887 goto fail_workqueue;
@@ -846,15 +905,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
846 905
847 local->hw.conf.listen_interval = local->hw.max_listen_interval; 906 local->hw.conf.listen_interval = local->hw.max_listen_interval;
848 907
849 local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
850 IEEE80211_HW_SIGNAL_DB |
851 IEEE80211_HW_SIGNAL_DBM) ?
852 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
853 local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
854 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
855 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
856 local->wstats_flags |= IW_QUAL_DBM;
857
858 result = sta_info_start(local); 908 result = sta_info_start(local);
859 if (result < 0) 909 if (result < 0)
860 goto fail_sta_info; 910 goto fail_sta_info;
@@ -887,8 +937,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
887 goto fail_wep; 937 goto fail_wep;
888 } 938 }
889 939
890 local->mdev->select_queue = ieee80211_select_queue;
891
892 /* add one default STA interface if supported */ 940 /* add one default STA interface if supported */
893 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { 941 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) {
894 result = ieee80211_if_add(local, "wlan%d", NULL, 942 result = ieee80211_if_add(local, "wlan%d", NULL,
@@ -902,6 +950,20 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
902 950
903 ieee80211_led_init(local); 951 ieee80211_led_init(local);
904 952
953 /* alloc internal scan request */
954 i = 0;
955 local->int_scan_req.ssids = &local->scan_ssid;
956 local->int_scan_req.n_ssids = 1;
957 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
958 if (!hw->wiphy->bands[band])
959 continue;
960 for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) {
961 local->int_scan_req.channels[i] =
962 &hw->wiphy->bands[band]->channels[j];
963 i++;
964 }
965 }
966
905 return 0; 967 return 0;
906 968
907fail_wep: 969fail_wep:
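
The two register_hw hunks above build the internal scan request in two passes: first count the channels across all bands to size the allocation, then fill a flat array of channel pointers band by band. The following userspace sketch shows the same two-pass construction with toy stand-ins for the wiphy band structures (not the real cfg80211 types):

/* Sketch of building the flat channel list for the internal scan request. */
#include <stdio.h>
#include <stdlib.h>

struct toy_channel { int center_freq; };

struct toy_band {
	int n_channels;
	struct toy_channel *channels;
};

int main(void)
{
	struct toy_channel ch24[] = { {2412}, {2437}, {2462} };
	struct toy_channel ch5[]  = { {5180}, {5240} };
	struct toy_band bands[] = { { 3, ch24 }, { 2, ch5 } };
	int channels = 0, i = 0;

	/* first pass: count channels over all bands */
	for (size_t b = 0; b < 2; b++)
		channels += bands[b].n_channels;

	struct toy_channel **list = calloc(channels, sizeof(*list));
	if (!list)
		return 1;

	/* second pass: fill the flat array with per-band channel pointers */
	for (size_t b = 0; b < 2; b++)
		for (int j = 0; j < bands[b].n_channels; j++)
			list[i++] = &bands[b].channels[j];

	for (int k = 0; k < channels; k++)
		printf("channel %d: %d MHz\n", k, list[k]->center_freq);
	free(list);
	return 0;
}
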
@@ -920,6 +982,8 @@ fail_workqueue:
920 free_netdev(local->mdev); 982 free_netdev(local->mdev);
921fail_mdev_alloc: 983fail_mdev_alloc:
922 wiphy_unregister(local->hw.wiphy); 984 wiphy_unregister(local->hw.wiphy);
985fail_wiphy_register:
986 kfree(local->int_scan_req.channels);
923 return result; 987 return result;
924} 988}
925EXPORT_SYMBOL(ieee80211_register_hw); 989EXPORT_SYMBOL(ieee80211_register_hw);
@@ -947,7 +1011,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
947 1011
948 rtnl_unlock(); 1012 rtnl_unlock();
949 1013
950 ieee80211_rx_bss_list_deinit(local);
951 ieee80211_clear_tx_pending(local); 1014 ieee80211_clear_tx_pending(local);
952 sta_info_stop(local); 1015 sta_info_stop(local);
953 rate_control_deinitialize(local); 1016 rate_control_deinitialize(local);
@@ -965,6 +1028,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
965 ieee80211_wep_free(local); 1028 ieee80211_wep_free(local);
966 ieee80211_led_exit(local); 1029 ieee80211_led_exit(local);
967 free_netdev(local->mdev); 1030 free_netdev(local->mdev);
1031 kfree(local->int_scan_req.channels);
968} 1032}
969EXPORT_SYMBOL(ieee80211_unregister_hw); 1033EXPORT_SYMBOL(ieee80211_unregister_hw);
970 1034
@@ -972,6 +1036,8 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
972{ 1036{
973 struct ieee80211_local *local = hw_to_local(hw); 1037 struct ieee80211_local *local = hw_to_local(hw);
974 1038
1039 mutex_destroy(&local->iflist_mtx);
1040
975 wiphy_free(local->hw.wiphy); 1041 wiphy_free(local->hw.wiphy);
976} 1042}
977EXPORT_SYMBOL(ieee80211_free_hw); 1043EXPORT_SYMBOL(ieee80211_free_hw);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 82f568e9436..9a3e5de0410 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -275,16 +275,6 @@ u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_t
275 & tbl->hash_mask; 275 & tbl->hash_mask;
276} 276}
277 277
278u8 mesh_id_hash(u8 *mesh_id, int mesh_id_len)
279{
280 if (!mesh_id_len)
281 return 1;
282 else if (mesh_id_len == 1)
283 return (u8) mesh_id[0];
284 else
285 return (u8) (mesh_id[0] + 2 * mesh_id[1]);
286}
287
288struct mesh_table *mesh_table_alloc(int size_order) 278struct mesh_table *mesh_table_alloc(int size_order)
289{ 279{
290 int i; 280 int i;
@@ -442,7 +432,8 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
442 432
443 ifmsh->housekeeping = true; 433 ifmsh->housekeeping = true;
444 queue_work(local->hw.workqueue, &ifmsh->work); 434 queue_work(local->hw.workqueue, &ifmsh->work);
445 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); 435 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON |
436 IEEE80211_IFCC_BEACON_ENABLED);
446} 437}
447 438
448void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) 439void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -476,7 +467,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
476 struct ieee80211_local *local = sdata->local; 467 struct ieee80211_local *local = sdata->local;
477 struct ieee802_11_elems elems; 468 struct ieee802_11_elems elems;
478 struct ieee80211_channel *channel; 469 struct ieee80211_channel *channel;
479 u64 supp_rates = 0; 470 u32 supp_rates = 0;
480 size_t baselen; 471 size_t baselen;
481 int freq; 472 int freq;
482 enum ieee80211_band band = rx_status->band; 473 enum ieee80211_band band = rx_status->band;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c197ab545e5..d891d7ddccd 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -24,15 +24,15 @@
24 * 24 *
25 * 25 *
26 * 26 *
27 * @MESH_PATH_ACTIVE: the mesh path is can be used for forwarding 27 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
28 * @MESH_PATH_RESOLVED: the discovery process is running for this mesh path 28 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
29 * @MESH_PATH_DSN_VALID: the mesh path contains a valid destination sequence 29 * @MESH_PATH_DSN_VALID: the mesh path contains a valid destination sequence
30 * number 30 * number
31 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be 31 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
32 * modified 32 * modified
33 * @MESH_PATH_RESOLVED: the mesh path has been resolved 33 * @MESH_PATH_RESOLVED: the mesh path has been resolved
34 * 34 *
35 * MESH_PATH_RESOLVED and MESH_PATH_DELETE are used by the mesh path timer to 35 * MESH_PATH_RESOLVED is used by the mesh path timer to
36 * decide when to stop or cancel the mesh path discovery. 36 * decide when to stop or cancel the mesh path discovery.
37 */ 37 */
38enum mesh_path_flags { 38enum mesh_path_flags {
@@ -196,7 +196,6 @@ struct mesh_rmc {
196 196
197/* Public interfaces */ 197/* Public interfaces */
198/* Various */ 198/* Various */
199u8 mesh_id_hash(u8 *mesh_id, int mesh_id_len);
200int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); 199int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
201int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 200int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
202 struct ieee80211_sub_if_data *sdata); 201 struct ieee80211_sub_if_data *sdata);
@@ -236,14 +235,13 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
236 struct ieee80211_mgmt *mgmt, size_t len); 235 struct ieee80211_mgmt *mgmt, size_t len);
237int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 236int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
238/* Mesh plinks */ 237/* Mesh plinks */
239void mesh_neighbour_update(u8 *hw_addr, u64 rates, 238void mesh_neighbour_update(u8 *hw_addr, u32 rates,
240 struct ieee80211_sub_if_data *sdata, bool add); 239 struct ieee80211_sub_if_data *sdata, bool add);
241bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 240bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
242void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 241void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
243void mesh_plink_broken(struct sta_info *sta); 242void mesh_plink_broken(struct sta_info *sta);
244void mesh_plink_deactivate(struct sta_info *sta); 243void mesh_plink_deactivate(struct sta_info *sta);
245int mesh_plink_open(struct sta_info *sta); 244int mesh_plink_open(struct sta_info *sta);
246int mesh_plink_close(struct sta_info *sta);
247void mesh_plink_block(struct sta_info *sta); 245void mesh_plink_block(struct sta_info *sta);
248void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, 246void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
249 struct ieee80211_mgmt *mgmt, size_t len, 247 struct ieee80211_mgmt *mgmt, size_t len,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 71fe6096123..60b35accda9 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -58,7 +58,6 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
58#define PERR_IE_DST_ADDR(x) (x + 2) 58#define PERR_IE_DST_ADDR(x) (x + 2)
59#define PERR_IE_DST_DSN(x) u32_field_get(x, 8, 0); 59#define PERR_IE_DST_DSN(x) u32_field_get(x, 8, 0);
60 60
61#define TU_TO_EXP_TIME(x) (jiffies + msecs_to_jiffies(x * 1024 / 1000))
62#define MSEC_TO_TU(x) (x*1000/1024) 61#define MSEC_TO_TU(x) (x*1000/1024)
63#define DSN_GT(x, y) ((long) (y) - (long) (x) < 0) 62#define DSN_GT(x, y) ((long) (y) - (long) (x) < 0)
64#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) 63#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
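
The MSEC_TO_TU macro kept above converts between milliseconds and 802.11 time units, where one TU is 1024 microseconds, hence the *1000/1024 and *1024/1000 factors. A quick standalone check of both directions (the TU_TO_MSEC helper is added here only for illustration):

/* Quick check of the TU (1 TU = 1024 microseconds) conversions used above. */
#include <stdio.h>

#define MSEC_TO_TU(x)	((x) * 1000 / 1024)
#define TU_TO_MSEC(x)	((x) * 1024 / 1000)

int main(void)
{
	printf("100 TU beacon interval = %d ms\n", TU_TO_MSEC(100));
	printf("1000 ms = %d TU\n", MSEC_TO_TU(1000));
	return 0;
}
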
@@ -149,7 +148,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
149 pos += ETH_ALEN; 148 pos += ETH_ALEN;
150 memcpy(pos, &dst_dsn, 4); 149 memcpy(pos, &dst_dsn, 4);
151 150
152 ieee80211_tx_skb(sdata, skb, 0); 151 ieee80211_tx_skb(sdata, skb, 1);
153 return 0; 152 return 0;
154} 153}
155 154
@@ -198,7 +197,7 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
198 pos += ETH_ALEN; 197 pos += ETH_ALEN;
199 memcpy(pos, &dst_dsn, 4); 198 memcpy(pos, &dst_dsn, 4);
200 199
201 ieee80211_tx_skb(sdata, skb, 0); 200 ieee80211_tx_skb(sdata, skb, 1);
202 return 0; 201 return 0;
203} 202}
204 203
@@ -759,7 +758,7 @@ enddiscovery:
759} 758}
760 759
761/** 760/**
762 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame 761 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
763 * 762 *
764 * @skb: 802.11 frame to be sent 763 * @skb: 802.11 frame to be sent
765 * @sdata: network subif the frame will be sent through 764 * @sdata: network subif the frame will be sent through
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 1159bdb4119..a8bbdeca013 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -93,7 +93,7 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
93 * on it in the lifecycle management section! 93 * on it in the lifecycle management section!
94 */ 94 */
95static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, 95static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
96 u8 *hw_addr, u64 rates) 96 u8 *hw_addr, u32 rates)
97{ 97{
98 struct ieee80211_local *local = sdata->local; 98 struct ieee80211_local *local = sdata->local;
99 struct sta_info *sta; 99 struct sta_info *sta;
@@ -218,11 +218,11 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
218 memcpy(pos, &reason, 2); 218 memcpy(pos, &reason, 2);
219 } 219 }
220 220
221 ieee80211_tx_skb(sdata, skb, 0); 221 ieee80211_tx_skb(sdata, skb, 1);
222 return 0; 222 return 0;
223} 223}
224 224
225void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct ieee80211_sub_if_data *sdata, 225void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data *sdata,
226 bool peer_accepting_plinks) 226 bool peer_accepting_plinks)
227{ 227{
228 struct ieee80211_local *local = sdata->local; 228 struct ieee80211_local *local = sdata->local;
@@ -361,36 +361,6 @@ void mesh_plink_block(struct sta_info *sta)
361 spin_unlock_bh(&sta->lock); 361 spin_unlock_bh(&sta->lock);
362} 362}
363 363
364int mesh_plink_close(struct sta_info *sta)
365{
366 struct ieee80211_sub_if_data *sdata = sta->sdata;
367 __le16 llid, plid, reason;
368
369 mpl_dbg("Mesh plink: closing link with %pM\n", sta->sta.addr);
370 spin_lock_bh(&sta->lock);
371 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
372 reason = sta->reason;
373
374 if (sta->plink_state == PLINK_LISTEN ||
375 sta->plink_state == PLINK_BLOCKED) {
376 mesh_plink_fsm_restart(sta);
377 spin_unlock_bh(&sta->lock);
378 return 0;
379 } else if (sta->plink_state == PLINK_ESTAB) {
380 __mesh_plink_deactivate(sta);
381 /* The timer should not be running */
382 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
383 } else if (!mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)))
384 sta->ignore_plink_timer = true;
385
386 sta->plink_state = PLINK_HOLDING;
387 llid = sta->llid;
388 plid = sta->plid;
389 spin_unlock_bh(&sta->lock);
390 mesh_plink_frame_tx(sta->sdata, PLINK_CLOSE, sta->sta.addr, llid,
391 plid, reason);
392 return 0;
393}
394 364
395void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, 365void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
396 size_t len, struct ieee80211_rx_status *rx_status) 366 size_t len, struct ieee80211_rx_status *rx_status)
@@ -477,7 +447,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
477 spin_lock_bh(&sta->lock); 447 spin_lock_bh(&sta->lock);
478 } else if (!sta) { 448 } else if (!sta) {
479 /* ftype == PLINK_OPEN */ 449 /* ftype == PLINK_OPEN */
480 u64 rates; 450 u32 rates;
481 if (!mesh_plink_free_count(sdata)) { 451 if (!mesh_plink_free_count(sdata)) {
482 mpl_dbg("Mesh plink error: no more free plinks\n"); 452 mpl_dbg("Mesh plink error: no more free plinks\n");
483 rcu_read_unlock(); 453 rcu_read_unlock();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2b890af01ba..fbb766afe59 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * BSS client mode implementation 2 * BSS client mode implementation
3 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> 3 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
4 * Copyright 2004, Instant802 Networks, Inc. 4 * Copyright 2004, Instant802 Networks, Inc.
5 * Copyright 2005, Devicescape Software, Inc. 5 * Copyright 2005, Devicescape Software, Inc.
6 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
@@ -55,10 +55,10 @@ static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie)
55{ 55{
56 u8 *end, *pos; 56 u8 *end, *pos;
57 57
58 pos = bss->ies; 58 pos = bss->cbss.information_elements;
59 if (pos == NULL) 59 if (pos == NULL)
60 return NULL; 60 return NULL;
61 end = pos + bss->ies_len; 61 end = pos + bss->cbss.len_information_elements;
62 62
63 while (pos + 1 < end) { 63 while (pos + 1 < end) {
64 if (pos + 2 + pos[1] > end) 64 if (pos + 2 + pos[1] > end)
@@ -73,7 +73,7 @@ static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie)
73 73
74static int ieee80211_compatible_rates(struct ieee80211_bss *bss, 74static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
75 struct ieee80211_supported_band *sband, 75 struct ieee80211_supported_band *sband,
76 u64 *rates) 76 u32 *rates)
77{ 77{
78 int i, j, count; 78 int i, j, count;
79 *rates = 0; 79 *rates = 0;
@@ -93,14 +93,14 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
93} 93}
94 94
95/* also used by mesh code */ 95/* also used by mesh code */
96u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 96u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
97 struct ieee802_11_elems *elems, 97 struct ieee802_11_elems *elems,
98 enum ieee80211_band band) 98 enum ieee80211_band band)
99{ 99{
100 struct ieee80211_supported_band *sband; 100 struct ieee80211_supported_band *sband;
101 struct ieee80211_rate *bitrates; 101 struct ieee80211_rate *bitrates;
102 size_t num_rates; 102 size_t num_rates;
103 u64 supp_rates; 103 u32 supp_rates;
104 int i, j; 104 int i, j;
105 sband = local->hw.wiphy->bands[band]; 105 sband = local->hw.wiphy->bands[band];
106 106
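
The u64 to u32 change for supp_rates throughout this series works because a band defines far fewer than 32 legacy bitrates, so rate index i can be stored as BIT(i) in a 32-bit bitmap. The sketch below builds such a bitmap by matching advertised rates (in 500 kbps units) against a band's rate table (in 100 kbps units); the rate sets are illustrative examples, not the exact tables from mac80211.

/* Sketch of the u32 supported-rates bitmap used by ieee80211_sta_get_rates(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 802.11g-style band rates in units of 100 kbps (assumed example set) */
	const int band_rates[] = { 10, 20, 55, 60, 90, 110, 120, 180,
				   240, 360, 480, 540 };
	const int n_rates = sizeof(band_rates) / sizeof(band_rates[0]);
	/* rates advertised by a peer, in units of 500 kbps */
	const int peer_rates[] = { 2, 4, 11, 22, 12, 24, 48, 108 };
	uint32_t supp_rates = 0;

	for (size_t i = 0; i < sizeof(peer_rates) / sizeof(peer_rates[0]); i++)
		for (int j = 0; j < n_rates; j++)
			if (peer_rates[i] * 5 == band_rates[j])
				supp_rates |= 1u << j;	/* rate j is supported */

	printf("supp_rates bitmap: 0x%08x\n", supp_rates);
	return 0;
}
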
@@ -131,6 +131,12 @@ u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
131 131
132/* frame sending functions */ 132/* frame sending functions */
133 133
134static void add_extra_ies(struct sk_buff *skb, u8 *ies, size_t ies_len)
135{
136 if (ies)
137 memcpy(skb_put(skb, ies_len), ies, ies_len);
138}
139
134/* also used by scanning code */ 140/* also used by scanning code */
135void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 141void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
136 u8 *ssid, size_t ssid_len) 142 u8 *ssid, size_t ssid_len)
@@ -142,7 +148,8 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
142 u8 *pos, *supp_rates, *esupp_rates = NULL; 148 u8 *pos, *supp_rates, *esupp_rates = NULL;
143 int i; 149 int i;
144 150
145 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200); 151 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
152 sdata->u.sta.ie_probereq_len);
146 if (!skb) { 153 if (!skb) {
147 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 154 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
148 "request\n", sdata->dev->name); 155 "request\n", sdata->dev->name);
@@ -189,6 +196,9 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
189 *pos = rate->bitrate / 5; 196 *pos = rate->bitrate / 5;
190 } 197 }
191 198
199 add_extra_ies(skb, sdata->u.sta.ie_probereq,
200 sdata->u.sta.ie_probereq_len);
201
192 ieee80211_tx_skb(sdata, skb, 0); 202 ieee80211_tx_skb(sdata, skb, 0);
193} 203}
194 204
@@ -202,7 +212,8 @@ static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
202 struct ieee80211_mgmt *mgmt; 212 struct ieee80211_mgmt *mgmt;
203 213
204 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 214 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
205 sizeof(*mgmt) + 6 + extra_len); 215 sizeof(*mgmt) + 6 + extra_len +
216 sdata->u.sta.ie_auth_len);
206 if (!skb) { 217 if (!skb) {
207 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 218 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
208 "frame\n", sdata->dev->name); 219 "frame\n", sdata->dev->name);
@@ -225,6 +236,7 @@ static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
225 mgmt->u.auth.status_code = cpu_to_le16(0); 236 mgmt->u.auth.status_code = cpu_to_le16(0);
226 if (extra) 237 if (extra)
227 memcpy(skb_put(skb, extra_len), extra, extra_len); 238 memcpy(skb_put(skb, extra_len), extra, extra_len);
239 add_extra_ies(skb, sdata->u.sta.ie_auth, sdata->u.sta.ie_auth_len);
228 240
229 ieee80211_tx_skb(sdata, skb, encrypt); 241 ieee80211_tx_skb(sdata, skb, encrypt);
230} 242}
@@ -235,17 +247,26 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
235 struct ieee80211_local *local = sdata->local; 247 struct ieee80211_local *local = sdata->local;
236 struct sk_buff *skb; 248 struct sk_buff *skb;
237 struct ieee80211_mgmt *mgmt; 249 struct ieee80211_mgmt *mgmt;
238 u8 *pos, *ies, *ht_ie; 250 u8 *pos, *ies, *ht_ie, *e_ies;
239 int i, len, count, rates_len, supp_rates_len; 251 int i, len, count, rates_len, supp_rates_len;
240 u16 capab; 252 u16 capab;
241 struct ieee80211_bss *bss; 253 struct ieee80211_bss *bss;
242 int wmm = 0; 254 int wmm = 0;
243 struct ieee80211_supported_band *sband; 255 struct ieee80211_supported_band *sband;
244 u64 rates = 0; 256 u32 rates = 0;
257 size_t e_ies_len;
258
259 if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) {
260 e_ies = sdata->u.sta.ie_reassocreq;
261 e_ies_len = sdata->u.sta.ie_reassocreq_len;
262 } else {
263 e_ies = sdata->u.sta.ie_assocreq;
264 e_ies_len = sdata->u.sta.ie_assocreq_len;
265 }
245 266
246 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 267 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
247 sizeof(*mgmt) + 200 + ifsta->extra_ie_len + 268 sizeof(*mgmt) + 200 + ifsta->extra_ie_len +
248 ifsta->ssid_len); 269 ifsta->ssid_len + e_ies_len);
249 if (!skb) { 270 if (!skb) {
250 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " 271 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
251 "frame\n", sdata->dev->name); 272 "frame\n", sdata->dev->name);
@@ -268,7 +289,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
268 local->hw.conf.channel->center_freq, 289 local->hw.conf.channel->center_freq,
269 ifsta->ssid, ifsta->ssid_len); 290 ifsta->ssid, ifsta->ssid_len);
270 if (bss) { 291 if (bss) {
271 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 292 if (bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
272 capab |= WLAN_CAPABILITY_PRIVACY; 293 capab |= WLAN_CAPABILITY_PRIVACY;
273 if (bss->wmm_used) 294 if (bss->wmm_used)
274 wmm = 1; 295 wmm = 1;
@@ -279,7 +300,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
279 * b-only mode) */ 300 * b-only mode) */
280 rates_len = ieee80211_compatible_rates(bss, sband, &rates); 301 rates_len = ieee80211_compatible_rates(bss, sband, &rates);
281 302
282 if ((bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && 303 if ((bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
283 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) 304 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
284 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; 305 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
285 306
@@ -391,10 +412,17 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
391 } 412 }
392 413
393 /* wmm support is a must to HT */ 414 /* wmm support is a must to HT */
415 /*
416 * IEEE802.11n does not allow TKIP/WEP as pairwise
417 * ciphers in HT mode. We still associate in non-ht
418 * mode (11a/b/g) if any one of these ciphers is
419 * configured as pairwise.
420 */
394 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && 421 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
395 sband->ht_cap.ht_supported && 422 sband->ht_cap.ht_supported &&
396 (ht_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_INFORMATION)) && 423 (ht_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_INFORMATION)) &&
397 ht_ie[1] >= sizeof(struct ieee80211_ht_info)) { 424 ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
425 (!(ifsta->flags & IEEE80211_STA_TKIP_WEP_USED))) {
398 struct ieee80211_ht_info *ht_info = 426 struct ieee80211_ht_info *ht_info =
399 (struct ieee80211_ht_info *)(ht_ie + 2); 427 (struct ieee80211_ht_info *)(ht_ie + 2);
400 u16 cap = sband->ht_cap.cap; 428 u16 cap = sband->ht_cap.cap;
@@ -429,6 +457,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
429 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 457 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
430 } 458 }
431 459
460 add_extra_ies(skb, e_ies, e_ies_len);
461
432 kfree(ifsta->assocreq_ies); 462 kfree(ifsta->assocreq_ies);
433 ifsta->assocreq_ies_len = (skb->data + skb->len) - ies; 463 ifsta->assocreq_ies_len = (skb->data + skb->len) - ies;
434 ifsta->assocreq_ies = kmalloc(ifsta->assocreq_ies_len, GFP_KERNEL); 464 ifsta->assocreq_ies = kmalloc(ifsta->assocreq_ies_len, GFP_KERNEL);
@@ -446,8 +476,19 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
446 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 476 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
447 struct sk_buff *skb; 477 struct sk_buff *skb;
448 struct ieee80211_mgmt *mgmt; 478 struct ieee80211_mgmt *mgmt;
479 u8 *ies;
480 size_t ies_len;
481
482 if (stype == IEEE80211_STYPE_DEAUTH) {
483 ies = sdata->u.sta.ie_deauth;
484 ies_len = sdata->u.sta.ie_deauth_len;
485 } else {
486 ies = sdata->u.sta.ie_disassoc;
487 ies_len = sdata->u.sta.ie_disassoc_len;
488 }
449 489
450 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 490 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) +
491 ies_len);
451 if (!skb) { 492 if (!skb) {
452 printk(KERN_DEBUG "%s: failed to allocate buffer for " 493 printk(KERN_DEBUG "%s: failed to allocate buffer for "
453 "deauth/disassoc frame\n", sdata->dev->name); 494 "deauth/disassoc frame\n", sdata->dev->name);
@@ -465,19 +506,55 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
465 /* u.deauth.reason_code == u.disassoc.reason_code */ 506 /* u.deauth.reason_code == u.disassoc.reason_code */
466 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 507 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
467 508
509 add_extra_ies(skb, ies, ies_len);
510
511 ieee80211_tx_skb(sdata, skb, ifsta->flags & IEEE80211_STA_MFP_ENABLED);
512}
513
514void ieee80211_send_pspoll(struct ieee80211_local *local,
515 struct ieee80211_sub_if_data *sdata)
516{
517 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
518 struct ieee80211_pspoll *pspoll;
519 struct sk_buff *skb;
520 u16 fc;
521
522 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
523 if (!skb) {
524 printk(KERN_DEBUG "%s: failed to allocate buffer for "
525 "pspoll frame\n", sdata->dev->name);
526 return;
527 }
528 skb_reserve(skb, local->hw.extra_tx_headroom);
529
530 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
531 memset(pspoll, 0, sizeof(*pspoll));
532 fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM;
533 pspoll->frame_control = cpu_to_le16(fc);
534 pspoll->aid = cpu_to_le16(ifsta->aid);
535
536 /* aid in PS-Poll has its two MSBs each set to 1 */
537 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
538
539 memcpy(pspoll->bssid, ifsta->bssid, ETH_ALEN);
540 memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
541
468 ieee80211_tx_skb(sdata, skb, 0); 542 ieee80211_tx_skb(sdata, skb, 0);
543
544 return;
469} 545}
470 546
471/* MLME */ 547/* MLME */
472static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 548static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
473 struct ieee80211_bss *bss) 549 const size_t supp_rates_len,
550 const u8 *supp_rates)
474{ 551{
475 struct ieee80211_local *local = sdata->local; 552 struct ieee80211_local *local = sdata->local;
476 int i, have_higher_than_11mbit = 0; 553 int i, have_higher_than_11mbit = 0;
477 554
478 /* cf. IEEE 802.11 9.2.12 */ 555 /* cf. IEEE 802.11 9.2.12 */
479 for (i = 0; i < bss->supp_rates_len; i++) 556 for (i = 0; i < supp_rates_len; i++)
480 if ((bss->supp_rates[i] & 0x7f) * 5 > 110) 557 if ((supp_rates[i] & 0x7f) * 5 > 110)
481 have_higher_than_11mbit = 1; 558 have_higher_than_11mbit = 1;
482 559
483 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && 560 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
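
The new ieee80211_send_pspoll() above fills the PS-Poll Duration/ID field with the AID and forces its two most significant bits to 1, as 802.11 requires for PS-Poll frames. A quick standalone check of that bit twiddling (values invented for the example):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t aid = 5;                 /* AID from the AssocResp */
		uint16_t field = aid | (1u << 15) | (1u << 14);

		/* on the air the field is little-endian */
		printf("PS-Poll AID field: 0x%04x (bytes %02x %02x)\n",
		       field, field & 0xff, field >> 8);
		return 0;
	}

AID 5 becomes 0xc005, transmitted as the byte pair 05 c0, matching what the cpu_to_le16() pair in the patch produces.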
@@ -568,6 +645,27 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
568 } 645 }
569} 646}
570 647
648static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid)
649{
650 u8 mask;
651 u8 index, indexn1, indexn2;
652 struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim;
653
654 aid &= 0x3fff;
655 index = aid / 8;
656 mask = 1 << (aid & 7);
657
658 indexn1 = tim->bitmap_ctrl & 0xfe;
659 indexn2 = elems->tim_len + indexn1 - 4;
660
661 if (index < indexn1 || index > indexn2)
662 return false;
663
664 index -= indexn1;
665
666 return !!(tim->virtual_map[index] & mask);
667}
668
571static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 669static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
572 u16 capab, bool erp_valid, u8 erp) 670 u16 capab, bool erp_valid, u8 erp)
573{ 671{
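
ieee80211_check_tim() above decodes the partial virtual bitmap of a TIM element: the upper seven bits of bitmap_ctrl give N1, the first bitmap octet actually carried, and the element length bounds N2, the last one; the station's bit sits at octet aid/8, bit aid%8. The standalone recheck below repeats that arithmetic on invented values (tim_has_aid and the sample bitmap are illustrative, not kernel symbols):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	/* tim_len counts DTIM count, DTIM period, bitmap control and the
	 * partial virtual bitmap octets, as in the TIM element body. */
	static bool tim_has_aid(uint8_t bitmap_ctrl, const uint8_t *virtual_map,
				uint8_t tim_len, uint16_t aid)
	{
		uint8_t index, indexn1, indexn2, mask;

		aid &= 0x3fff;
		index = aid / 8;
		mask = 1 << (aid & 7);

		indexn1 = bitmap_ctrl & 0xfe;     /* N1: first octet present */
		indexn2 = tim_len + indexn1 - 4;  /* N2: last octet present  */

		if (index < indexn1 || index > indexn2)
			return false;             /* our octet was elided */

		return virtual_map[index - indexn1] & mask;
	}

	int main(void)
	{
		/* partial bitmap covering octets 6..7 of the full map */
		uint8_t vmap[] = { 0x00, 0x02 };   /* bit for AID 57 set */

		printf("AID 57 buffered: %d\n",
		       tim_has_aid(0x06, vmap, 3 + sizeof(vmap), 57));
		printf("AID 12 buffered: %d\n",
		       tim_has_aid(0x06, vmap, 3 + sizeof(vmap), 12));
		return 0;
	}

AID 57 maps to octet 7, bit 1; with N1 = 6 that is vmap[1] & 0x02, so the first call prints 1, while AID 12 falls below N1 and prints 0.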
@@ -710,20 +808,17 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
710 bss_info_changed |= BSS_CHANGED_ASSOC; 808 bss_info_changed |= BSS_CHANGED_ASSOC;
711 ifsta->flags |= IEEE80211_STA_ASSOCIATED; 809 ifsta->flags |= IEEE80211_STA_ASSOCIATED;
712 810
713 if (sdata->vif.type != NL80211_IFTYPE_STATION)
714 return;
715
716 bss = ieee80211_rx_bss_get(local, ifsta->bssid, 811 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
717 conf->channel->center_freq, 812 conf->channel->center_freq,
718 ifsta->ssid, ifsta->ssid_len); 813 ifsta->ssid, ifsta->ssid_len);
719 if (bss) { 814 if (bss) {
720 /* set timing information */ 815 /* set timing information */
721 sdata->vif.bss_conf.beacon_int = bss->beacon_int; 816 sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval;
722 sdata->vif.bss_conf.timestamp = bss->timestamp; 817 sdata->vif.bss_conf.timestamp = bss->cbss.tsf;
723 sdata->vif.bss_conf.dtim_period = bss->dtim_period; 818 sdata->vif.bss_conf.dtim_period = bss->dtim_period;
724 819
725 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 820 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
726 bss->capability, bss->has_erp_value, bss->erp_value); 821 bss->cbss.capability, bss->has_erp_value, bss->erp_value);
727 822
728 ieee80211_rx_bss_put(local, bss); 823 ieee80211_rx_bss_put(local, bss);
729 } 824 }
@@ -745,13 +840,16 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
745 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 840 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
746 841
747 if (local->powersave) { 842 if (local->powersave) {
748 if (local->dynamic_ps_timeout > 0) 843 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) &&
844 local->hw.conf.dynamic_ps_timeout > 0) {
749 mod_timer(&local->dynamic_ps_timer, jiffies + 845 mod_timer(&local->dynamic_ps_timer, jiffies +
750 msecs_to_jiffies(local->dynamic_ps_timeout)); 846 msecs_to_jiffies(
751 else { 847 local->hw.conf.dynamic_ps_timeout));
848 } else {
849 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
850 ieee80211_send_nullfunc(local, sdata, 1);
752 conf->flags |= IEEE80211_CONF_PS; 851 conf->flags |= IEEE80211_CONF_PS;
753 ieee80211_hw_config(local, 852 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
754 IEEE80211_CONF_CHANGE_PS);
755 } 853 }
756 } 854 }
757 855
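
The power-save change above distinguishes two cases after association: when the hardware does not implement dynamic PS itself and a timeout is configured, a software timer is armed and PS is entered only after the link has been idle that long; otherwise PS is enabled straight away (sending a nullfunc first when the stack, rather than the device, tracks PS state). A toy model of that decision, with invented names:

	#include <stdio.h>
	#include <stdbool.h>

	static const char *ps_action(bool hw_dynamic_ps, int timeout_ms)
	{
		if (!hw_dynamic_ps && timeout_ms > 0)
			return "arm dynamic_ps_timer, enter PS after idle period";
		return "enable IEEE80211_CONF_PS immediately";
	}

	int main(void)
	{
		printf("%s\n", ps_action(false, 500));
		printf("%s\n", ps_action(true, 500));
		return 0;
	}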
@@ -770,6 +868,14 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
770 sdata->dev->name, ifsta->bssid); 868 sdata->dev->name, ifsta->bssid);
771 ifsta->state = IEEE80211_STA_MLME_DISABLED; 869 ifsta->state = IEEE80211_STA_MLME_DISABLED;
772 ieee80211_sta_send_apinfo(sdata, ifsta); 870 ieee80211_sta_send_apinfo(sdata, ifsta);
871
872 /*
873 * Most likely AP is not in the range so remove the
874 * bss information associated to the AP
875 */
876 ieee80211_rx_bss_remove(sdata, ifsta->bssid,
877 sdata->local->hw.conf.channel->center_freq,
878 ifsta->ssid, ifsta->ssid_len);
773 return; 879 return;
774 } 880 }
775 881
@@ -801,6 +907,9 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
801 sdata->dev->name, ifsta->bssid); 907 sdata->dev->name, ifsta->bssid);
802 ifsta->state = IEEE80211_STA_MLME_DISABLED; 908 ifsta->state = IEEE80211_STA_MLME_DISABLED;
803 ieee80211_sta_send_apinfo(sdata, ifsta); 909 ieee80211_sta_send_apinfo(sdata, ifsta);
910 ieee80211_rx_bss_remove(sdata, ifsta->bssid,
911 sdata->local->hw.conf.channel->center_freq,
912 ifsta->ssid, ifsta->ssid_len);
804 return; 913 return;
805 } 914 }
806 915
@@ -843,7 +952,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
843 netif_tx_stop_all_queues(sdata->dev); 952 netif_tx_stop_all_queues(sdata->dev);
844 netif_carrier_off(sdata->dev); 953 netif_carrier_off(sdata->dev);
845 954
846 ieee80211_sta_tear_down_BA_sessions(sdata, sta->sta.addr); 955 ieee80211_sta_tear_down_BA_sessions(sta);
847 956
848 if (self_disconnected) { 957 if (self_disconnected) {
849 if (deauth) 958 if (deauth)
@@ -863,14 +972,19 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
863 972
864 ieee80211_sta_send_apinfo(sdata, ifsta); 973 ieee80211_sta_send_apinfo(sdata, ifsta);
865 974
866 if (self_disconnected || reason == WLAN_REASON_DISASSOC_STA_HAS_LEFT) 975 if (self_disconnected || reason == WLAN_REASON_DISASSOC_STA_HAS_LEFT) {
867 ifsta->state = IEEE80211_STA_MLME_DISABLED; 976 ifsta->state = IEEE80211_STA_MLME_DISABLED;
977 ieee80211_rx_bss_remove(sdata, ifsta->bssid,
978 sdata->local->hw.conf.channel->center_freq,
979 ifsta->ssid, ifsta->ssid_len);
980 }
868 981
869 rcu_read_unlock(); 982 rcu_read_unlock();
870 983
871 local->hw.conf.ht.enabled = false; 984 /* channel(_type) changes are handled by ieee80211_hw_config */
872 local->oper_channel_type = NL80211_CHAN_NO_HT; 985 local->oper_channel_type = NL80211_CHAN_NO_HT;
873 config_changed |= IEEE80211_CONF_CHANGE_HT; 986
987 local->power_constr_level = 0;
874 988
875 del_timer_sync(&local->dynamic_ps_timer); 989 del_timer_sync(&local->dynamic_ps_timer);
876 cancel_work_sync(&local->dynamic_ps_enable_work); 990 cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -924,7 +1038,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata,
924 if (!bss) 1038 if (!bss)
925 return 0; 1039 return 0;
926 1040
927 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY); 1041 bss_privacy = !!(bss->cbss.capability & WLAN_CAPABILITY_PRIVACY);
928 wep_privacy = !!ieee80211_sta_wep_configured(sdata); 1042 wep_privacy = !!ieee80211_sta_wep_configured(sdata);
929 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); 1043 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
930 1044
@@ -946,6 +1060,9 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata,
946 sdata->dev->name, ifsta->bssid); 1060 sdata->dev->name, ifsta->bssid);
947 ifsta->state = IEEE80211_STA_MLME_DISABLED; 1061 ifsta->state = IEEE80211_STA_MLME_DISABLED;
948 ieee80211_sta_send_apinfo(sdata, ifsta); 1062 ieee80211_sta_send_apinfo(sdata, ifsta);
1063 ieee80211_rx_bss_remove(sdata, ifsta->bssid,
1064 sdata->local->hw.conf.channel->center_freq,
1065 ifsta->ssid, ifsta->ssid_len);
949 return; 1066 return;
950 } 1067 }
951 1068
@@ -1049,6 +1166,30 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1049 elems.challenge_len + 2, 1); 1166 elems.challenge_len + 2, 1);
1050} 1167}
1051 1168
1169static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
1170 struct ieee80211_if_sta *ifsta,
1171 struct ieee80211_mgmt *mgmt,
1172 size_t len)
1173{
1174 u16 auth_alg, auth_transaction, status_code;
1175
1176 if (len < 24 + 6)
1177 return;
1178
1179 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1180 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1181 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1182
1183 /*
1184 * IEEE 802.11 standard does not require authentication in IBSS
1185 * networks and most implementations do not seem to use it.
1186 * However, try to reply to authentication attempts if someone
1187 * has actually implemented this.
1188 */
1189 if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1)
1190 ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0);
1191}
1192
1052static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, 1193static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1053 struct ieee80211_if_sta *ifsta, 1194 struct ieee80211_if_sta *ifsta,
1054 struct ieee80211_mgmt *mgmt, 1195 struct ieee80211_mgmt *mgmt,
@@ -1056,37 +1197,22 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1056{ 1197{
1057 u16 auth_alg, auth_transaction, status_code; 1198 u16 auth_alg, auth_transaction, status_code;
1058 1199
1059 if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE && 1200 if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE)
1060 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1061 return; 1201 return;
1062 1202
1063 if (len < 24 + 6) 1203 if (len < 24 + 6)
1064 return; 1204 return;
1065 1205
1066 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 1206 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1067 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1068 return; 1207 return;
1069 1208
1070 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 1209 if (memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
1071 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
1072 return; 1210 return;
1073 1211
1074 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); 1212 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1075 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 1213 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1076 status_code = le16_to_cpu(mgmt->u.auth.status_code); 1214 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1077 1215
1078 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1079 /*
1080 * IEEE 802.11 standard does not require authentication in IBSS
1081 * networks and most implementations do not seem to use it.
1082 * However, try to reply to authentication attempts if someone
1083 * has actually implemented this.
1084 */
1085 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
1086 return;
1087 ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0);
1088 }
1089
1090 if (auth_alg != ifsta->auth_alg || 1216 if (auth_alg != ifsta->auth_alg ||
1091 auth_transaction != ifsta->auth_transaction) 1217 auth_transaction != ifsta->auth_transaction)
1092 return; 1218 return;
@@ -1211,7 +1337,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1211 struct ieee80211_local *local = sdata->local; 1337 struct ieee80211_local *local = sdata->local;
1212 struct ieee80211_supported_band *sband; 1338 struct ieee80211_supported_band *sband;
1213 struct sta_info *sta; 1339 struct sta_info *sta;
1214 u64 rates, basic_rates; 1340 u32 rates, basic_rates;
1215 u16 capab_info, status_code, aid; 1341 u16 capab_info, status_code, aid;
1216 struct ieee802_11_elems elems; 1342 struct ieee802_11_elems elems;
1217 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1343 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
@@ -1242,6 +1368,24 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1242 sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa, 1368 sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa,
1243 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); 1369 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
1244 1370
1371 pos = mgmt->u.assoc_resp.variable;
1372 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1373
1374 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
1375 elems.timeout_int && elems.timeout_int_len == 5 &&
1376 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
1377 u32 tu, ms;
1378 tu = get_unaligned_le32(elems.timeout_int + 1);
1379 ms = tu * 1024 / 1000;
1380 printk(KERN_DEBUG "%s: AP rejected association temporarily; "
1381 "comeback duration %u TU (%u ms)\n",
1382 sdata->dev->name, tu, ms);
1383 if (ms > IEEE80211_ASSOC_TIMEOUT)
1384 mod_timer(&ifsta->timer,
1385 jiffies + msecs_to_jiffies(ms));
1386 return;
1387 }
1388
1245 if (status_code != WLAN_STATUS_SUCCESS) { 1389 if (status_code != WLAN_STATUS_SUCCESS) {
1246 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", 1390 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
1247 sdata->dev->name, status_code); 1391 sdata->dev->name, status_code);
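
The new comeback handling converts the AP's timeout interval from time units to milliseconds: one TU is 1024 microseconds, hence ms = tu * 1024 / 1000, and only a comeback longer than the normal association timeout pushes the retry timer out. Worked standalone (the timeout constant here is an invented stand-in, not the real IEEE80211_ASSOC_TIMEOUT value):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t tu = 3000;               /* comeback time from the AP */
		uint32_t ms = tu * 1024 / 1000;   /* 1 TU = 1024 microseconds  */
		uint32_t assoc_timeout_ms = 200;  /* illustrative stand-in     */

		printf("comeback %u TU = %u ms\n", tu, ms);
		if (ms > assoc_timeout_ms)
			printf("defer next association attempt by %u ms\n", ms);
		return 0;
	}

3000 TU works out to 3072 ms, so in this example the next attempt is deferred rather than fired on the usual schedule.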
@@ -1257,9 +1401,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1257 "set\n", sdata->dev->name, aid); 1401 "set\n", sdata->dev->name, aid);
1258 aid &= ~(BIT(15) | BIT(14)); 1402 aid &= ~(BIT(15) | BIT(14));
1259 1403
1260 pos = mgmt->u.assoc_resp.variable;
1261 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1262
1263 if (!elems.supp_rates) { 1404 if (!elems.supp_rates) {
1264 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 1405 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
1265 sdata->dev->name); 1406 sdata->dev->name);
@@ -1281,8 +1422,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1281 /* Add STA entry for the AP */ 1422 /* Add STA entry for the AP */
1282 sta = sta_info_get(local, ifsta->bssid); 1423 sta = sta_info_get(local, ifsta->bssid);
1283 if (!sta) { 1424 if (!sta) {
1284 struct ieee80211_bss *bss;
1285
1286 newsta = true; 1425 newsta = true;
1287 1426
1288 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); 1427 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC);
@@ -1292,15 +1431,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1292 rcu_read_unlock(); 1431 rcu_read_unlock();
1293 return; 1432 return;
1294 } 1433 }
1295 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
1296 local->hw.conf.channel->center_freq,
1297 ifsta->ssid, ifsta->ssid_len);
1298 if (bss) {
1299 sta->last_signal = bss->signal;
1300 sta->last_qual = bss->qual;
1301 sta->last_noise = bss->noise;
1302 ieee80211_rx_bss_put(local, bss);
1303 }
1304 1434
1305 /* update new sta with its last rx activity */ 1435 /* update new sta with its last rx activity */
1306 sta->last_rx = jiffies; 1436 sta->last_rx = jiffies;
@@ -1375,6 +1505,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1375 1505
1376 rate_control_rate_init(sta); 1506 rate_control_rate_init(sta);
1377 1507
1508 if (ifsta->flags & IEEE80211_STA_MFP_ENABLED)
1509 set_sta_flags(sta, WLAN_STA_MFP);
1510
1378 if (elems.wmm_param) 1511 if (elems.wmm_param)
1379 set_sta_flags(sta, WLAN_STA_WME); 1512 set_sta_flags(sta, WLAN_STA_WME);
1380 1513
@@ -1409,49 +1542,61 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1409} 1542}
1410 1543
1411 1544
1412static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 1545static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
1413 struct ieee80211_if_sta *ifsta, 1546 struct ieee80211_if_sta *ifsta,
1414 struct ieee80211_bss *bss) 1547 const u8 *bssid, const int beacon_int,
1548 const int freq,
1549 const size_t supp_rates_len,
1550 const u8 *supp_rates,
1551 const u16 capability)
1415{ 1552{
1416 struct ieee80211_local *local = sdata->local; 1553 struct ieee80211_local *local = sdata->local;
1417 int res, rates, i, j; 1554 int res = 0, rates, i, j;
1418 struct sk_buff *skb; 1555 struct sk_buff *skb;
1419 struct ieee80211_mgmt *mgmt; 1556 struct ieee80211_mgmt *mgmt;
1420 u8 *pos; 1557 u8 *pos;
1421 struct ieee80211_supported_band *sband; 1558 struct ieee80211_supported_band *sband;
1422 union iwreq_data wrqu; 1559 union iwreq_data wrqu;
1423 1560
1424 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 1561 if (local->ops->reset_tsf) {
1562 /* Reset own TSF to allow time synchronization work. */
1563 local->ops->reset_tsf(local_to_hw(local));
1564 }
1565
1566 if ((ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) &&
1567 memcmp(ifsta->bssid, bssid, ETH_ALEN) == 0)
1568 return res;
1569
1570 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
1571 sdata->u.sta.ie_proberesp_len);
1425 if (!skb) { 1572 if (!skb) {
1426 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 1573 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
1427 "response\n", sdata->dev->name); 1574 "response\n", sdata->dev->name);
1428 return -ENOMEM; 1575 return -ENOMEM;
1429 } 1576 }
1430 1577
1431 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1578 if (!(ifsta->flags & IEEE80211_STA_PREV_BSSID_SET)) {
1432 1579 /* Remove possible STA entries from other IBSS networks. */
1433 /* Remove possible STA entries from other IBSS networks. */ 1580 sta_info_flush_delayed(sdata);
1434 sta_info_flush_delayed(sdata);
1435
1436 if (local->ops->reset_tsf) {
1437 /* Reset own TSF to allow time synchronization work. */
1438 local->ops->reset_tsf(local_to_hw(local));
1439 } 1581 }
1440 memcpy(ifsta->bssid, bss->bssid, ETH_ALEN); 1582
1583 memcpy(ifsta->bssid, bssid, ETH_ALEN);
1441 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); 1584 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
1442 if (res) 1585 if (res)
1443 return res; 1586 return res;
1444 1587
1445 local->hw.conf.beacon_int = bss->beacon_int >= 10 ? bss->beacon_int : 10; 1588 local->hw.conf.beacon_int = beacon_int >= 10 ? beacon_int : 10;
1446 1589
1447 sdata->drop_unencrypted = bss->capability & 1590 sdata->drop_unencrypted = capability &
1448 WLAN_CAPABILITY_PRIVACY ? 1 : 0; 1591 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
1449 1592
1450 res = ieee80211_set_freq(sdata, bss->freq); 1593 res = ieee80211_set_freq(sdata, freq);
1451 1594
1452 if (res) 1595 if (res)
1453 return res; 1596 return res;
1454 1597
1598 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1599
1455 /* Build IBSS probe response */ 1600 /* Build IBSS probe response */
1456 1601
1457 skb_reserve(skb, local->hw.extra_tx_headroom); 1602 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -1460,33 +1605,32 @@ static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
1460 skb_put(skb, 24 + sizeof(mgmt->u.beacon)); 1605 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
1461 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 1606 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
1462 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1607 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1463 IEEE80211_STYPE_PROBE_RESP); 1608 IEEE80211_STYPE_PROBE_RESP);
1464 memset(mgmt->da, 0xff, ETH_ALEN); 1609 memset(mgmt->da, 0xff, ETH_ALEN);
1465 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 1610 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1466 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 1611 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1467 mgmt->u.beacon.beacon_int = 1612 mgmt->u.beacon.beacon_int =
1468 cpu_to_le16(local->hw.conf.beacon_int); 1613 cpu_to_le16(local->hw.conf.beacon_int);
1469 mgmt->u.beacon.timestamp = cpu_to_le64(bss->timestamp); 1614 mgmt->u.beacon.capab_info = cpu_to_le16(capability);
1470 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability);
1471 1615
1472 pos = skb_put(skb, 2 + ifsta->ssid_len); 1616 pos = skb_put(skb, 2 + ifsta->ssid_len);
1473 *pos++ = WLAN_EID_SSID; 1617 *pos++ = WLAN_EID_SSID;
1474 *pos++ = ifsta->ssid_len; 1618 *pos++ = ifsta->ssid_len;
1475 memcpy(pos, ifsta->ssid, ifsta->ssid_len); 1619 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
1476 1620
1477 rates = bss->supp_rates_len; 1621 rates = supp_rates_len;
1478 if (rates > 8) 1622 if (rates > 8)
1479 rates = 8; 1623 rates = 8;
1480 pos = skb_put(skb, 2 + rates); 1624 pos = skb_put(skb, 2 + rates);
1481 *pos++ = WLAN_EID_SUPP_RATES; 1625 *pos++ = WLAN_EID_SUPP_RATES;
1482 *pos++ = rates; 1626 *pos++ = rates;
1483 memcpy(pos, bss->supp_rates, rates); 1627 memcpy(pos, supp_rates, rates);
1484 1628
1485 if (bss->band == IEEE80211_BAND_2GHZ) { 1629 if (sband->band == IEEE80211_BAND_2GHZ) {
1486 pos = skb_put(skb, 2 + 1); 1630 pos = skb_put(skb, 2 + 1);
1487 *pos++ = WLAN_EID_DS_PARAMS; 1631 *pos++ = WLAN_EID_DS_PARAMS;
1488 *pos++ = 1; 1632 *pos++ = 1;
1489 *pos++ = ieee80211_frequency_to_channel(bss->freq); 1633 *pos++ = ieee80211_frequency_to_channel(freq);
1490 } 1634 }
1491 1635
1492 pos = skb_put(skb, 2 + 2); 1636 pos = skb_put(skb, 2 + 2);
@@ -1496,43 +1640,59 @@ static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
1496 *pos++ = 0; 1640 *pos++ = 0;
1497 *pos++ = 0; 1641 *pos++ = 0;
1498 1642
1499 if (bss->supp_rates_len > 8) { 1643 if (supp_rates_len > 8) {
1500 rates = bss->supp_rates_len - 8; 1644 rates = supp_rates_len - 8;
1501 pos = skb_put(skb, 2 + rates); 1645 pos = skb_put(skb, 2 + rates);
1502 *pos++ = WLAN_EID_EXT_SUPP_RATES; 1646 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1503 *pos++ = rates; 1647 *pos++ = rates;
1504 memcpy(pos, &bss->supp_rates[8], rates); 1648 memcpy(pos, &supp_rates[8], rates);
1505 } 1649 }
1506 1650
1651 add_extra_ies(skb, sdata->u.sta.ie_proberesp,
1652 sdata->u.sta.ie_proberesp_len);
1653
1507 ifsta->probe_resp = skb; 1654 ifsta->probe_resp = skb;
1508 1655
1509 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); 1656 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON |
1657 IEEE80211_IFCC_BEACON_ENABLED);
1510 1658
1511 1659
1512 rates = 0; 1660 rates = 0;
1513 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1661 for (i = 0; i < supp_rates_len; i++) {
1514 for (i = 0; i < bss->supp_rates_len; i++) { 1662 int bitrate = (supp_rates[i] & 0x7f) * 5;
1515 int bitrate = (bss->supp_rates[i] & 0x7f) * 5;
1516 for (j = 0; j < sband->n_bitrates; j++) 1663 for (j = 0; j < sband->n_bitrates; j++)
1517 if (sband->bitrates[j].bitrate == bitrate) 1664 if (sband->bitrates[j].bitrate == bitrate)
1518 rates |= BIT(j); 1665 rates |= BIT(j);
1519 } 1666 }
1520 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; 1667 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
1521 1668
1522 ieee80211_sta_def_wmm_params(sdata, bss); 1669 ieee80211_sta_def_wmm_params(sdata, supp_rates_len, supp_rates);
1523 1670
1671 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
1524 ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED; 1672 ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED;
1525 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 1673 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
1526 1674
1527 ieee80211_led_assoc(local, true); 1675 ieee80211_led_assoc(local, true);
1528 1676
1529 memset(&wrqu, 0, sizeof(wrqu)); 1677 memset(&wrqu, 0, sizeof(wrqu));
1530 memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); 1678 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
1531 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL); 1679 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
1532 1680
1533 return res; 1681 return res;
1534} 1682}
1535 1683
1684static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
1685 struct ieee80211_if_sta *ifsta,
1686 struct ieee80211_bss *bss)
1687{
1688 return __ieee80211_sta_join_ibss(sdata, ifsta,
1689 bss->cbss.bssid,
1690 bss->cbss.beacon_interval,
1691 bss->cbss.channel->center_freq,
1692 bss->supp_rates_len, bss->supp_rates,
1693 bss->cbss.capability);
1694}
1695
1536static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 1696static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1537 struct ieee80211_mgmt *mgmt, 1697 struct ieee80211_mgmt *mgmt,
1538 size_t len, 1698 size_t len,
@@ -1546,7 +1706,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1546 struct sta_info *sta; 1706 struct sta_info *sta;
1547 struct ieee80211_channel *channel; 1707 struct ieee80211_channel *channel;
1548 u64 beacon_timestamp, rx_timestamp; 1708 u64 beacon_timestamp, rx_timestamp;
1549 u64 supp_rates = 0; 1709 u32 supp_rates = 0;
1550 enum ieee80211_band band = rx_status->band; 1710 enum ieee80211_band band = rx_status->band;
1551 1711
1552 if (elems->ds_params && elems->ds_params_len == 1) 1712 if (elems->ds_params && elems->ds_params_len == 1)
@@ -1567,7 +1727,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1567 1727
1568 sta = sta_info_get(local, mgmt->sa); 1728 sta = sta_info_get(local, mgmt->sa);
1569 if (sta) { 1729 if (sta) {
1570 u64 prev_rates; 1730 u32 prev_rates;
1571 1731
1572 prev_rates = sta->sta.supp_rates[band]; 1732 prev_rates = sta->sta.supp_rates[band];
1573 /* make sure mandatory rates are always added */ 1733 /* make sure mandatory rates are always added */
@@ -1593,80 +1753,99 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1593 } 1753 }
1594 1754
1595 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, 1755 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
1596 freq, beacon); 1756 channel, beacon);
1597 if (!bss) 1757 if (!bss)
1598 return; 1758 return;
1599 1759
1760 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
1761 (memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0)) {
1762 struct ieee80211_channel_sw_ie *sw_elem =
1763 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
1764 ieee80211_process_chanswitch(sdata, sw_elem, bss);
1765 }
1766
1600 /* was just updated in ieee80211_bss_info_update */ 1767 /* was just updated in ieee80211_bss_info_update */
1601 beacon_timestamp = bss->timestamp; 1768 beacon_timestamp = bss->cbss.tsf;
1602 1769
1603 /* 1770 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1604 * In STA mode, the remaining parameters should not be overridden 1771 goto put_bss;
1605 * by beacons because they're not necessarily accurate there.
1606 */
1607 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1608 bss->last_probe_resp && beacon) {
1609 ieee80211_rx_bss_put(local, bss);
1610 return;
1611 }
1612 1772
1613 /* check if we need to merge IBSS */ 1773 /* check if we need to merge IBSS */
1614 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && beacon && 1774
1615 bss->capability & WLAN_CAPABILITY_IBSS && 1775 /* merge only on beacons (???) */
1616 bss->freq == local->oper_channel->center_freq && 1776 if (!beacon)
1617 elems->ssid_len == sdata->u.sta.ssid_len && 1777 goto put_bss;
1778
1779 /* we use a fixed BSSID */
1780 if (sdata->u.sta.flags & IEEE80211_STA_BSSID_SET)
1781 goto put_bss;
1782
1783 /* not an IBSS */
1784 if (!(bss->cbss.capability & WLAN_CAPABILITY_IBSS))
1785 goto put_bss;
1786
1787 /* different channel */
1788 if (bss->cbss.channel != local->oper_channel)
1789 goto put_bss;
1790
1791 /* different SSID */
1792 if (elems->ssid_len != sdata->u.sta.ssid_len ||
1618 memcmp(elems->ssid, sdata->u.sta.ssid, 1793 memcmp(elems->ssid, sdata->u.sta.ssid,
1619 sdata->u.sta.ssid_len) == 0) { 1794 sdata->u.sta.ssid_len))
1620 if (rx_status->flag & RX_FLAG_TSFT) { 1795 goto put_bss;
1621 /* in order for correct IBSS merging we need mactime 1796
1622 * 1797 if (rx_status->flag & RX_FLAG_TSFT) {
1623 * since mactime is defined as the time the first data 1798 /*
1624 * symbol of the frame hits the PHY, and the timestamp 1799 * For correct IBSS merging we need mactime; since mactime is
1625 * of the beacon is defined as "the time that the data 1800 * defined as the time the first data symbol of the frame hits
1626 * symbol containing the first bit of the timestamp is 1801 * the PHY, and the timestamp of the beacon is defined as "the
1627 * transmitted to the PHY plus the transmitting STA’s 1802 * time that the data symbol containing the first bit of the
1628 * delays through its local PHY from the MAC-PHY 1803 * timestamp is transmitted to the PHY plus the transmitting
1629 * interface to its interface with the WM" 1804 * STA's delays through its local PHY from the MAC-PHY
1630 * (802.11 11.1.2) - equals the time this bit arrives at 1805 * interface to its interface with the WM" (802.11 11.1.2)
1631 * the receiver - we have to take into account the 1806 * - equals the time this bit arrives at the receiver - we have
1632 * offset between the two. 1807 * to take into account the offset between the two.
1633 * e.g: at 1 MBit that means mactime is 192 usec earlier 1808 *
1634 * (=24 bytes * 8 usecs/byte) than the beacon timestamp. 1809 * E.g. at 1 MBit that means mactime is 192 usec earlier
1635 */ 1810 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
1636 int rate; 1811 */
1637 if (rx_status->flag & RX_FLAG_HT) { 1812 int rate;
1638 rate = 65; /* TODO: HT rates */ 1813
1639 } else { 1814 if (rx_status->flag & RX_FLAG_HT)
1640 rate = local->hw.wiphy->bands[band]-> 1815 rate = 65; /* TODO: HT rates */
1641 bitrates[rx_status->rate_idx].bitrate;
1642 }
1643 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
1644 } else if (local && local->ops && local->ops->get_tsf)
1645 /* second best option: get current TSF */
1646 rx_timestamp = local->ops->get_tsf(local_to_hw(local));
1647 else 1816 else
1648 /* can't merge without knowing the TSF */ 1817 rate = local->hw.wiphy->bands[band]->
1649 rx_timestamp = -1LLU; 1818 bitrates[rx_status->rate_idx].bitrate;
1819
1820 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
1821 } else if (local && local->ops && local->ops->get_tsf)
1822 /* second best option: get current TSF */
1823 rx_timestamp = local->ops->get_tsf(local_to_hw(local));
1824 else
1825 /* can't merge without knowing the TSF */
1826 rx_timestamp = -1LLU;
1827
1650#ifdef CONFIG_MAC80211_IBSS_DEBUG 1828#ifdef CONFIG_MAC80211_IBSS_DEBUG
1651 printk(KERN_DEBUG "RX beacon SA=%pM BSSID=" 1829 printk(KERN_DEBUG "RX beacon SA=%pM BSSID="
1652 "%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", 1830 "%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
1653 mgmt->sa, mgmt->bssid, 1831 mgmt->sa, mgmt->bssid,
1654 (unsigned long long)rx_timestamp, 1832 (unsigned long long)rx_timestamp,
1655 (unsigned long long)beacon_timestamp, 1833 (unsigned long long)beacon_timestamp,
1656 (unsigned long long)(rx_timestamp - beacon_timestamp), 1834 (unsigned long long)(rx_timestamp - beacon_timestamp),
1657 jiffies); 1835 jiffies);
1658#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 1836#endif
1659 if (beacon_timestamp > rx_timestamp) { 1837
1838 if (beacon_timestamp > rx_timestamp) {
1660#ifdef CONFIG_MAC80211_IBSS_DEBUG 1839#ifdef CONFIG_MAC80211_IBSS_DEBUG
1661 printk(KERN_DEBUG "%s: beacon TSF higher than " 1840 printk(KERN_DEBUG "%s: beacon TSF higher than "
1662 "local TSF - IBSS merge with BSSID %pM\n", 1841 "local TSF - IBSS merge with BSSID %pM\n",
1663 sdata->dev->name, mgmt->bssid); 1842 sdata->dev->name, mgmt->bssid);
1664#endif 1843#endif
1665 ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss); 1844 ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss);
1666 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 1845 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates);
1667 }
1668 } 1846 }
1669 1847
1848 put_bss:
1670 ieee80211_rx_bss_put(local, bss); 1849 ieee80211_rx_bss_put(local, bss);
1671} 1850}
1672 1851
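
The rewritten merge path keeps the old mactime correction: mactime marks the first data symbol of the frame, but the 64-bit timestamp field only starts 24 bytes in, so at a bitrate of rate (in 100 kb/s units, as in struct ieee80211_rate) the timestamp bit arrives 24 * 8 * 10 / rate microseconds after mactime. A standalone recomputation (the helper name and sample mactime are invented):

	#include <stdio.h>
	#include <stdint.h>

	/* microseconds needed to clock out the 24-byte 802.11 header;
	 * rate is in 100 kb/s units */
	static unsigned int hdr_offset_usec(unsigned int rate)
	{
		return 24 * 8 * 10 / rate;
	}

	int main(void)
	{
		uint64_t mactime = 123456789ULL;  /* fake RX mactime */

		printf("1 Mb/s:  +%u us -> rx_timestamp %llu\n",
		       hdr_offset_usec(10),
		       (unsigned long long)(mactime + hdr_offset_usec(10)));
		printf("54 Mb/s: +%u us\n", hdr_offset_usec(540));
		return 0;
	}

At 1 Mb/s the offset is the 192 us quoted in the comment (24 bytes at 8 us per byte); at 54 Mb/s the same integer expression yields 3 us.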
@@ -1712,7 +1891,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1712 struct ieee802_11_elems elems; 1891 struct ieee802_11_elems elems;
1713 struct ieee80211_local *local = sdata->local; 1892 struct ieee80211_local *local = sdata->local;
1714 u32 changed = 0; 1893 u32 changed = 0;
1715 bool erp_valid; 1894 bool erp_valid, directed_tim;
1716 u8 erp_value = 0; 1895 u8 erp_value = 0;
1717 1896
1718 /* Process beacon from the current BSS */ 1897 /* Process beacon from the current BSS */
@@ -1732,9 +1911,37 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1732 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 1911 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
1733 return; 1912 return;
1734 1913
1914 if (rx_status->freq != local->hw.conf.channel->center_freq)
1915 return;
1916
1735 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param, 1917 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
1736 elems.wmm_param_len); 1918 elems.wmm_param_len);
1737 1919
1920 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK &&
1921 local->hw.conf.flags & IEEE80211_CONF_PS) {
1922 directed_tim = ieee80211_check_tim(&elems, ifsta->aid);
1923
1924 if (directed_tim) {
1925 if (local->hw.conf.dynamic_ps_timeout > 0) {
1926 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1927 ieee80211_hw_config(local,
1928 IEEE80211_CONF_CHANGE_PS);
1929 ieee80211_send_nullfunc(local, sdata, 0);
1930 } else {
1931 local->pspolling = true;
1932
1933 /*
1934 * Here is assumed that the driver will be
1935 * able to send ps-poll frame and receive a
1936 * response even though power save mode is
1937 * enabled, but some drivers might require
1938 * to disable power save here. This needs
1939 * to be investigated.
1940 */
1941 ieee80211_send_pspoll(local, sdata);
1942 }
1943 }
1944 }
1738 1945
1739 if (elems.erp_info && elems.erp_info_len >= 1) { 1946 if (elems.erp_info && elems.erp_info_len >= 1) {
1740 erp_valid = true; 1947 erp_valid = true;
@@ -1778,6 +1985,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1778 * for the BSSID we are associated to */ 1985 * for the BSSID we are associated to */
1779 regulatory_hint_11d(local->hw.wiphy, 1986 regulatory_hint_11d(local->hw.wiphy,
1780 elems.country_elem, elems.country_elem_len); 1987 elems.country_elem, elems.country_elem_len);
1988
1989 /* TODO: IBSS also needs this */
1990 if (elems.pwr_constr_elem)
1991 ieee80211_handle_pwr_constr(sdata,
1992 le16_to_cpu(mgmt->u.probe_resp.capab_info),
1993 elems.pwr_constr_elem,
1994 elems.pwr_constr_elem_len);
1781 } 1995 }
1782 1996
1783 ieee80211_bss_info_change_notify(sdata, changed); 1997 ieee80211_bss_info_change_notify(sdata, changed);
@@ -1787,8 +2001,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1787static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, 2001static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
1788 struct ieee80211_if_sta *ifsta, 2002 struct ieee80211_if_sta *ifsta,
1789 struct ieee80211_mgmt *mgmt, 2003 struct ieee80211_mgmt *mgmt,
1790 size_t len, 2004 size_t len)
1791 struct ieee80211_rx_status *rx_status)
1792{ 2005{
1793 struct ieee80211_local *local = sdata->local; 2006 struct ieee80211_local *local = sdata->local;
1794 int tx_last_beacon; 2007 int tx_last_beacon;
@@ -1796,8 +2009,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
1796 struct ieee80211_mgmt *resp; 2009 struct ieee80211_mgmt *resp;
1797 u8 *pos, *end; 2010 u8 *pos, *end;
1798 2011
1799 if (sdata->vif.type != NL80211_IFTYPE_ADHOC || 2012 if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED ||
1800 ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED ||
1801 len < 24 + 2 || !ifsta->probe_resp) 2013 len < 24 + 2 || !ifsta->probe_resp)
1802 return; 2014 return;
1803 2015
@@ -1901,32 +2113,54 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1901 mgmt = (struct ieee80211_mgmt *) skb->data; 2113 mgmt = (struct ieee80211_mgmt *) skb->data;
1902 fc = le16_to_cpu(mgmt->frame_control); 2114 fc = le16_to_cpu(mgmt->frame_control);
1903 2115
1904 switch (fc & IEEE80211_FCTL_STYPE) { 2116 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1905 case IEEE80211_STYPE_PROBE_REQ: 2117 switch (fc & IEEE80211_FCTL_STYPE) {
1906 ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt, skb->len, 2118 case IEEE80211_STYPE_PROBE_REQ:
1907 rx_status); 2119 ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt,
1908 break; 2120 skb->len);
1909 case IEEE80211_STYPE_PROBE_RESP: 2121 break;
1910 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status); 2122 case IEEE80211_STYPE_PROBE_RESP:
1911 break; 2123 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
1912 case IEEE80211_STYPE_BEACON: 2124 rx_status);
1913 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); 2125 break;
1914 break; 2126 case IEEE80211_STYPE_BEACON:
1915 case IEEE80211_STYPE_AUTH: 2127 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
1916 ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len); 2128 rx_status);
1917 break; 2129 break;
1918 case IEEE80211_STYPE_ASSOC_RESP: 2130 case IEEE80211_STYPE_AUTH:
1919 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); 2131 ieee80211_rx_mgmt_auth_ibss(sdata, ifsta, mgmt,
1920 break; 2132 skb->len);
1921 case IEEE80211_STYPE_REASSOC_RESP: 2133 break;
1922 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); 2134 }
1923 break; 2135 } else { /* NL80211_IFTYPE_STATION */
1924 case IEEE80211_STYPE_DEAUTH: 2136 switch (fc & IEEE80211_FCTL_STYPE) {
1925 ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len); 2137 case IEEE80211_STYPE_PROBE_RESP:
1926 break; 2138 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
1927 case IEEE80211_STYPE_DISASSOC: 2139 rx_status);
1928 ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt, skb->len); 2140 break;
1929 break; 2141 case IEEE80211_STYPE_BEACON:
2142 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
2143 rx_status);
2144 break;
2145 case IEEE80211_STYPE_AUTH:
2146 ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len);
2147 break;
2148 case IEEE80211_STYPE_ASSOC_RESP:
2149 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt,
2150 skb->len, 0);
2151 break;
2152 case IEEE80211_STYPE_REASSOC_RESP:
2153 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt,
2154 skb->len, 1);
2155 break;
2156 case IEEE80211_STYPE_DEAUTH:
2157 ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len);
2158 break;
2159 case IEEE80211_STYPE_DISASSOC:
2160 ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt,
2161 skb->len);
2162 break;
2163 }
1930 } 2164 }
1931 2165
1932 kfree_skb(skb); 2166 kfree_skb(skb);
@@ -1965,9 +2199,21 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata,
1965 if (ieee80211_sta_active_ibss(sdata)) 2199 if (ieee80211_sta_active_ibss(sdata))
1966 return; 2200 return;
1967 2201
2202 if ((sdata->u.sta.flags & IEEE80211_STA_BSSID_SET) &&
2203 (!(sdata->u.sta.flags & IEEE80211_STA_AUTO_CHANNEL_SEL)))
2204 return;
2205
1968 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 2206 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
1969 "IBSS networks with same SSID (merge)\n", sdata->dev->name); 2207 "IBSS networks with same SSID (merge)\n", sdata->dev->name);
1970 ieee80211_request_scan(sdata, ifsta->ssid, ifsta->ssid_len); 2208
2209 /* XXX maybe racy? */
2210 if (sdata->local->scan_req)
2211 return;
2212
2213 memcpy(sdata->local->int_scan_req.ssids[0].ssid,
2214 ifsta->ssid, IEEE80211_MAX_SSID_LEN);
2215 sdata->local->int_scan_req.ssids[0].ssid_len = ifsta->ssid_len;
2216 ieee80211_request_scan(sdata, &sdata->local->int_scan_req);
1971} 2217}
1972 2218
1973 2219
@@ -2013,94 +2259,56 @@ static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata,
2013 netif_carrier_off(sdata->dev); 2259 netif_carrier_off(sdata->dev);
2014} 2260}
2015 2261
2016
2017static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
2018 const char *ssid, int ssid_len)
2019{
2020 int tmp, hidden_ssid;
2021
2022 if (ssid_len == ifsta->ssid_len &&
2023 !memcmp(ifsta->ssid, ssid, ssid_len))
2024 return 1;
2025
2026 if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL)
2027 return 0;
2028
2029 hidden_ssid = 1;
2030 tmp = ssid_len;
2031 while (tmp--) {
2032 if (ssid[tmp] != '\0') {
2033 hidden_ssid = 0;
2034 break;
2035 }
2036 }
2037
2038 if (hidden_ssid && (ifsta->ssid_len == ssid_len || ssid_len == 0))
2039 return 1;
2040
2041 if (ssid_len == 1 && ssid[0] == ' ')
2042 return 1;
2043
2044 return 0;
2045}
2046
2047static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata, 2262static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata,
2048 struct ieee80211_if_sta *ifsta) 2263 struct ieee80211_if_sta *ifsta)
2049{ 2264{
2050 struct ieee80211_local *local = sdata->local; 2265 struct ieee80211_local *local = sdata->local;
2051 struct ieee80211_bss *bss;
2052 struct ieee80211_supported_band *sband; 2266 struct ieee80211_supported_band *sband;
2053 u8 bssid[ETH_ALEN], *pos; 2267 u8 *pos;
2268 u8 bssid[ETH_ALEN];
2269 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
2270 u16 capability;
2054 int i; 2271 int i;
2055 int ret; 2272
2056 2273 if (sdata->u.sta.flags & IEEE80211_STA_BSSID_SET) {
2057#if 0 2274 memcpy(bssid, ifsta->bssid, ETH_ALEN);
2058 /* Easier testing, use fixed BSSID. */ 2275 } else {
2059 memset(bssid, 0xfe, ETH_ALEN); 2276 /* Generate random, not broadcast, locally administered BSSID. Mix in
2060#else 2277 * own MAC address to make sure that devices that do not have proper
2061 /* Generate random, not broadcast, locally administered BSSID. Mix in 2278 * random number generator get different BSSID. */
2062 * own MAC address to make sure that devices that do not have proper 2279 get_random_bytes(bssid, ETH_ALEN);
2063 * random number generator get different BSSID. */ 2280 for (i = 0; i < ETH_ALEN; i++)
2064 get_random_bytes(bssid, ETH_ALEN); 2281 bssid[i] ^= sdata->dev->dev_addr[i];
2065 for (i = 0; i < ETH_ALEN; i++) 2282 bssid[0] &= ~0x01;
2066 bssid[i] ^= sdata->dev->dev_addr[i]; 2283 bssid[0] |= 0x02;
2067 bssid[0] &= ~0x01; 2284 }
2068 bssid[0] |= 0x02;
2069#endif
2070 2285
2071 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 2286 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
2072 sdata->dev->name, bssid); 2287 sdata->dev->name, bssid);
2073 2288
2074 bss = ieee80211_rx_bss_add(local, bssid, 2289 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2075 local->hw.conf.channel->center_freq,
2076 sdata->u.sta.ssid, sdata->u.sta.ssid_len);
2077 if (!bss)
2078 return -ENOMEM;
2079
2080 bss->band = local->hw.conf.channel->band;
2081 sband = local->hw.wiphy->bands[bss->band];
2082 2290
2083 if (local->hw.conf.beacon_int == 0) 2291 if (local->hw.conf.beacon_int == 0)
2084 local->hw.conf.beacon_int = 100; 2292 local->hw.conf.beacon_int = 100;
2085 bss->beacon_int = local->hw.conf.beacon_int; 2293
2086 bss->last_update = jiffies; 2294 capability = WLAN_CAPABILITY_IBSS;
2087 bss->capability = WLAN_CAPABILITY_IBSS;
2088 2295
2089 if (sdata->default_key) 2296 if (sdata->default_key)
2090 bss->capability |= WLAN_CAPABILITY_PRIVACY; 2297 capability |= WLAN_CAPABILITY_PRIVACY;
2091 else 2298 else
2092 sdata->drop_unencrypted = 0; 2299 sdata->drop_unencrypted = 0;
2093 2300
2094 bss->supp_rates_len = sband->n_bitrates; 2301 pos = supp_rates;
2095 pos = bss->supp_rates;
2096 for (i = 0; i < sband->n_bitrates; i++) { 2302 for (i = 0; i < sband->n_bitrates; i++) {
2097 int rate = sband->bitrates[i].bitrate; 2303 int rate = sband->bitrates[i].bitrate;
2098 *pos++ = (u8) (rate / 5); 2304 *pos++ = (u8) (rate / 5);
2099 } 2305 }
2100 2306
2101 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss); 2307 return __ieee80211_sta_join_ibss(sdata, ifsta,
2102 ieee80211_rx_bss_put(local, bss); 2308 bssid, local->hw.conf.beacon_int,
2103 return ret; 2309 local->hw.conf.channel->center_freq,
2310 sband->n_bitrates, supp_rates,
2311 capability);
2104} 2312}
2105 2313
2106 2314
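
When no fixed BSSID is configured, the create-IBSS path above derives one that is random, unicast and locally administered: the first octet has its individual/group bit cleared and its universal/local bit set, and the device's own MAC is XORed in so that boxes with weak random number generators still end up with different BSSIDs. A userspace sketch of the same bit handling (rand() stands in for get_random_bytes() purely for illustration):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>
	#include <time.h>

	#define ETH_ALEN 6

	int main(void)
	{
		uint8_t dev_addr[ETH_ALEN] = { 0x00, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f };
		uint8_t bssid[ETH_ALEN];
		int i;

		srand(time(NULL));
		for (i = 0; i < ETH_ALEN; i++)
			bssid[i] = (rand() & 0xff) ^ dev_addr[i]; /* mix in own MAC */

		bssid[0] &= ~0x01;  /* clear I/G bit: not a group address */
		bssid[0] |= 0x02;   /* set U/L bit: locally administered  */

		printf("IBSS BSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
		       bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
		return 0;
	}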
@@ -2109,8 +2317,6 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata,
2109{ 2317{
2110 struct ieee80211_local *local = sdata->local; 2318 struct ieee80211_local *local = sdata->local;
2111 struct ieee80211_bss *bss; 2319 struct ieee80211_bss *bss;
2112 int found = 0;
2113 u8 bssid[ETH_ALEN];
2114 int active_ibss; 2320 int active_ibss;
2115 2321
2116 if (ifsta->ssid_len == 0) 2322 if (ifsta->ssid_len == 0)
@@ -2121,51 +2327,39 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata,
2121 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 2327 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
2122 sdata->dev->name, active_ibss); 2328 sdata->dev->name, active_ibss);
2123#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2329#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2124 spin_lock_bh(&local->bss_lock); 2330
2125 list_for_each_entry(bss, &local->bss_list, list) { 2331 if (active_ibss)
2126 if (ifsta->ssid_len != bss->ssid_len || 2332 return 0;
2127 memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0 2333
2128 || !(bss->capability & WLAN_CAPABILITY_IBSS)) 2334 if (ifsta->flags & IEEE80211_STA_BSSID_SET)
2129 continue; 2335 bss = ieee80211_rx_bss_get(local, ifsta->bssid, 0,
2130#ifdef CONFIG_MAC80211_IBSS_DEBUG 2336 ifsta->ssid, ifsta->ssid_len);
2131 printk(KERN_DEBUG " bssid=%pM found\n", bss->bssid); 2337 else
2132#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2338 bss = (void *)cfg80211_get_ibss(local->hw.wiphy,
2133 memcpy(bssid, bss->bssid, ETH_ALEN); 2339 NULL,
2134 found = 1; 2340 ifsta->ssid, ifsta->ssid_len);
2135 if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0)
2136 break;
2137 }
2138 spin_unlock_bh(&local->bss_lock);
2139 2341
2140#ifdef CONFIG_MAC80211_IBSS_DEBUG 2342#ifdef CONFIG_MAC80211_IBSS_DEBUG
2141 if (found) 2343 if (bss)
2142 printk(KERN_DEBUG " sta_find_ibss: selected %pM current " 2344 printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
2143 "%pM\n", bssid, ifsta->bssid); 2345 "%pM\n", bss->cbss.bssid, ifsta->bssid);
2144#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2346#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2145 2347
2146 if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { 2348 if (bss &&
2349 (!(ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) ||
2350 memcmp(ifsta->bssid, bss->cbss.bssid, ETH_ALEN))) {
2147 int ret; 2351 int ret;
2148 int search_freq;
2149
2150 if (ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL)
2151 search_freq = bss->freq;
2152 else
2153 search_freq = local->hw.conf.channel->center_freq;
2154
2155 bss = ieee80211_rx_bss_get(local, bssid, search_freq,
2156 ifsta->ssid, ifsta->ssid_len);
2157 if (!bss)
2158 goto dont_join;
2159 2352
2160 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 2353 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
2161 " based on configured SSID\n", 2354 " based on configured SSID\n",
2162 sdata->dev->name, bssid); 2355 sdata->dev->name, bss->cbss.bssid);
2356
2163 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss); 2357 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
2164 ieee80211_rx_bss_put(local, bss); 2358 ieee80211_rx_bss_put(local, bss);
2165 return ret; 2359 return ret;
2166 } 2360 } else if (bss)
2361 ieee80211_rx_bss_put(local, bss);
2167 2362
2168dont_join:
2169#ifdef CONFIG_MAC80211_IBSS_DEBUG 2363#ifdef CONFIG_MAC80211_IBSS_DEBUG
2170 printk(KERN_DEBUG " did not try to join ibss\n"); 2364 printk(KERN_DEBUG " did not try to join ibss\n");
2171#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2365#endif /* CONFIG_MAC80211_IBSS_DEBUG */
@@ -2179,8 +2373,15 @@ dont_join:
2179 IEEE80211_SCAN_INTERVAL)) { 2373 IEEE80211_SCAN_INTERVAL)) {
2180 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 2374 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
2181 "join\n", sdata->dev->name); 2375 "join\n", sdata->dev->name);
2182 return ieee80211_request_scan(sdata, ifsta->ssid, 2376
2183 ifsta->ssid_len); 2377 /* XXX maybe racy? */
2378 if (local->scan_req)
2379 return -EBUSY;
2380
2381 memcpy(local->int_scan_req.ssids[0].ssid,
2382 ifsta->ssid, IEEE80211_MAX_SSID_LEN);
2383 local->int_scan_req.ssids[0].ssid_len = ifsta->ssid_len;
2384 return ieee80211_request_scan(sdata, &local->int_scan_req);
2184 } else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) { 2385 } else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) {
2185 int interval = IEEE80211_SCAN_INTERVAL; 2386 int interval = IEEE80211_SCAN_INTERVAL;
2186 2387
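The hunk above stops passing a bare (ssid, len) pair and instead fills the driver-wide int_scan_req before handing it to ieee80211_request_scan(). A minimal standalone sketch of that "copy the configured SSID into the first slot of a pre-allocated request" pattern follows; the structs are simplified stand-ins for cfg80211's scan request, not the kernel's definitions, and all names are illustrative.

#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

/* simplified stand-in for a scan request with one directed SSID slot */
struct scan_ssid { unsigned char ssid[MAX_SSID_LEN]; unsigned char ssid_len; };
struct scan_request { struct scan_ssid ssids[1]; int n_ssids; };

/* copy the configured SSID into the pre-allocated internal request */
static void fill_int_scan_req(struct scan_request *req,
                              const unsigned char *ssid, unsigned char ssid_len)
{
    memset(req->ssids[0].ssid, 0, MAX_SSID_LEN);
    memcpy(req->ssids[0].ssid, ssid, ssid_len);
    req->ssids[0].ssid_len = ssid_len;
    req->n_ssids = 1;
}

int main(void)
{
    struct scan_request req;

    fill_int_scan_req(&req, (const unsigned char *)"my-ibss", 7);
    printf("scanning for \"%.*s\"\n", req.ssids[0].ssid_len,
           (const char *)req.ssids[0].ssid);
    return 0;
}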
@@ -2214,76 +2415,81 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata,
2214 struct ieee80211_if_sta *ifsta) 2415 struct ieee80211_if_sta *ifsta)
2215{ 2416{
2216 struct ieee80211_local *local = sdata->local; 2417 struct ieee80211_local *local = sdata->local;
2217 struct ieee80211_bss *bss, *selected = NULL; 2418 struct ieee80211_bss *bss;
2218 int top_rssi = 0, freq; 2419 u8 *bssid = ifsta->bssid, *ssid = ifsta->ssid;
2219 2420 u8 ssid_len = ifsta->ssid_len;
2220 spin_lock_bh(&local->bss_lock); 2421 u16 capa_mask = WLAN_CAPABILITY_ESS;
2221 freq = local->oper_channel->center_freq; 2422 u16 capa_val = WLAN_CAPABILITY_ESS;
2222 list_for_each_entry(bss, &local->bss_list, list) { 2423 struct ieee80211_channel *chan = local->oper_channel;
2223 if (!(bss->capability & WLAN_CAPABILITY_ESS)) 2424
2224 continue; 2425 if (ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
2225 2426 IEEE80211_STA_AUTO_BSSID_SEL |
2226 if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL | 2427 IEEE80211_STA_AUTO_CHANNEL_SEL)) {
2227 IEEE80211_STA_AUTO_BSSID_SEL | 2428 capa_mask |= WLAN_CAPABILITY_PRIVACY;
2228 IEEE80211_STA_AUTO_CHANNEL_SEL)) && 2429 if (sdata->default_key)
2229 (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^ 2430 capa_val |= WLAN_CAPABILITY_PRIVACY;
2230 !!sdata->default_key))
2231 continue;
2232
2233 if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&
2234 bss->freq != freq)
2235 continue;
2236
2237 if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) &&
2238 memcmp(bss->bssid, ifsta->bssid, ETH_ALEN))
2239 continue;
2240
2241 if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) &&
2242 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
2243 continue;
2244
2245 if (!selected || top_rssi < bss->signal) {
2246 selected = bss;
2247 top_rssi = bss->signal;
2248 }
2249 } 2431 }
2250 if (selected)
2251 atomic_inc(&selected->users);
2252 spin_unlock_bh(&local->bss_lock);
2253 2432
2254 if (selected) { 2433 if (ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL)
2255 ieee80211_set_freq(sdata, selected->freq); 2434 chan = NULL;
2435
2436 if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL)
2437 bssid = NULL;
2438
2439 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) {
2440 ssid = NULL;
2441 ssid_len = 0;
2442 }
2443
2444 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan,
2445 bssid, ssid, ssid_len,
2446 capa_mask, capa_val);
2447
2448 if (bss) {
2449 ieee80211_set_freq(sdata, bss->cbss.channel->center_freq);
2256 if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) 2450 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
2257 ieee80211_sta_set_ssid(sdata, selected->ssid, 2451 ieee80211_sta_set_ssid(sdata, bss->ssid,
2258 selected->ssid_len); 2452 bss->ssid_len);
2259 ieee80211_sta_set_bssid(sdata, selected->bssid); 2453 ieee80211_sta_set_bssid(sdata, bss->cbss.bssid);
2260 ieee80211_sta_def_wmm_params(sdata, selected); 2454 ieee80211_sta_def_wmm_params(sdata, bss->supp_rates_len,
2455 bss->supp_rates);
2456 if (sdata->u.sta.mfp == IEEE80211_MFP_REQUIRED)
2457 sdata->u.sta.flags |= IEEE80211_STA_MFP_ENABLED;
2458 else
2459 sdata->u.sta.flags &= ~IEEE80211_STA_MFP_ENABLED;
2261 2460
2262 /* Send out direct probe if no probe resp was received or 2461 /* Send out direct probe if no probe resp was received or
2263 * the one we have is outdated 2462 * the one we have is outdated
2264 */ 2463 */
2265 if (!selected->last_probe_resp || 2464 if (!bss->last_probe_resp ||
2266 time_after(jiffies, selected->last_probe_resp 2465 time_after(jiffies, bss->last_probe_resp
2267 + IEEE80211_SCAN_RESULT_EXPIRE)) 2466 + IEEE80211_SCAN_RESULT_EXPIRE))
2268 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; 2467 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
2269 else 2468 else
2270 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; 2469 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
2271 2470
2272 ieee80211_rx_bss_put(local, selected); 2471 ieee80211_rx_bss_put(local, bss);
2273 ieee80211_sta_reset_auth(sdata, ifsta); 2472 ieee80211_sta_reset_auth(sdata, ifsta);
2274 return 0; 2473 return 0;
2275 } else { 2474 } else {
2276 if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) { 2475 if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
2277 ifsta->assoc_scan_tries++; 2476 ifsta->assoc_scan_tries++;
2477 /* XXX maybe racy? */
2478 if (local->scan_req)
2479 return -1;
2480 memcpy(local->int_scan_req.ssids[0].ssid,
2481 ifsta->ssid, IEEE80211_MAX_SSID_LEN);
2278 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) 2482 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL)
2279 ieee80211_start_scan(sdata, NULL, 0); 2483 local->int_scan_req.ssids[0].ssid_len = 0;
2280 else 2484 else
2281 ieee80211_start_scan(sdata, ifsta->ssid, 2485 local->int_scan_req.ssids[0].ssid_len = ifsta->ssid_len;
2282 ifsta->ssid_len); 2486 ieee80211_start_scan(sdata, &local->int_scan_req);
2283 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; 2487 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
2284 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); 2488 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
2285 } else 2489 } else {
2490 ifsta->assoc_scan_tries = 0;
2286 ifsta->state = IEEE80211_STA_MLME_DISABLED; 2491 ifsta->state = IEEE80211_STA_MLME_DISABLED;
2492 }
2287 } 2493 }
2288 return -1; 2494 return -1;
2289} 2495}
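The rewritten ieee80211_sta_config_auth() no longer walks a private BSS list; it asks cfg80211 for a BSS whose capability field satisfies (capab & capa_mask) == capa_val, always requiring ESS and, when auto-selection is in use, a privacy bit that matches whether a default key is configured. A small standalone model of that matching rule is below; the constant values match the 802.11 capability bits as I understand them, so treat them as assumptions.

#include <stdio.h>

#define CAP_ESS     0x0001  /* WLAN_CAPABILITY_ESS */
#define CAP_PRIVACY 0x0010  /* WLAN_CAPABILITY_PRIVACY */

/* does a BSS capability field satisfy the mask/value filter? */
static int capa_matches(unsigned short capab,
                        unsigned short mask, unsigned short val)
{
    return (capab & mask) == val;
}

int main(void)
{
    /* auto-selection with a default key set: require ESS and privacy */
    unsigned short mask = CAP_ESS | CAP_PRIVACY;
    unsigned short val  = CAP_ESS | CAP_PRIVACY;

    printf("open AP matches: %d\n", capa_matches(CAP_ESS, mask, val));
    printf("protected AP matches: %d\n",
           capa_matches(CAP_ESS | CAP_PRIVACY, mask, val));
    return 0;
}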
@@ -2315,8 +2521,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2315 ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE && 2521 ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
2316 ifsta->state != IEEE80211_STA_MLME_ASSOCIATE && 2522 ifsta->state != IEEE80211_STA_MLME_ASSOCIATE &&
2317 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { 2523 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
2318 ieee80211_start_scan(sdata, ifsta->scan_ssid, 2524 ieee80211_start_scan(sdata, local->scan_req);
2319 ifsta->scan_ssid_len);
2320 return; 2525 return;
2321 } 2526 }
2322 2527
@@ -2376,8 +2581,11 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2376 2581
2377 ifsta = &sdata->u.sta; 2582 ifsta = &sdata->u.sta;
2378 INIT_WORK(&ifsta->work, ieee80211_sta_work); 2583 INIT_WORK(&ifsta->work, ieee80211_sta_work);
2584 INIT_WORK(&ifsta->chswitch_work, ieee80211_chswitch_work);
2379 setup_timer(&ifsta->timer, ieee80211_sta_timer, 2585 setup_timer(&ifsta->timer, ieee80211_sta_timer,
2380 (unsigned long) sdata); 2586 (unsigned long) sdata);
2587 setup_timer(&ifsta->chswitch_timer, ieee80211_chswitch_timer,
2588 (unsigned long) sdata);
2381 skb_queue_head_init(&ifsta->skb_queue); 2589 skb_queue_head_init(&ifsta->skb_queue);
2382 2590
2383 ifsta->capab = WLAN_CAPABILITY_ESS; 2591 ifsta->capab = WLAN_CAPABILITY_ESS;
@@ -2396,7 +2604,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2396 * must be callable in atomic context. 2604 * must be callable in atomic context.
2397 */ 2605 */
2398struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 2606struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
2399 u8 *bssid,u8 *addr, u64 supp_rates) 2607 u8 *bssid,u8 *addr, u32 supp_rates)
2400{ 2608{
2401 struct ieee80211_local *local = sdata->local; 2609 struct ieee80211_local *local = sdata->local;
2402 struct sta_info *sta; 2610 struct sta_info *sta;
@@ -2474,16 +2682,16 @@ int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size
2474 memset(ifsta->ssid, 0, sizeof(ifsta->ssid)); 2682 memset(ifsta->ssid, 0, sizeof(ifsta->ssid));
2475 memcpy(ifsta->ssid, ssid, len); 2683 memcpy(ifsta->ssid, ssid, len);
2476 ifsta->ssid_len = len; 2684 ifsta->ssid_len = len;
2477 ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
2478 } 2685 }
2479 2686
2687 ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
2688
2480 if (len) 2689 if (len)
2481 ifsta->flags |= IEEE80211_STA_SSID_SET; 2690 ifsta->flags |= IEEE80211_STA_SSID_SET;
2482 else 2691 else
2483 ifsta->flags &= ~IEEE80211_STA_SSID_SET; 2692 ifsta->flags &= ~IEEE80211_STA_SSID_SET;
2484 2693
2485 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 2694 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2486 !(ifsta->flags & IEEE80211_STA_BSSID_SET)) {
2487 ifsta->ibss_join_req = jiffies; 2695 ifsta->ibss_join_req = jiffies;
2488 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH; 2696 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
2489 return ieee80211_sta_find_ibss(sdata, ifsta); 2697 return ieee80211_sta_find_ibss(sdata, ifsta);
@@ -2503,31 +2711,25 @@ int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size
2503int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid) 2711int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
2504{ 2712{
2505 struct ieee80211_if_sta *ifsta; 2713 struct ieee80211_if_sta *ifsta;
2506 int res;
2507 2714
2508 ifsta = &sdata->u.sta; 2715 ifsta = &sdata->u.sta;
2509 2716
2510 if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { 2717 if (is_valid_ether_addr(bssid)) {
2511 memcpy(ifsta->bssid, bssid, ETH_ALEN); 2718 memcpy(ifsta->bssid, bssid, ETH_ALEN);
2512 res = 0; 2719 ifsta->flags |= IEEE80211_STA_BSSID_SET;
2513 /* 2720 } else {
2514 * Hack! See also ieee80211_sta_set_ssid. 2721 memset(ifsta->bssid, 0, ETH_ALEN);
2515 */ 2722 ifsta->flags &= ~IEEE80211_STA_BSSID_SET;
2516 if (netif_running(sdata->dev)) 2723 }
2517 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); 2724
2518 if (res) { 2725 if (netif_running(sdata->dev)) {
2726 if (ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID)) {
2519 printk(KERN_DEBUG "%s: Failed to config new BSSID to " 2727 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
2520 "the low-level driver\n", sdata->dev->name); 2728 "the low-level driver\n", sdata->dev->name);
2521 return res;
2522 } 2729 }
2523 } 2730 }
2524 2731
2525 if (is_valid_ether_addr(bssid)) 2732 return ieee80211_sta_set_ssid(sdata, ifsta->ssid, ifsta->ssid_len);
2526 ifsta->flags |= IEEE80211_STA_BSSID_SET;
2527 else
2528 ifsta->flags &= ~IEEE80211_STA_BSSID_SET;
2529
2530 return 0;
2531} 2733}
2532 2734
2533int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len) 2735int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len)
@@ -2590,9 +2792,8 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
2590 2792
2591 if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) { 2793 if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2592 ifsta = &sdata->u.sta; 2794 ifsta = &sdata->u.sta;
2593 if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || 2795 if ((!(ifsta->flags & IEEE80211_STA_PREV_BSSID_SET)) ||
2594 (!(ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED) && 2796 !ieee80211_sta_active_ibss(sdata))
2595 !ieee80211_sta_active_ibss(sdata)))
2596 ieee80211_sta_find_ibss(sdata, ifsta); 2797 ieee80211_sta_find_ibss(sdata, ifsta);
2597 } 2798 }
2598 2799
@@ -2623,12 +2824,15 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
2623 struct ieee80211_local *local = 2824 struct ieee80211_local *local =
2624 container_of(work, struct ieee80211_local, 2825 container_of(work, struct ieee80211_local,
2625 dynamic_ps_enable_work); 2826 dynamic_ps_enable_work);
2827 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
2626 2828
2627 if (local->hw.conf.flags & IEEE80211_CONF_PS) 2829 if (local->hw.conf.flags & IEEE80211_CONF_PS)
2628 return; 2830 return;
2629 2831
2630 local->hw.conf.flags |= IEEE80211_CONF_PS; 2832 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
2833 ieee80211_send_nullfunc(local, sdata, 1);
2631 2834
2835 local->hw.conf.flags |= IEEE80211_CONF_PS;
2632 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 2836 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
2633} 2837}
2634 2838
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
new file mode 100644
index 00000000000..44525f51707
--- /dev/null
+++ b/net/mac80211/pm.c
@@ -0,0 +1,117 @@
1#include <net/mac80211.h>
2#include <net/rtnetlink.h>
3
4#include "ieee80211_i.h"
5#include "led.h"
6
7int __ieee80211_suspend(struct ieee80211_hw *hw)
8{
9 struct ieee80211_local *local = hw_to_local(hw);
10 struct ieee80211_sub_if_data *sdata;
11 struct ieee80211_if_init_conf conf;
12 struct sta_info *sta;
13
14 flush_workqueue(local->hw.workqueue);
15
16 /* disable keys */
17 list_for_each_entry(sdata, &local->interfaces, list)
18 ieee80211_disable_keys(sdata);
19
20 /* remove STAs */
21 list_for_each_entry(sta, &local->sta_list, list) {
22
23 if (local->ops->sta_notify) {
24 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
25 sdata = container_of(sdata->bss,
26 struct ieee80211_sub_if_data,
27 u.ap);
28
29 local->ops->sta_notify(hw, &sdata->vif,
30 STA_NOTIFY_REMOVE, &sta->sta);
31 }
32 }
33
34 /* remove all interfaces */
35 list_for_each_entry(sdata, &local->interfaces, list) {
36
37 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
38 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
39 netif_running(sdata->dev)) {
40 conf.vif = &sdata->vif;
41 conf.type = sdata->vif.type;
42 conf.mac_addr = sdata->dev->dev_addr;
43 local->ops->remove_interface(hw, &conf);
44 }
45 }
46
47 /* flush again, in case driver queued work */
48 flush_workqueue(local->hw.workqueue);
49
50 /* stop hardware */
51 if (local->open_count) {
52 ieee80211_led_radio(local, false);
53 local->ops->stop(hw);
54 }
55 return 0;
56}
57
58int __ieee80211_resume(struct ieee80211_hw *hw)
59{
60 struct ieee80211_local *local = hw_to_local(hw);
61 struct ieee80211_sub_if_data *sdata;
62 struct ieee80211_if_init_conf conf;
63 struct sta_info *sta;
64 int res;
65
66 /* restart hardware */
67 if (local->open_count) {
68 res = local->ops->start(hw);
69
70 ieee80211_led_radio(local, hw->conf.radio_enabled);
71 }
72
73 /* add interfaces */
74 list_for_each_entry(sdata, &local->interfaces, list) {
75
76 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
77 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
78 netif_running(sdata->dev)) {
79 conf.vif = &sdata->vif;
80 conf.type = sdata->vif.type;
81 conf.mac_addr = sdata->dev->dev_addr;
82 res = local->ops->add_interface(hw, &conf);
83 }
84 }
85
86 /* add STAs back */
87 list_for_each_entry(sta, &local->sta_list, list) {
88
89 if (local->ops->sta_notify) {
90 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
91 sdata = container_of(sdata->bss,
92 struct ieee80211_sub_if_data,
93 u.ap);
94
95 local->ops->sta_notify(hw, &sdata->vif,
96 STA_NOTIFY_ADD, &sta->sta);
97 }
98 }
99
100 /* add back keys */
101 list_for_each_entry(sdata, &local->interfaces, list)
102 if (netif_running(sdata->dev))
103 ieee80211_enable_keys(sdata);
104
105 /* setup RTS threshold */
106 if (local->ops->set_rts_threshold)
107 local->ops->set_rts_threshold(hw, local->rts_threshold);
108
109 /* reconfigure hardware */
110 ieee80211_hw_config(local, ~0);
111
112 netif_addr_lock_bh(local->mdev);
113 ieee80211_configure_filter(local);
114 netif_addr_unlock_bh(local->mdev);
115
116 return 0;
117}
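The new pm.c pair tears mac80211 down in a fixed order on suspend (flush work, disable keys, remove stations, remove interfaces, stop the hardware) and rebuilds it in the reverse order on resume. The tiny standalone model below only restates that ordering; it is an illustration of the invariant, not of any kernel API, and the step names are mine.

#include <stdio.h>

static const char *suspend_steps[] = {
    "flush work", "disable keys", "remove stations",
    "remove interfaces", "stop hardware",
};

int main(void)
{
    int n = (int)(sizeof(suspend_steps) / sizeof(suspend_steps[0]));
    int i;

    for (i = 0; i < n; i++)          /* suspend order */
        printf("suspend %d: %s\n", i, suspend_steps[i]);
    for (i = n - 1; i >= 0; i--)     /* resume undoes it in reverse */
        printf("resume  %d: undo %s\n", n - 1 - i, suspend_steps[i]);
    return 0;
}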
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7175ae80c36..1327d424bf3 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -86,8 +86,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
86 86
87 if (status->flag & RX_FLAG_TSFT) 87 if (status->flag & RX_FLAG_TSFT)
88 len += 8; 88 len += 8;
89 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB || 89 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
90 local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
91 len += 1; 90 len += 1;
92 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) 91 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
93 len += 1; 92 len += 1;
@@ -102,7 +101,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
102 return len; 101 return len;
103} 102}
104 103
105/** 104/*
106 * ieee80211_add_rx_radiotap_header - add radiotap header 105 * ieee80211_add_rx_radiotap_header - add radiotap header
107 * 106 *
108 * add a radiotap header containing all the fields which the hardware provided. 107 * add a radiotap header containing all the fields which the hardware provided.
@@ -158,7 +157,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
158 */ 157 */
159 *pos = 0; 158 *pos = 0;
160 } else { 159 } else {
161 rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE); 160 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
162 *pos = rate->bitrate / 5; 161 *pos = rate->bitrate / 5;
163 } 162 }
164 pos++; 163 pos++;
@@ -199,14 +198,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
199 *pos = status->antenna; 198 *pos = status->antenna;
200 pos++; 199 pos++;
201 200
202 /* IEEE80211_RADIOTAP_DB_ANTSIGNAL */
203 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) {
204 *pos = status->signal;
205 rthdr->it_present |=
206 cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL);
207 pos++;
208 }
209
210 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 201 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
211 202
212 /* IEEE80211_RADIOTAP_RX_FLAGS */ 203 /* IEEE80211_RADIOTAP_RX_FLAGS */
@@ -371,39 +362,50 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
371 rx->skb->priority = (tid > 7) ? 0 : tid; 362 rx->skb->priority = (tid > 7) ? 0 : tid;
372} 363}
373 364
374static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx) 365/**
366 * DOC: Packet alignment
367 *
368 * Drivers always need to pass packets that are aligned to two-byte boundaries
369 * to the stack.
370 *
371 * Additionally, drivers should, if possible, align the payload data in a way that
372 * guarantees that the contained IP header is aligned to a four-byte
373 * boundary. In the case of regular frames, this simply means aligning the
374 * payload to a four-byte boundary (because either the IP header is directly
375 * contained, or IV/RFC1042 headers that have a length divisible by four are
376 * in front of it).
377 *
378 * With A-MSDU frames, however, the payload data address must yield two modulo
379 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
380 * push the IP header further back to a multiple of four again. Thankfully, the
381 * specs were sane enough this time around to require padding each A-MSDU
382 * subframe to a length that is a multiple of four.
383 *
384 * Padding such as Atheros hardware adds between the 802.11 header and
385 * the payload is not supported; the driver is required to move the 802.11
386 * header to be directly in front of the payload in that case.
387 */
388static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
375{ 389{
376#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
377 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 390 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
378 int hdrlen; 391 int hdrlen;
379 392
393#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
394 return;
395#endif
396
397 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
398 "unaligned packet at 0x%p\n", rx->skb->data))
399 return;
400
380 if (!ieee80211_is_data_present(hdr->frame_control)) 401 if (!ieee80211_is_data_present(hdr->frame_control))
381 return; 402 return;
382 403
383 /*
384 * Drivers are required to align the payload data in a way that
385 * guarantees that the contained IP header is aligned to a four-
386 * byte boundary. In the case of regular frames, this simply means
387 * aligning the payload to a four-byte boundary (because either
388 * the IP header is directly contained, or IV/RFC1042 headers that
389 * have a length divisible by four are in front of it.
390 *
391 * With A-MSDU frames, however, the payload data address must
392 * yield two modulo four because there are 14-byte 802.3 headers
393 * within the A-MSDU frames that push the IP header further back
394 * to a multiple of four again. Thankfully, the specs were sane
395 * enough this time around to require padding each A-MSDU subframe
396 * to a length that is a multiple of four.
397 *
398 * Padding like atheros hardware adds which is inbetween the 802.11
399 * header and the payload is not supported, the driver is required
400 * to move the 802.11 header further back in that case.
401 */
402 hdrlen = ieee80211_hdrlen(hdr->frame_control); 404 hdrlen = ieee80211_hdrlen(hdr->frame_control);
403 if (rx->flags & IEEE80211_RX_AMSDU) 405 if (rx->flags & IEEE80211_RX_AMSDU)
404 hdrlen += ETH_HLEN; 406 hdrlen += ETH_HLEN;
405 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); 407 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
406#endif 408 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
407} 409}
408 410
409 411
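The renamed ieee80211_verify_alignment() above warns when skb->data is odd or when data plus the 802.11 header length is not a multiple of four. The arithmetic in the surrounding DOC comment can be checked in isolation; the sketch below uses a fixed 24-byte header and the 14-byte 802.3 header of the A-MSDU case, with all constants chosen for illustration rather than taken from the patch.

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN 14          /* 802.3 header in front of an A-MSDU payload */

/* return non-zero if the IP header after 'hdrlen' bytes is not 4-byte aligned */
static int payload_misaligned(uintptr_t data, unsigned int hdrlen, int is_amsdu)
{
    if (is_amsdu)
        hdrlen += ETH_HLEN;  /* each subframe adds a 14-byte 802.3 header */
    return (int)((data + hdrlen) & 3);
}

int main(void)
{
    uintptr_t buf = 0x1000;              /* pretend this is skb->data */
    unsigned int hdrlen = 24;            /* plain data frame, no QoS/HT */

    /* regular frame: payload must start on a 4-byte boundary */
    printf("regular frame misaligned: %d\n", payload_misaligned(buf, hdrlen, 0));

    /* A-MSDU: data at 2 (mod 4) so 24 + 14 + 2 lands on a multiple of 4 */
    printf("A-MSDU at offset 2 misaligned: %d\n",
           payload_misaligned(buf + 2, hdrlen, 1));
    return 0;
}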
@@ -435,6 +437,52 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
435 return RX_CONTINUE; 437 return RX_CONTINUE;
436} 438}
437 439
440
441static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
442{
443 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
444
445 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
446 return 0;
447
448 return ieee80211_is_robust_mgmt_frame(hdr);
449}
450
451
452static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
453{
454 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
455
456 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
457 return 0;
458
459 return ieee80211_is_robust_mgmt_frame(hdr);
460}
461
462
463/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
464static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
465{
466 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
467 struct ieee80211_mmie *mmie;
468
469 if (skb->len < 24 + sizeof(*mmie) ||
470 !is_multicast_ether_addr(hdr->da))
471 return -1;
472
473 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
474 return -1; /* not a robust management frame */
475
476 mmie = (struct ieee80211_mmie *)
477 (skb->data + skb->len - sizeof(*mmie));
478 if (mmie->element_id != WLAN_EID_MMIE ||
479 mmie->length != sizeof(*mmie) - 2)
480 return -1;
481
482 return le16_to_cpu(mmie->key_id);
483}
484
485
438static ieee80211_rx_result 486static ieee80211_rx_result
439ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 487ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
440{ 488{
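ieee80211_get_mmie_keyidx() in the hunk above reads the BIP key index out of the Management MIC IE that sits at the very end of a protected broadcast management frame. The sketch below parses the same layout from a raw buffer; the packed struct is a simplified stand-in for the kernel's struct ieee80211_mmie and the element ID value is an assumption for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EID_MMIE 76   /* Management MIC element ID (assumed value) */

/* simplified stand-in for struct ieee80211_mmie: 1+1+2+6+8 = 18 bytes */
struct mmie {
    uint8_t element_id;
    uint8_t length;
    uint8_t key_id[2];           /* little-endian on the air */
    uint8_t ipn[6];
    uint8_t mic[8];
};

/* return the BIP key index found at the tail of 'frame', or -1 */
static int get_mmie_keyidx(const uint8_t *frame, size_t len)
{
    struct mmie m;

    if (len < 24 + sizeof(m))
        return -1;
    memcpy(&m, frame + len - sizeof(m), sizeof(m));
    if (m.element_id != EID_MMIE || m.length != sizeof(m) - 2)
        return -1;
    return m.key_id[0] | (m.key_id[1] << 8);   /* le16_to_cpu by hand */
}

int main(void)
{
    uint8_t frame[64] = { 0 };
    struct mmie m = { .element_id = EID_MMIE, .length = 16, .key_id = { 4, 0 } };

    memcpy(frame + sizeof(frame) - sizeof(m), &m, sizeof(m));
    printf("BIP key index: %d\n", get_mmie_keyidx(frame, sizeof(frame)));
    return 0;
}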
@@ -550,21 +598,23 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
550 int hdrlen; 598 int hdrlen;
551 ieee80211_rx_result result = RX_DROP_UNUSABLE; 599 ieee80211_rx_result result = RX_DROP_UNUSABLE;
552 struct ieee80211_key *stakey = NULL; 600 struct ieee80211_key *stakey = NULL;
601 int mmie_keyidx = -1;
553 602
554 /* 603 /*
555 * Key selection 101 604 * Key selection 101
556 * 605 *
557 * There are three types of keys: 606 * There are four types of keys:
558 * - GTK (group keys) 607 * - GTK (group keys)
608 * - IGTK (group keys for management frames)
559 * - PTK (pairwise keys) 609 * - PTK (pairwise keys)
560 * - STK (station-to-station pairwise keys) 610 * - STK (station-to-station pairwise keys)
561 * 611 *
562 * When selecting a key, we have to distinguish between multicast 612 * When selecting a key, we have to distinguish between multicast
563 * (including broadcast) and unicast frames, the latter can only 613 * (including broadcast) and unicast frames, the latter can only
564 * use PTKs and STKs while the former always use GTKs. Unless, of 614 * use PTKs and STKs while the former always use GTKs and IGTKs.
565 * course, actual WEP keys ("pre-RSNA") are used, then unicast 615 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
566 * frames can also use key indizes like GTKs. Hence, if we don't 616 * unicast frames can also use key indices like GTKs. Hence, if we
567 * have a PTK/STK we check the key index for a WEP key. 617 * don't have a PTK/STK we check the key index for a WEP key.
568 * 618 *
569 * Note that in a regular BSS, multicast frames are sent by the 619 * Note that in a regular BSS, multicast frames are sent by the
570 * AP only, associated stations unicast the frame to the AP first 620 * AP only, associated stations unicast the frame to the AP first
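The updated "Key selection 101" comment above now lists four key types and says unicast frames use PTK/STK while group-addressed frames use GTKs, with IGTKs covering protected broadcast management frames. A compact standalone model of that decision is sketched below; the enum and helper are illustrative, not the kernel's.

#include <stdio.h>
#include <stdbool.h>

enum key_kind { KEY_NONE, KEY_PTK, KEY_GTK, KEY_IGTK };

/*
 * Pick the key class for a received frame: pairwise key for unicast,
 * IGTK for group-addressed robust management frames, GTK otherwise.
 */
static enum key_kind select_key(bool multicast, bool mgmt, bool have_ptk)
{
    if (!multicast && have_ptk)
        return KEY_PTK;
    if (multicast && mgmt)
        return KEY_IGTK;
    if (multicast)
        return KEY_GTK;
    return KEY_NONE;   /* fall back to WEP-style key-index lookup */
}

int main(void)
{
    printf("unicast data with PTK  -> %d\n", select_key(false, false, true));
    printf("broadcast data         -> %d\n", select_key(true,  false, false));
    printf("broadcast deauth (BIP) -> %d\n", select_key(true,  true,  false));
    return 0;
}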
@@ -577,8 +627,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
577 * possible. 627 * possible.
578 */ 628 */
579 629
580 if (!ieee80211_has_protected(hdr->frame_control)) 630 if (!ieee80211_has_protected(hdr->frame_control)) {
581 return RX_CONTINUE; 631 if (!ieee80211_is_mgmt(hdr->frame_control) ||
632 rx->sta == NULL || !test_sta_flags(rx->sta, WLAN_STA_MFP))
633 return RX_CONTINUE;
634 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
635 if (mmie_keyidx < 0)
636 return RX_CONTINUE;
637 }
582 638
583 /* 639 /*
584 * No point in finding a key and decrypting if the frame is neither 640 * No point in finding a key and decrypting if the frame is neither
@@ -592,6 +648,16 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
592 648
593 if (!is_multicast_ether_addr(hdr->addr1) && stakey) { 649 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
594 rx->key = stakey; 650 rx->key = stakey;
651 } else if (mmie_keyidx >= 0) {
652 /* Broadcast/multicast robust management frame / BIP */
653 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
654 (rx->status->flag & RX_FLAG_IV_STRIPPED))
655 return RX_CONTINUE;
656
657 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
658 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
659 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
660 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
595 } else { 661 } else {
596 /* 662 /*
597 * The device doesn't give us the IV so we won't be 663 * The device doesn't give us the IV so we won't be
@@ -654,6 +720,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
654 case ALG_CCMP: 720 case ALG_CCMP:
655 result = ieee80211_crypto_ccmp_decrypt(rx); 721 result = ieee80211_crypto_ccmp_decrypt(rx);
656 break; 722 break;
723 case ALG_AES_CMAC:
724 result = ieee80211_crypto_aes_cmac_decrypt(rx);
725 break;
657 } 726 }
658 727
659 /* either the frame has been decrypted or will be dropped */ 728 /* either the frame has been decrypted or will be dropped */
@@ -662,6 +731,39 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
662 return result; 731 return result;
663} 732}
664 733
734static ieee80211_rx_result debug_noinline
735ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
736{
737 struct ieee80211_local *local;
738 struct ieee80211_hdr *hdr;
739 struct sk_buff *skb;
740
741 local = rx->local;
742 skb = rx->skb;
743 hdr = (struct ieee80211_hdr *) skb->data;
744
745 if (!local->pspolling)
746 return RX_CONTINUE;
747
748 if (!ieee80211_has_fromds(hdr->frame_control))
749 /* this is not from AP */
750 return RX_CONTINUE;
751
752 if (!ieee80211_is_data(hdr->frame_control))
753 return RX_CONTINUE;
754
755 if (!ieee80211_has_moredata(hdr->frame_control)) {
756 /* AP has no more frames buffered for us */
757 local->pspolling = false;
758 return RX_CONTINUE;
759 }
760
761 /* more data bit is set, let's request a new frame from the AP */
762 ieee80211_send_pspoll(local, rx->sdata);
763
764 return RX_CONTINUE;
765}
766
665static void ap_sta_ps_start(struct sta_info *sta) 767static void ap_sta_ps_start(struct sta_info *sta)
666{ 768{
667 struct ieee80211_sub_if_data *sdata = sta->sdata; 769 struct ieee80211_sub_if_data *sdata = sta->sdata;
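The new ieee80211_rx_h_check_more_data() handler keeps PS-Polling the AP as long as received frames carry the More Data bit. The frame-control test it relies on can be modelled in a few lines; the two bit values below are the standard 802.11 frame-control flags, and the helper is a simplification that ignores the data-frame check done in the real handler.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FCTL_FROMDS   0x0200   /* frame comes from the distribution system (AP) */
#define FCTL_MOREDATA 0x2000   /* AP still has frames buffered for us */

/* should the station send another PS-Poll after this frame? */
static bool need_more_pspoll(uint16_t frame_control, bool pspolling)
{
    if (!pspolling)
        return false;
    if (!(frame_control & FCTL_FROMDS))
        return false;                  /* not from our AP */
    return (frame_control & FCTL_MOREDATA) != 0;
}

int main(void)
{
    printf("%d\n", need_more_pspoll(0x0208 | FCTL_MOREDATA, true)); /* more buffered */
    printf("%d\n", need_more_pspoll(0x0208, true));                 /* buffer drained */
    return 0;
}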
@@ -1101,6 +1203,15 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1101 /* Drop unencrypted frames if key is set. */ 1203 /* Drop unencrypted frames if key is set. */
1102 if (unlikely(!ieee80211_has_protected(fc) && 1204 if (unlikely(!ieee80211_has_protected(fc) &&
1103 !ieee80211_is_nullfunc(fc) && 1205 !ieee80211_is_nullfunc(fc) &&
1206 (!ieee80211_is_mgmt(fc) ||
1207 (ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1208 rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP))) &&
1209 (rx->key || rx->sdata->drop_unencrypted)))
1210 return -EACCES;
1211 /* BIP does not use Protected field, so need to check MMIE */
1212 if (unlikely(rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP) &&
1213 ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1214 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1104 (rx->key || rx->sdata->drop_unencrypted))) 1215 (rx->key || rx->sdata->drop_unencrypted)))
1105 return -EACCES; 1216 return -EACCES;
1106 1217
@@ -1138,12 +1249,12 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1138 1249
1139 switch (hdr->frame_control & 1250 switch (hdr->frame_control &
1140 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 1251 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1141 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS): 1252 case cpu_to_le16(IEEE80211_FCTL_TODS):
1142 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP && 1253 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1143 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) 1254 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1144 return -1; 1255 return -1;
1145 break; 1256 break;
1146 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): 1257 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1147 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS && 1258 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1148 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)) 1259 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1149 return -1; 1260 return -1;
@@ -1157,13 +1268,13 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1157 } 1268 }
1158 } 1269 }
1159 break; 1270 break;
1160 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS): 1271 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
1161 if (sdata->vif.type != NL80211_IFTYPE_STATION || 1272 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1162 (is_multicast_ether_addr(dst) && 1273 (is_multicast_ether_addr(dst) &&
1163 !compare_ether_addr(src, dev->dev_addr))) 1274 !compare_ether_addr(src, dev->dev_addr)))
1164 return -1; 1275 return -1;
1165 break; 1276 break;
1166 case __constant_cpu_to_le16(0): 1277 case cpu_to_le16(0):
1167 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 1278 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1168 return -1; 1279 return -1;
1169 break; 1280 break;
@@ -1267,10 +1378,37 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1267 } 1378 }
1268 1379
1269 if (skb) { 1380 if (skb) {
1270 /* deliver to local stack */ 1381 int align __maybe_unused;
1271 skb->protocol = eth_type_trans(skb, dev); 1382
1272 memset(skb->cb, 0, sizeof(skb->cb)); 1383#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1273 netif_rx(skb); 1384 /*
1385 * 'align' will only take the values 0 or 2 here
1386 * since all frames are required to be aligned
1387 * to 2-byte boundaries when being passed to
1388 * mac80211. That also explains the __skb_push()
1389 * below.
1390 */
1391 align = (unsigned long)skb->data & 4;
1392 if (align) {
1393 if (WARN_ON(skb_headroom(skb) < 3)) {
1394 dev_kfree_skb(skb);
1395 skb = NULL;
1396 } else {
1397 u8 *data = skb->data;
1398 size_t len = skb->len;
1399 u8 *new = __skb_push(skb, align);
1400 memmove(new, data, len);
1401 __skb_trim(skb, len);
1402 }
1403 }
1404#endif
1405
1406 if (skb) {
1407 /* deliver to local stack */
1408 skb->protocol = eth_type_trans(skb, dev);
1409 memset(skb->cb, 0, sizeof(skb->cb));
1410 netif_rx(skb);
1411 }
1274 } 1412 }
1275 1413
1276 if (xmit_skb) { 1414 if (xmit_skb) {
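When the received ethernet header leaves the payload only 2-byte aligned, the new code above pushes the frame forward inside its headroom and memmoves it so the IP header becomes 4-byte aligned. The same push-and-move trick on a plain buffer is shown below as a hedged illustration of why at least 'align' bytes of headroom are needed; it mirrors, but does not reproduce, the skb helpers.

#include <stdio.h>
#include <string.h>

/*
 * Shift 'len' bytes of payload 'align' bytes towards the front of the
 * buffer, mirroring the __skb_push()/memmove()/__skb_trim() sequence.
 */
static char *realign(char *buf, size_t headroom, size_t len, size_t align)
{
    char *data = buf + headroom;          /* current payload position */
    char *new_start = data - align;       /* __skb_push(skb, align) */

    memmove(new_start, data, len);        /* slide payload forward */
    return new_start;                     /* __skb_trim() then drops the tail */
}

int main(void)
{
    char buf[64];
    size_t headroom = 6;                  /* must be >= align, as the WARN_ON checks */
    char *p;

    strcpy(buf + headroom, "payload");
    p = realign(buf, headroom, 8, 2);
    printf("moved to offset %zu: %s\n", (size_t)(p - buf), p);
    return 0;
}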
@@ -1339,14 +1477,20 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1339 if (remaining <= subframe_len + padding) 1477 if (remaining <= subframe_len + padding)
1340 frame = skb; 1478 frame = skb;
1341 else { 1479 else {
1342 frame = dev_alloc_skb(local->hw.extra_tx_headroom + 1480 /*
1343 subframe_len); 1481 * Allocate and reserve two bytes more for payload
1482 * alignment since sizeof(struct ethhdr) is 14.
1483 */
1484 frame = dev_alloc_skb(
1485 ALIGN(local->hw.extra_tx_headroom, 4) +
1486 subframe_len + 2);
1344 1487
1345 if (frame == NULL) 1488 if (frame == NULL)
1346 return RX_DROP_UNUSABLE; 1489 return RX_DROP_UNUSABLE;
1347 1490
1348 skb_reserve(frame, local->hw.extra_tx_headroom + 1491 skb_reserve(frame,
1349 sizeof(struct ethhdr)); 1492 ALIGN(local->hw.extra_tx_headroom, 4) +
1493 sizeof(struct ethhdr) + 2);
1350 memcpy(skb_put(frame, ntohs(len)), skb->data, 1494 memcpy(skb_put(frame, ntohs(len)), skb->data,
1351 ntohs(len)); 1495 ntohs(len));
1352 1496
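The allocation above rounds the TX headroom up to a multiple of four and reserves two extra bytes so that, after the 14-byte ethernet header is written, the A-MSDU subframe payload lands on a 4-byte boundary. The arithmetic can be checked in isolation; ALIGN() below is the usual power-of-two rounding helper, restated here so the sketch is self-contained, and the headroom value is arbitrary.

#include <stdio.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define ETH_HLEN     14

int main(void)
{
    unsigned int extra_tx_headroom = 26;   /* arbitrary driver value */
    unsigned int reserve = ALIGN(extra_tx_headroom, 4) + ETH_HLEN + 2;

    /* the subframe payload (its IP header) starts 'reserve' bytes in */
    printf("reserve = %u, payload offset mod 4 = %u\n", reserve, reserve & 3);
    return 0;
}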
@@ -1529,11 +1673,9 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1529 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; 1673 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1530 1674
1531 /* reset session timer */ 1675 /* reset session timer */
1532 if (tid_agg_rx->timeout) { 1676 if (tid_agg_rx->timeout)
1533 unsigned long expires = 1677 mod_timer(&tid_agg_rx->session_timer,
1534 jiffies + (tid_agg_rx->timeout / 1000) * HZ; 1678 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1535 mod_timer(&tid_agg_rx->session_timer, expires);
1536 }
1537 1679
1538 /* manage reordering buffer according to requested */ 1680 /* manage reordering buffer according to requested */
1539 /* sequence number */ 1681 /* sequence number */
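Both session-timer resets in this patch stop treating the negotiated BlockAck timeout as milliseconds and convert it with TU_TO_EXP_TIME() instead. Assuming the usual definition of a time unit (1 TU = 1024 microseconds), the difference is plain arithmetic:

#include <stdio.h>

int main(void)
{
    unsigned int timeout_tu = 5000;                    /* BlockAck timeout in TUs */

    unsigned int old_ms = (timeout_tu / 1000) * 1000;  /* old code: TUs treated as ms */
    unsigned int new_us = timeout_tu * 1024;           /* 1 TU = 1024 microseconds */

    printf("old expiry: %u ms, TU-based expiry: %u ms\n", old_ms, new_us / 1000);
    return 0;
}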
@@ -1547,12 +1689,65 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1547 return RX_CONTINUE; 1689 return RX_CONTINUE;
1548} 1690}
1549 1691
1692static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1693 struct ieee80211_mgmt *mgmt,
1694 size_t len)
1695{
1696 struct ieee80211_local *local = sdata->local;
1697 struct sk_buff *skb;
1698 struct ieee80211_mgmt *resp;
1699
1700 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1701 /* Not to own unicast address */
1702 return;
1703 }
1704
1705 if (compare_ether_addr(mgmt->sa, sdata->u.sta.bssid) != 0 ||
1706 compare_ether_addr(mgmt->bssid, sdata->u.sta.bssid) != 0) {
1707 /* Not from the current AP. */
1708 return;
1709 }
1710
1711 if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATE) {
1712 /* Association in progress; ignore SA Query */
1713 return;
1714 }
1715
1716 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1717 /* Too short SA Query request frame */
1718 return;
1719 }
1720
1721 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1722 if (skb == NULL)
1723 return;
1724
1725 skb_reserve(skb, local->hw.extra_tx_headroom);
1726 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1727 memset(resp, 0, 24);
1728 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1729 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1730 memcpy(resp->bssid, sdata->u.sta.bssid, ETH_ALEN);
1731 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1732 IEEE80211_STYPE_ACTION);
1733 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1734 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1735 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1736 memcpy(resp->u.action.u.sa_query.trans_id,
1737 mgmt->u.action.u.sa_query.trans_id,
1738 WLAN_SA_QUERY_TR_ID_LEN);
1739
1740 ieee80211_tx_skb(sdata, skb, 1);
1741}
1742
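ieee80211_process_sa_query_req() above answers an SA Query request by echoing the two-byte transaction identifier back in an SA Query response action frame. The standalone sketch below builds only the four-byte action payload; the category and action values are the 802.11w ones as I understand them, so treat them as assumptions rather than values taken from the patch.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CATEGORY_SA_QUERY   8   /* assumed 802.11w category code */
#define SA_QUERY_RESPONSE   1
#define SA_QUERY_TR_ID_LEN  2

/* fill the 4-byte SA Query response action payload */
static void build_sa_query_resp(uint8_t resp[4], const uint8_t trans_id[2])
{
    resp[0] = CATEGORY_SA_QUERY;
    resp[1] = SA_QUERY_RESPONSE;
    memcpy(&resp[2], trans_id, SA_QUERY_TR_ID_LEN);   /* echo the request's ID */
}

int main(void)
{
    uint8_t req_id[2] = { 0x12, 0x34 };   /* transaction ID from the request */
    uint8_t resp[4];

    build_sa_query_resp(resp, req_id);
    printf("%02x %02x %02x %02x\n", resp[0], resp[1], resp[2], resp[3]);
    return 0;
}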
1550static ieee80211_rx_result debug_noinline 1743static ieee80211_rx_result debug_noinline
1551ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 1744ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1552{ 1745{
1553 struct ieee80211_local *local = rx->local; 1746 struct ieee80211_local *local = rx->local;
1554 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1747 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1748 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1555 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1749 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1750 struct ieee80211_bss *bss;
1556 int len = rx->skb->len; 1751 int len = rx->skb->len;
1557 1752
1558 if (!ieee80211_is_action(mgmt->frame_control)) 1753 if (!ieee80211_is_action(mgmt->frame_control))
@@ -1564,12 +1759,26 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1564 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1759 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1565 return RX_DROP_MONITOR; 1760 return RX_DROP_MONITOR;
1566 1761
1762 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1763 return RX_DROP_MONITOR;
1764
1567 /* all categories we currently handle have action_code */ 1765 /* all categories we currently handle have action_code */
1568 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 1766 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1569 return RX_DROP_MONITOR; 1767 return RX_DROP_MONITOR;
1570 1768
1571 switch (mgmt->u.action.category) { 1769 switch (mgmt->u.action.category) {
1572 case WLAN_CATEGORY_BACK: 1770 case WLAN_CATEGORY_BACK:
1771 /*
1772 * The aggregation code is not prepared to handle
1773 * anything but STA/AP due to the BSSID handling;
1774 * IBSS could work in the code but isn't supported
1775 * by drivers or the standard.
1776 */
1777 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1778 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1779 sdata->vif.type != NL80211_IFTYPE_AP)
1780 return RX_DROP_MONITOR;
1781
1573 switch (mgmt->u.action.u.addba_req.action_code) { 1782 switch (mgmt->u.action.u.addba_req.action_code) {
1574 case WLAN_ACTION_ADDBA_REQ: 1783 case WLAN_ACTION_ADDBA_REQ:
1575 if (len < (IEEE80211_MIN_ACTION_SIZE + 1784 if (len < (IEEE80211_MIN_ACTION_SIZE +
@@ -1601,6 +1810,42 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1601 return RX_DROP_MONITOR; 1810 return RX_DROP_MONITOR;
1602 ieee80211_process_measurement_req(sdata, mgmt, len); 1811 ieee80211_process_measurement_req(sdata, mgmt, len);
1603 break; 1812 break;
1813 case WLAN_ACTION_SPCT_CHL_SWITCH:
1814 if (len < (IEEE80211_MIN_ACTION_SIZE +
1815 sizeof(mgmt->u.action.u.chan_switch)))
1816 return RX_DROP_MONITOR;
1817
1818 if (memcmp(mgmt->bssid, ifsta->bssid, ETH_ALEN) != 0)
1819 return RX_DROP_MONITOR;
1820
1821 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
1822 local->hw.conf.channel->center_freq,
1823 ifsta->ssid, ifsta->ssid_len);
1824 if (!bss)
1825 return RX_DROP_MONITOR;
1826
1827 ieee80211_process_chanswitch(sdata,
1828 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1829 ieee80211_rx_bss_put(local, bss);
1830 break;
1831 }
1832 break;
1833 case WLAN_CATEGORY_SA_QUERY:
1834 if (len < (IEEE80211_MIN_ACTION_SIZE +
1835 sizeof(mgmt->u.action.u.sa_query)))
1836 return RX_DROP_MONITOR;
1837 switch (mgmt->u.action.u.sa_query.action) {
1838 case WLAN_ACTION_SA_QUERY_REQUEST:
1839 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1840 return RX_DROP_MONITOR;
1841 ieee80211_process_sa_query_req(sdata, mgmt, len);
1842 break;
1843 case WLAN_ACTION_SA_QUERY_RESPONSE:
1844 /*
1845 * SA Query response is currently only used in AP mode
1846 * and it is processed in user space.
1847 */
1848 return RX_CONTINUE;
1604 } 1849 }
1605 break; 1850 break;
1606 default: 1851 default:
@@ -1616,10 +1861,14 @@ static ieee80211_rx_result debug_noinline
1616ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 1861ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1617{ 1862{
1618 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1863 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1864 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1619 1865
1620 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1866 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1621 return RX_DROP_MONITOR; 1867 return RX_DROP_MONITOR;
1622 1868
1869 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1870 return RX_DROP_MONITOR;
1871
1623 if (ieee80211_vif_is_mesh(&sdata->vif)) 1872 if (ieee80211_vif_is_mesh(&sdata->vif))
1624 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); 1873 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1625 1874
@@ -1780,6 +2029,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1780 CALL_RXH(ieee80211_rx_h_passive_scan) 2029 CALL_RXH(ieee80211_rx_h_passive_scan)
1781 CALL_RXH(ieee80211_rx_h_check) 2030 CALL_RXH(ieee80211_rx_h_check)
1782 CALL_RXH(ieee80211_rx_h_decrypt) 2031 CALL_RXH(ieee80211_rx_h_decrypt)
2032 CALL_RXH(ieee80211_rx_h_check_more_data)
1783 CALL_RXH(ieee80211_rx_h_sta_process) 2033 CALL_RXH(ieee80211_rx_h_sta_process)
1784 CALL_RXH(ieee80211_rx_h_defragment) 2034 CALL_RXH(ieee80211_rx_h_defragment)
1785 CALL_RXH(ieee80211_rx_h_ps_poll) 2035 CALL_RXH(ieee80211_rx_h_ps_poll)
@@ -1823,9 +2073,10 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1823/* main receive path */ 2073/* main receive path */
1824 2074
1825static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, 2075static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1826 u8 *bssid, struct ieee80211_rx_data *rx, 2076 struct ieee80211_rx_data *rx,
1827 struct ieee80211_hdr *hdr) 2077 struct ieee80211_hdr *hdr)
1828{ 2078{
2079 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
1829 int multicast = is_multicast_ether_addr(hdr->addr1); 2080 int multicast = is_multicast_ether_addr(hdr->addr1);
1830 2081
1831 switch (sdata->vif.type) { 2082 switch (sdata->vif.type) {
@@ -1928,7 +2179,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1928 int prepares; 2179 int prepares;
1929 struct ieee80211_sub_if_data *prev = NULL; 2180 struct ieee80211_sub_if_data *prev = NULL;
1930 struct sk_buff *skb_new; 2181 struct sk_buff *skb_new;
1931 u8 *bssid;
1932 2182
1933 hdr = (struct ieee80211_hdr *)skb->data; 2183 hdr = (struct ieee80211_hdr *)skb->data;
1934 memset(&rx, 0, sizeof(rx)); 2184 memset(&rx, 0, sizeof(rx));
@@ -1956,7 +2206,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1956 rx.flags |= IEEE80211_RX_IN_SCAN; 2206 rx.flags |= IEEE80211_RX_IN_SCAN;
1957 2207
1958 ieee80211_parse_qos(&rx); 2208 ieee80211_parse_qos(&rx);
1959 ieee80211_verify_ip_alignment(&rx); 2209 ieee80211_verify_alignment(&rx);
1960 2210
1961 skb = rx.skb; 2211 skb = rx.skb;
1962 2212
@@ -1967,9 +2217,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1967 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) 2217 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
1968 continue; 2218 continue;
1969 2219
1970 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
1971 rx.flags |= IEEE80211_RX_RA_MATCH; 2220 rx.flags |= IEEE80211_RX_RA_MATCH;
1972 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr); 2221 prepares = prepare_for_handlers(sdata, &rx, hdr);
1973 2222
1974 if (!prepares) 2223 if (!prepares)
1975 continue; 2224 continue;
@@ -2174,11 +2423,9 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2174 /* new un-ordered ampdu frame - process it */ 2423 /* new un-ordered ampdu frame - process it */
2175 2424
2176 /* reset session timer */ 2425 /* reset session timer */
2177 if (tid_agg_rx->timeout) { 2426 if (tid_agg_rx->timeout)
2178 unsigned long expires = 2427 mod_timer(&tid_agg_rx->session_timer,
2179 jiffies + (tid_agg_rx->timeout / 1000) * HZ; 2428 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2180 mod_timer(&tid_agg_rx->session_timer, expires);
2181 }
2182 2429
2183 /* if this mpdu is fragmented - terminate rx aggregation session */ 2430 /* if this mpdu is fragmented - terminate rx aggregation session */
2184 sc = le16_to_cpu(hdr->seq_ctrl); 2431 sc = le16_to_cpu(hdr->seq_ctrl);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f5c7c337192..f883ab9f1e6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,14 +12,11 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15/* TODO: 15/* TODO: figure out how to avoid that the "current BSS" expires */
16 * order BSS list by RSSI(?) ("quality of AP")
17 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
18 * SSID)
19 */
20 16
21#include <linux/wireless.h> 17#include <linux/wireless.h>
22#include <linux/if_arp.h> 18#include <linux/if_arp.h>
19#include <linux/rtnetlink.h>
23#include <net/mac80211.h> 20#include <net/mac80211.h>
24#include <net/iw_handler.h> 21#include <net/iw_handler.h>
25 22
@@ -30,192 +27,29 @@
30#define IEEE80211_CHANNEL_TIME (HZ / 33) 27#define IEEE80211_CHANNEL_TIME (HZ / 33)
31#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5) 28#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
32 29
33void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
34{
35 spin_lock_init(&local->bss_lock);
36 INIT_LIST_HEAD(&local->bss_list);
37}
38
39void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
40{
41 struct ieee80211_bss *bss, *tmp;
42
43 list_for_each_entry_safe(bss, tmp, &local->bss_list, list)
44 ieee80211_rx_bss_put(local, bss);
45}
46
47struct ieee80211_bss * 30struct ieee80211_bss *
48ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, 31ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
49 u8 *ssid, u8 ssid_len) 32 u8 *ssid, u8 ssid_len)
50{ 33{
51 struct ieee80211_bss *bss; 34 return (void *)cfg80211_get_bss(local->hw.wiphy,
52 35 ieee80211_get_channel(local->hw.wiphy,
53 spin_lock_bh(&local->bss_lock); 36 freq),
54 bss = local->bss_hash[STA_HASH(bssid)]; 37 bssid, ssid, ssid_len,
55 while (bss) { 38 0, 0);
56 if (!bss_mesh_cfg(bss) &&
57 !memcmp(bss->bssid, bssid, ETH_ALEN) &&
58 bss->freq == freq &&
59 bss->ssid_len == ssid_len &&
60 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
61 atomic_inc(&bss->users);
62 break;
63 }
64 bss = bss->hnext;
65 }
66 spin_unlock_bh(&local->bss_lock);
67 return bss;
68}
69
70/* Caller must hold local->bss_lock */
71static void __ieee80211_rx_bss_hash_add(struct ieee80211_local *local,
72 struct ieee80211_bss *bss)
73{
74 u8 hash_idx;
75
76 if (bss_mesh_cfg(bss))
77 hash_idx = mesh_id_hash(bss_mesh_id(bss),
78 bss_mesh_id_len(bss));
79 else
80 hash_idx = STA_HASH(bss->bssid);
81
82 bss->hnext = local->bss_hash[hash_idx];
83 local->bss_hash[hash_idx] = bss;
84}
85
86/* Caller must hold local->bss_lock */
87static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
88 struct ieee80211_bss *bss)
89{
90 struct ieee80211_bss *b, *prev = NULL;
91 b = local->bss_hash[STA_HASH(bss->bssid)];
92 while (b) {
93 if (b == bss) {
94 if (!prev)
95 local->bss_hash[STA_HASH(bss->bssid)] =
96 bss->hnext;
97 else
98 prev->hnext = bss->hnext;
99 break;
100 }
101 prev = b;
102 b = b->hnext;
103 }
104}
105
106struct ieee80211_bss *
107ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
108 u8 *ssid, u8 ssid_len)
109{
110 struct ieee80211_bss *bss;
111
112 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
113 if (!bss)
114 return NULL;
115 atomic_set(&bss->users, 2);
116 memcpy(bss->bssid, bssid, ETH_ALEN);
117 bss->freq = freq;
118 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
119 memcpy(bss->ssid, ssid, ssid_len);
120 bss->ssid_len = ssid_len;
121 }
122
123 spin_lock_bh(&local->bss_lock);
124 /* TODO: order by RSSI? */
125 list_add_tail(&bss->list, &local->bss_list);
126 __ieee80211_rx_bss_hash_add(local, bss);
127 spin_unlock_bh(&local->bss_lock);
128 return bss;
129}
130
131#ifdef CONFIG_MAC80211_MESH
132static struct ieee80211_bss *
133ieee80211_rx_mesh_bss_get(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
134 u8 *mesh_cfg, int freq)
135{
136 struct ieee80211_bss *bss;
137
138 spin_lock_bh(&local->bss_lock);
139 bss = local->bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
140 while (bss) {
141 if (bss_mesh_cfg(bss) &&
142 !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
143 bss->freq == freq &&
144 mesh_id_len == bss->mesh_id_len &&
145 (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
146 mesh_id_len))) {
147 atomic_inc(&bss->users);
148 break;
149 }
150 bss = bss->hnext;
151 }
152 spin_unlock_bh(&local->bss_lock);
153 return bss;
154} 39}
155 40
156static struct ieee80211_bss * 41static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
157ieee80211_rx_mesh_bss_add(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
158 u8 *mesh_cfg, int mesh_config_len, int freq)
159{ 42{
160 struct ieee80211_bss *bss; 43 struct ieee80211_bss *bss = (void *)cbss;
161
162 if (mesh_config_len != IEEE80211_MESH_CONFIG_LEN)
163 return NULL;
164
165 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
166 if (!bss)
167 return NULL;
168
169 bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
170 if (!bss->mesh_cfg) {
171 kfree(bss);
172 return NULL;
173 }
174
175 if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
176 bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
177 if (!bss->mesh_id) {
178 kfree(bss->mesh_cfg);
179 kfree(bss);
180 return NULL;
181 }
182 memcpy(bss->mesh_id, mesh_id, mesh_id_len);
183 }
184 44
185 atomic_set(&bss->users, 2);
186 memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
187 bss->mesh_id_len = mesh_id_len;
188 bss->freq = freq;
189 spin_lock_bh(&local->bss_lock);
190 /* TODO: order by RSSI? */
191 list_add_tail(&bss->list, &local->bss_list);
192 __ieee80211_rx_bss_hash_add(local, bss);
193 spin_unlock_bh(&local->bss_lock);
194 return bss;
195}
196#endif
197
198static void ieee80211_rx_bss_free(struct ieee80211_bss *bss)
199{
200 kfree(bss->ies);
201 kfree(bss_mesh_id(bss)); 45 kfree(bss_mesh_id(bss));
202 kfree(bss_mesh_cfg(bss)); 46 kfree(bss_mesh_cfg(bss));
203 kfree(bss);
204} 47}
205 48
206void ieee80211_rx_bss_put(struct ieee80211_local *local, 49void ieee80211_rx_bss_put(struct ieee80211_local *local,
207 struct ieee80211_bss *bss) 50 struct ieee80211_bss *bss)
208{ 51{
209 local_bh_disable(); 52 cfg80211_put_bss((struct cfg80211_bss *)bss);
210 if (!atomic_dec_and_lock(&bss->users, &local->bss_lock)) {
211 local_bh_enable();
212 return;
213 }
214
215 __ieee80211_rx_bss_hash_del(local, bss);
216 list_del(&bss->list);
217 spin_unlock_bh(&local->bss_lock);
218 ieee80211_rx_bss_free(bss);
219} 53}
220 54
221struct ieee80211_bss * 55struct ieee80211_bss *
@@ -224,49 +58,37 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
224 struct ieee80211_mgmt *mgmt, 58 struct ieee80211_mgmt *mgmt,
225 size_t len, 59 size_t len,
226 struct ieee802_11_elems *elems, 60 struct ieee802_11_elems *elems,
227 int freq, bool beacon) 61 struct ieee80211_channel *channel,
62 bool beacon)
228{ 63{
229 struct ieee80211_bss *bss; 64 struct ieee80211_bss *bss;
230 int clen; 65 int clen;
231 66 enum cfg80211_signal_type sigtype = CFG80211_SIGNAL_TYPE_NONE;
232#ifdef CONFIG_MAC80211_MESH 67 s32 signal = 0;
233 if (elems->mesh_config) 68
234 bss = ieee80211_rx_mesh_bss_get(local, elems->mesh_id, 69 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
235 elems->mesh_id_len, elems->mesh_config, freq); 70 sigtype = CFG80211_SIGNAL_TYPE_MBM;
236 else 71 signal = rx_status->signal * 100;
237#endif 72 } else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) {
238 bss = ieee80211_rx_bss_get(local, mgmt->bssid, freq, 73 sigtype = CFG80211_SIGNAL_TYPE_UNSPEC;
239 elems->ssid, elems->ssid_len); 74 signal = (rx_status->signal * 100) / local->hw.max_signal;
240 if (!bss) {
241#ifdef CONFIG_MAC80211_MESH
242 if (elems->mesh_config)
243 bss = ieee80211_rx_mesh_bss_add(local, elems->mesh_id,
244 elems->mesh_id_len, elems->mesh_config,
245 elems->mesh_config_len, freq);
246 else
247#endif
248 bss = ieee80211_rx_bss_add(local, mgmt->bssid, freq,
249 elems->ssid, elems->ssid_len);
250 if (!bss)
251 return NULL;
252 } else {
253#if 0
254 /* TODO: order by RSSI? */
255 spin_lock_bh(&local->bss_lock);
256 list_move_tail(&bss->list, &local->bss_list);
257 spin_unlock_bh(&local->bss_lock);
258#endif
259 } 75 }
260 76
77 bss = (void *)cfg80211_inform_bss_frame(local->hw.wiphy, channel,
78 mgmt, len, signal, sigtype,
79 GFP_ATOMIC);
80
81 if (!bss)
82 return NULL;
83
84 bss->cbss.free_priv = ieee80211_rx_bss_free;
85
261 /* save the ERP value so that it is available at association time */ 86 /* save the ERP value so that it is available at association time */
262 if (elems->erp_info && elems->erp_info_len >= 1) { 87 if (elems->erp_info && elems->erp_info_len >= 1) {
263 bss->erp_value = elems->erp_info[0]; 88 bss->erp_value = elems->erp_info[0];
264 bss->has_erp_value = 1; 89 bss->has_erp_value = 1;
265 } 90 }
266 91
267 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
268 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
269
270 if (elems->tim) { 92 if (elems->tim) {
271 struct ieee80211_tim_ie *tim_ie = 93 struct ieee80211_tim_ie *tim_ie =
272 (struct ieee80211_tim_ie *)elems->tim; 94 (struct ieee80211_tim_ie *)elems->tim;
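ieee80211_bss_info_update() now reports signal strength to cfg80211 either as mBm (dBm times 100) or as a value scaled against the hardware's max_signal when only an unspecified scale is available. The two conversions are just the arithmetic below, with made-up sample readings.

#include <stdio.h>

int main(void)
{
    int signal_dbm = -42;      /* hardware reports dBm */
    int signal_raw = 57;       /* hardware reports an unspecified 0..max scale */
    int max_signal = 100;

    int mbm     = signal_dbm * 100;                 /* CFG80211_SIGNAL_TYPE_MBM */
    int percent = (signal_raw * 100) / max_signal;  /* CFG80211_SIGNAL_TYPE_UNSPEC */

    printf("mBm = %d, scaled = %d\n", mbm, percent);
    return 0;
}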
@@ -295,37 +117,27 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
295 bss->supp_rates_len += clen; 117 bss->supp_rates_len += clen;
296 } 118 }
297 119
298 bss->band = rx_status->band;
299
300 bss->timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
301 bss->last_update = jiffies;
302 bss->signal = rx_status->signal;
303 bss->noise = rx_status->noise;
304 bss->qual = rx_status->qual;
305 bss->wmm_used = elems->wmm_param || elems->wmm_info; 120 bss->wmm_used = elems->wmm_param || elems->wmm_info;
306 121
307 if (!beacon) 122 if (!beacon)
308 bss->last_probe_resp = jiffies; 123 bss->last_probe_resp = jiffies;
309 124
310 /*
311 * For probe responses, or if we don't have any information yet,
312 * use the IEs from the beacon.
313 */
314 if (!bss->ies || !beacon) {
315 if (bss->ies == NULL || bss->ies_len < elems->total_len) {
316 kfree(bss->ies);
317 bss->ies = kmalloc(elems->total_len, GFP_ATOMIC);
318 }
319 if (bss->ies) {
320 memcpy(bss->ies, elems->ie_start, elems->total_len);
321 bss->ies_len = elems->total_len;
322 } else
323 bss->ies_len = 0;
324 }
325
326 return bss; 125 return bss;
327} 126}
328 127
128void ieee80211_rx_bss_remove(struct ieee80211_sub_if_data *sdata, u8 *bssid,
129 int freq, u8 *ssid, u8 ssid_len)
130{
131 struct ieee80211_bss *bss;
132 struct ieee80211_local *local = sdata->local;
133
134 bss = ieee80211_rx_bss_get(local, bssid, freq, ssid, ssid_len);
135 if (bss) {
136 cfg80211_unlink_bss(local->hw.wiphy, (void *)bss);
137 ieee80211_rx_bss_put(local, bss);
138 }
139}
140
329ieee80211_rx_result 141ieee80211_rx_result
330ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, 142ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
331 struct ieee80211_rx_status *rx_status) 143 struct ieee80211_rx_status *rx_status)
@@ -387,7 +199,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
387 199
388 bss = ieee80211_bss_info_update(sdata->local, rx_status, 200 bss = ieee80211_bss_info_update(sdata->local, rx_status,
389 mgmt, skb->len, &elems, 201 mgmt, skb->len, &elems,
390 freq, beacon); 202 channel, beacon);
391 if (bss) 203 if (bss)
392 ieee80211_rx_bss_put(sdata->local, bss); 204 ieee80211_rx_bss_put(sdata->local, bss);
393 205
@@ -395,7 +207,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
395 return RX_QUEUED; 207 return RX_QUEUED;
396} 208}
397 209
398static void ieee80211_send_nullfunc(struct ieee80211_local *local, 210void ieee80211_send_nullfunc(struct ieee80211_local *local,
399 struct ieee80211_sub_if_data *sdata, 211 struct ieee80211_sub_if_data *sdata,
400 int powersave) 212 int powersave)
401{ 213{
@@ -425,26 +237,22 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local,
425 ieee80211_tx_skb(sdata, skb, 0); 237 ieee80211_tx_skb(sdata, skb, 0);
426} 238}
427 239
428void ieee80211_scan_completed(struct ieee80211_hw *hw) 240void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
429{ 241{
430 struct ieee80211_local *local = hw_to_local(hw); 242 struct ieee80211_local *local = hw_to_local(hw);
431 struct ieee80211_sub_if_data *sdata; 243 struct ieee80211_sub_if_data *sdata;
432 union iwreq_data wrqu;
433 244
434 if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) 245 if (WARN_ON(!local->hw_scanning && !local->sw_scanning))
435 return; 246 return;
436 247
437 local->last_scan_completed = jiffies; 248 if (WARN_ON(!local->scan_req))
438 memset(&wrqu, 0, sizeof(wrqu)); 249 return;
439 250
440 /* 251 if (local->scan_req != &local->int_scan_req)
441 * local->scan_sdata could have been NULLed by the interface 252 cfg80211_scan_done(local->scan_req, aborted);
442 * down code in case we were scanning on an interface that is 253 local->scan_req = NULL;
443 * being taken down. 254
444 */ 255 local->last_scan_completed = jiffies;
445 sdata = local->scan_sdata;
446 if (sdata)
447 wireless_send_event(sdata->dev, SIOCGIWSCAN, &wrqu, NULL);
448 256
449 if (local->hw_scanning) { 257 if (local->hw_scanning) {
450 local->hw_scanning = false; 258 local->hw_scanning = false;
@@ -472,8 +280,11 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
472 netif_addr_unlock(local->mdev); 280 netif_addr_unlock(local->mdev);
473 netif_tx_unlock_bh(local->mdev); 281 netif_tx_unlock_bh(local->mdev);
474 282
475 rcu_read_lock(); 283 mutex_lock(&local->iflist_mtx);
476 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 284 list_for_each_entry(sdata, &local->interfaces, list) {
285 if (!netif_running(sdata->dev))
286 continue;
287
477 /* Tell AP we're back */ 288 /* Tell AP we're back */
478 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 289 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
479 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { 290 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
@@ -482,8 +293,15 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
482 } 293 }
483 } else 294 } else
484 netif_tx_wake_all_queues(sdata->dev); 295 netif_tx_wake_all_queues(sdata->dev);
296
297 /* re-enable beaconing */
298 if (sdata->vif.type == NL80211_IFTYPE_AP ||
299 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
300 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
301 ieee80211_if_config(sdata,
302 IEEE80211_IFCC_BEACON_ENABLED);
485 } 303 }
486 rcu_read_unlock(); 304 mutex_unlock(&local->iflist_mtx);
487 305
488 done: 306 done:
489 ieee80211_mlme_notify_scan_completed(local); 307 ieee80211_mlme_notify_scan_completed(local);
@@ -491,15 +309,13 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
491} 309}
492EXPORT_SYMBOL(ieee80211_scan_completed); 310EXPORT_SYMBOL(ieee80211_scan_completed);
493 311
494
495void ieee80211_scan_work(struct work_struct *work) 312void ieee80211_scan_work(struct work_struct *work)
496{ 313{
497 struct ieee80211_local *local = 314 struct ieee80211_local *local =
498 container_of(work, struct ieee80211_local, scan_work.work); 315 container_of(work, struct ieee80211_local, scan_work.work);
499 struct ieee80211_sub_if_data *sdata = local->scan_sdata; 316 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
500 struct ieee80211_supported_band *sband;
501 struct ieee80211_channel *chan; 317 struct ieee80211_channel *chan;
502 int skip; 318 int skip, i;
503 unsigned long next_delay = 0; 319 unsigned long next_delay = 0;
504 320
505 /* 321 /*
@@ -510,33 +326,13 @@ void ieee80211_scan_work(struct work_struct *work)
510 326
511 switch (local->scan_state) { 327 switch (local->scan_state) {
512 case SCAN_SET_CHANNEL: 328 case SCAN_SET_CHANNEL:
513 /*
514 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS
515 * after we successfully scanned the last channel of the last
516 * band (and the last band is supported by the hw)
517 */
518 if (local->scan_band < IEEE80211_NUM_BANDS)
519 sband = local->hw.wiphy->bands[local->scan_band];
520 else
521 sband = NULL;
522
523 /*
524 * If we are at an unsupported band and have more bands
525 * left to scan, advance to the next supported one.
526 */
527 while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
528 local->scan_band++;
529 sband = local->hw.wiphy->bands[local->scan_band];
530 local->scan_channel_idx = 0;
531 }
532
533 /* if no more bands/channels left, complete scan */ 329 /* if no more bands/channels left, complete scan */
534 if (!sband || local->scan_channel_idx >= sband->n_channels) { 330 if (local->scan_channel_idx >= local->scan_req->n_channels) {
535 ieee80211_scan_completed(local_to_hw(local)); 331 ieee80211_scan_completed(local_to_hw(local), false);
536 return; 332 return;
537 } 333 }
538 skip = 0; 334 skip = 0;
539 chan = &sband->channels[local->scan_channel_idx]; 335 chan = local->scan_req->channels[local->scan_channel_idx];
540 336
541 if (chan->flags & IEEE80211_CHAN_DISABLED || 337 if (chan->flags & IEEE80211_CHAN_DISABLED ||
542 (sdata->vif.type == NL80211_IFTYPE_ADHOC && 338 (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -552,15 +348,6 @@ void ieee80211_scan_work(struct work_struct *work)
552 348
553 /* advance state machine to next channel/band */ 349 /* advance state machine to next channel/band */
554 local->scan_channel_idx++; 350 local->scan_channel_idx++;
555 if (local->scan_channel_idx >= sband->n_channels) {
556 /*
557 * scan_band may end up == IEEE80211_NUM_BANDS, but
558 * we'll catch that case above and complete the scan
559 * if that is the case.
560 */
561 local->scan_band++;
562 local->scan_channel_idx = 0;
563 }
564 351
565 if (skip) 352 if (skip)
566 break; 353 break;
@@ -573,10 +360,14 @@ void ieee80211_scan_work(struct work_struct *work)
573 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; 360 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
574 local->scan_state = SCAN_SET_CHANNEL; 361 local->scan_state = SCAN_SET_CHANNEL;
575 362
576 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) 363 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
364 !local->scan_req->n_ssids)
577 break; 365 break;
578 ieee80211_send_probe_req(sdata, NULL, local->scan_ssid, 366 for (i = 0; i < local->scan_req->n_ssids; i++)
579 local->scan_ssid_len); 367 ieee80211_send_probe_req(
368 sdata, NULL,
369 local->scan_req->ssids[i].ssid,
370 local->scan_req->ssids[i].ssid_len);
580 next_delay = IEEE80211_CHANNEL_TIME; 371 next_delay = IEEE80211_CHANNEL_TIME;
581 break; 372 break;
582 } 373 }
@@ -587,14 +378,19 @@ void ieee80211_scan_work(struct work_struct *work)
587 378
588 379
589int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, 380int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
590 u8 *ssid, size_t ssid_len) 381 struct cfg80211_scan_request *req)
591{ 382{
592 struct ieee80211_local *local = scan_sdata->local; 383 struct ieee80211_local *local = scan_sdata->local;
593 struct ieee80211_sub_if_data *sdata; 384 struct ieee80211_sub_if_data *sdata;
594 385
595 if (ssid_len > IEEE80211_MAX_SSID_LEN) 386 if (!req)
596 return -EINVAL; 387 return -EINVAL;
597 388
389 if (local->scan_req && local->scan_req != req)
390 return -EBUSY;
391
392 local->scan_req = req;
393
598 /* MLME-SCAN.request (page 118) page 144 (11.1.3.1) 394 /* MLME-SCAN.request (page 118) page 144 (11.1.3.1)
599 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS 395 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
600 * BSSID: MACAddress 396 * BSSID: MACAddress
@@ -622,7 +418,7 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
622 int rc; 418 int rc;
623 419
624 local->hw_scanning = true; 420 local->hw_scanning = true;
625 rc = local->ops->hw_scan(local_to_hw(local), ssid, ssid_len); 421 rc = local->ops->hw_scan(local_to_hw(local), req);
626 if (rc) { 422 if (rc) {
627 local->hw_scanning = false; 423 local->hw_scanning = false;
628 return rc; 424 return rc;
@@ -633,8 +429,18 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
633 429
634 local->sw_scanning = true; 430 local->sw_scanning = true;
635 431
636 rcu_read_lock(); 432 mutex_lock(&local->iflist_mtx);
637 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 433 list_for_each_entry(sdata, &local->interfaces, list) {
434 if (!netif_running(sdata->dev))
435 continue;
436
437 /* disable beaconing */
438 if (sdata->vif.type == NL80211_IFTYPE_AP ||
439 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
440 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
441 ieee80211_if_config(sdata,
442 IEEE80211_IFCC_BEACON_ENABLED);
443
638 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 444 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
639 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { 445 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
640 netif_tx_stop_all_queues(sdata->dev); 446 netif_tx_stop_all_queues(sdata->dev);
@@ -643,17 +449,12 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
643 } else 449 } else
644 netif_tx_stop_all_queues(sdata->dev); 450 netif_tx_stop_all_queues(sdata->dev);
645 } 451 }
646 rcu_read_unlock(); 452 mutex_unlock(&local->iflist_mtx);
647 453
648 if (ssid) {
649 local->scan_ssid_len = ssid_len;
650 memcpy(local->scan_ssid, ssid, ssid_len);
651 } else
652 local->scan_ssid_len = 0;
653 local->scan_state = SCAN_SET_CHANNEL; 454 local->scan_state = SCAN_SET_CHANNEL;
654 local->scan_channel_idx = 0; 455 local->scan_channel_idx = 0;
655 local->scan_band = IEEE80211_BAND_2GHZ;
656 local->scan_sdata = scan_sdata; 456 local->scan_sdata = scan_sdata;
457 local->scan_req = req;
657 458
658 netif_addr_lock_bh(local->mdev); 459 netif_addr_lock_bh(local->mdev);
659 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; 460 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
@@ -673,13 +474,21 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
673 474
674 475
675int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 476int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
676 u8 *ssid, size_t ssid_len) 477 struct cfg80211_scan_request *req)
677{ 478{
678 struct ieee80211_local *local = sdata->local; 479 struct ieee80211_local *local = sdata->local;
679 struct ieee80211_if_sta *ifsta; 480 struct ieee80211_if_sta *ifsta;
680 481
482 if (!req)
483 return -EINVAL;
484
485 if (local->scan_req && local->scan_req != req)
486 return -EBUSY;
487
488 local->scan_req = req;
489
681 if (sdata->vif.type != NL80211_IFTYPE_STATION) 490 if (sdata->vif.type != NL80211_IFTYPE_STATION)
682 return ieee80211_start_scan(sdata, ssid, ssid_len); 491 return ieee80211_start_scan(sdata, req);
683 492
684 /* 493 /*
685 * STA has a state machine that might need to defer scanning 494 * STA has a state machine that might need to defer scanning
@@ -694,241 +503,8 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
694 } 503 }
695 504
696 ifsta = &sdata->u.sta; 505 ifsta = &sdata->u.sta;
697
698 ifsta->scan_ssid_len = ssid_len;
699 if (ssid_len)
700 memcpy(ifsta->scan_ssid, ssid, ssid_len);
701 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request); 506 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
702 queue_work(local->hw.workqueue, &ifsta->work); 507 queue_work(local->hw.workqueue, &ifsta->work);
703 508
704 return 0; 509 return 0;
705} 510}
706
707
708static void ieee80211_scan_add_ies(struct iw_request_info *info,
709 struct ieee80211_bss *bss,
710 char **current_ev, char *end_buf)
711{
712 u8 *pos, *end, *next;
713 struct iw_event iwe;
714
715 if (bss == NULL || bss->ies == NULL)
716 return;
717
718 /*
719 * If needed, fragment the IEs buffer (at IE boundaries) into short
720 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages.
721 */
722 pos = bss->ies;
723 end = pos + bss->ies_len;
724
725 while (end - pos > IW_GENERIC_IE_MAX) {
726 next = pos + 2 + pos[1];
727 while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX)
728 next = next + 2 + next[1];
729
730 memset(&iwe, 0, sizeof(iwe));
731 iwe.cmd = IWEVGENIE;
732 iwe.u.data.length = next - pos;
733 *current_ev = iwe_stream_add_point(info, *current_ev,
734 end_buf, &iwe, pos);
735
736 pos = next;
737 }
738
739 if (end > pos) {
740 memset(&iwe, 0, sizeof(iwe));
741 iwe.cmd = IWEVGENIE;
742 iwe.u.data.length = end - pos;
743 *current_ev = iwe_stream_add_point(info, *current_ev,
744 end_buf, &iwe, pos);
745 }
746}
747
748
749static char *
750ieee80211_scan_result(struct ieee80211_local *local,
751 struct iw_request_info *info,
752 struct ieee80211_bss *bss,
753 char *current_ev, char *end_buf)
754{
755 struct iw_event iwe;
756 char *buf;
757
758 if (time_after(jiffies,
759 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
760 return current_ev;
761
762 memset(&iwe, 0, sizeof(iwe));
763 iwe.cmd = SIOCGIWAP;
764 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
765 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
766 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
767 IW_EV_ADDR_LEN);
768
769 memset(&iwe, 0, sizeof(iwe));
770 iwe.cmd = SIOCGIWESSID;
771 if (bss_mesh_cfg(bss)) {
772 iwe.u.data.length = bss_mesh_id_len(bss);
773 iwe.u.data.flags = 1;
774 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
775 &iwe, bss_mesh_id(bss));
776 } else {
777 iwe.u.data.length = bss->ssid_len;
778 iwe.u.data.flags = 1;
779 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
780 &iwe, bss->ssid);
781 }
782
783 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
784 || bss_mesh_cfg(bss)) {
785 memset(&iwe, 0, sizeof(iwe));
786 iwe.cmd = SIOCGIWMODE;
787 if (bss_mesh_cfg(bss))
788 iwe.u.mode = IW_MODE_MESH;
789 else if (bss->capability & WLAN_CAPABILITY_ESS)
790 iwe.u.mode = IW_MODE_MASTER;
791 else
792 iwe.u.mode = IW_MODE_ADHOC;
793 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
794 &iwe, IW_EV_UINT_LEN);
795 }
796
797 memset(&iwe, 0, sizeof(iwe));
798 iwe.cmd = SIOCGIWFREQ;
799 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
800 iwe.u.freq.e = 0;
801 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
802 IW_EV_FREQ_LEN);
803
804 memset(&iwe, 0, sizeof(iwe));
805 iwe.cmd = SIOCGIWFREQ;
806 iwe.u.freq.m = bss->freq;
807 iwe.u.freq.e = 6;
808 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
809 IW_EV_FREQ_LEN);
810 memset(&iwe, 0, sizeof(iwe));
811 iwe.cmd = IWEVQUAL;
812 iwe.u.qual.qual = bss->qual;
813 iwe.u.qual.level = bss->signal;
814 iwe.u.qual.noise = bss->noise;
815 iwe.u.qual.updated = local->wstats_flags;
816 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
817 IW_EV_QUAL_LEN);
818
819 memset(&iwe, 0, sizeof(iwe));
820 iwe.cmd = SIOCGIWENCODE;
821 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
822 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
823 else
824 iwe.u.data.flags = IW_ENCODE_DISABLED;
825 iwe.u.data.length = 0;
826 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
827 &iwe, "");
828
829 ieee80211_scan_add_ies(info, bss, &current_ev, end_buf);
830
831 if (bss->supp_rates_len > 0) {
832 /* display all supported rates in readable format */
833 char *p = current_ev + iwe_stream_lcp_len(info);
834 int i;
835
836 memset(&iwe, 0, sizeof(iwe));
837 iwe.cmd = SIOCGIWRATE;
838 /* Those two flags are ignored... */
839 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
840
841 for (i = 0; i < bss->supp_rates_len; i++) {
842 iwe.u.bitrate.value = ((bss->supp_rates[i] &
843 0x7f) * 500000);
844 p = iwe_stream_add_value(info, current_ev, p,
845 end_buf, &iwe, IW_EV_PARAM_LEN);
846 }
847 current_ev = p;
848 }
849
850 buf = kmalloc(30, GFP_ATOMIC);
851 if (buf) {
852 memset(&iwe, 0, sizeof(iwe));
853 iwe.cmd = IWEVCUSTOM;
854 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
855 iwe.u.data.length = strlen(buf);
856 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
857 &iwe, buf);
858 memset(&iwe, 0, sizeof(iwe));
859 iwe.cmd = IWEVCUSTOM;
860 sprintf(buf, " Last beacon: %dms ago",
861 jiffies_to_msecs(jiffies - bss->last_update));
862 iwe.u.data.length = strlen(buf);
863 current_ev = iwe_stream_add_point(info, current_ev,
864 end_buf, &iwe, buf);
865 kfree(buf);
866 }
867
868 if (bss_mesh_cfg(bss)) {
869 u8 *cfg = bss_mesh_cfg(bss);
870 buf = kmalloc(50, GFP_ATOMIC);
871 if (buf) {
872 memset(&iwe, 0, sizeof(iwe));
873 iwe.cmd = IWEVCUSTOM;
874 sprintf(buf, "Mesh network (version %d)", cfg[0]);
875 iwe.u.data.length = strlen(buf);
876 current_ev = iwe_stream_add_point(info, current_ev,
877 end_buf,
878 &iwe, buf);
879 sprintf(buf, "Path Selection Protocol ID: "
880 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
881 cfg[4]);
882 iwe.u.data.length = strlen(buf);
883 current_ev = iwe_stream_add_point(info, current_ev,
884 end_buf,
885 &iwe, buf);
886 sprintf(buf, "Path Selection Metric ID: "
887 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
888 cfg[8]);
889 iwe.u.data.length = strlen(buf);
890 current_ev = iwe_stream_add_point(info, current_ev,
891 end_buf,
892 &iwe, buf);
893 sprintf(buf, "Congestion Control Mode ID: "
894 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
895 cfg[11], cfg[12]);
896 iwe.u.data.length = strlen(buf);
897 current_ev = iwe_stream_add_point(info, current_ev,
898 end_buf,
899 &iwe, buf);
900 sprintf(buf, "Channel Precedence: "
901 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
902 cfg[15], cfg[16]);
903 iwe.u.data.length = strlen(buf);
904 current_ev = iwe_stream_add_point(info, current_ev,
905 end_buf,
906 &iwe, buf);
907 kfree(buf);
908 }
909 }
910
911 return current_ev;
912}
913
914
915int ieee80211_scan_results(struct ieee80211_local *local,
916 struct iw_request_info *info,
917 char *buf, size_t len)
918{
919 char *current_ev = buf;
920 char *end_buf = buf + len;
921 struct ieee80211_bss *bss;
922
923 spin_lock_bh(&local->bss_lock);
924 list_for_each_entry(bss, &local->bss_list, list) {
925 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
926 spin_unlock_bh(&local->bss_lock);
927 return -E2BIG;
928 }
929 current_ev = ieee80211_scan_result(local, info, bss,
930 current_ev, end_buf);
931 }
932 spin_unlock_bh(&local->bss_lock);
933 return current_ev - buf;
934}
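[Editor's note] The scan.c changes above drop the per-band channel walk and the wext result formatting, and instead iterate a flat channel array carried in a cfg80211-style scan request (scan_req->channels / n_channels, plus a list of SSIDs to probe on active channels). A minimal user-space sketch of that state machine follows; the struct names and flag constants are invented stand-ins, not the kernel definitions.

/* Editor's sketch (not kernel code): walk a flat channel list from a scan
 * request, probing each SSID on active channels, as the patched
 * ieee80211_scan_work() does. All names below are illustrative only. */
#include <stdio.h>
#include <stddef.h>

#define CHAN_PASSIVE  0x1   /* stand-in for IEEE80211_CHAN_PASSIVE_SCAN */
#define CHAN_DISABLED 0x2   /* stand-in for IEEE80211_CHAN_DISABLED */

struct ssid { const char *ssid; size_t len; };
struct channel { int center_freq; unsigned flags; };

struct scan_request {           /* loosely modeled on cfg80211_scan_request */
	struct channel *channels;
	size_t n_channels;
	struct ssid *ssids;
	size_t n_ssids;
};

static void send_probe_req(const struct channel *c, const struct ssid *s)
{
	printf("probe %.*s on %d MHz\n", (int)s->len, s->ssid, c->center_freq);
}

static void run_scan(const struct scan_request *req)
{
	for (size_t idx = 0; idx < req->n_channels; idx++) {
		const struct channel *chan = &req->channels[idx];

		if (chan->flags & CHAN_DISABLED)
			continue;               /* the "skip" path in the patch */
		if ((chan->flags & CHAN_PASSIVE) || !req->n_ssids)
			continue;               /* listen only, send no probes */
		for (size_t i = 0; i < req->n_ssids; i++)
			send_probe_req(chan, &req->ssids[i]);
	}
	/* idx == n_channels: scan complete, ieee80211_scan_completed(..., false) */
}

int main(void)
{
	struct channel chans[] = { {2412, 0}, {2484, CHAN_PASSIVE}, {5180, 0} };
	struct ssid ssids[] = { {"example-net", 11} };
	struct scan_request req = { chans, 3, ssids, 1 };

	run_scan(&req);
	return 0;
}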
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index f72bad636d8..47bb2aed281 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -65,7 +65,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
65 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; 65 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
66 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; 66 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
67 67
68 ieee80211_tx_skb(sdata, skb, 0); 68 ieee80211_tx_skb(sdata, skb, 1);
69} 69}
70 70
71void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 71void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -84,3 +84,104 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
84 mgmt->sa, mgmt->bssid, 84 mgmt->sa, mgmt->bssid,
85 mgmt->u.action.u.measurement.dialog_token); 85 mgmt->u.action.u.measurement.dialog_token);
86} 86}
87
88void ieee80211_chswitch_work(struct work_struct *work)
89{
90 struct ieee80211_sub_if_data *sdata =
91 container_of(work, struct ieee80211_sub_if_data, u.sta.chswitch_work);
92 struct ieee80211_bss *bss;
93 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
94
95 if (!netif_running(sdata->dev))
96 return;
97
98 bss = ieee80211_rx_bss_get(sdata->local, ifsta->bssid,
99 sdata->local->hw.conf.channel->center_freq,
100 ifsta->ssid, ifsta->ssid_len);
101 if (!bss)
102 goto exit;
103
104 sdata->local->oper_channel = sdata->local->csa_channel;
105 /* XXX: shouldn't really modify cfg80211-owned data! */
106 if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
107 bss->cbss.channel = sdata->local->oper_channel;
108
109 ieee80211_rx_bss_put(sdata->local, bss);
110exit:
111 ifsta->flags &= ~IEEE80211_STA_CSA_RECEIVED;
112 ieee80211_wake_queues_by_reason(&sdata->local->hw,
113 IEEE80211_QUEUE_STOP_REASON_CSA);
114}
115
116void ieee80211_chswitch_timer(unsigned long data)
117{
118 struct ieee80211_sub_if_data *sdata =
119 (struct ieee80211_sub_if_data *) data;
120 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
121
122 queue_work(sdata->local->hw.workqueue, &ifsta->chswitch_work);
123}
124
125void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata,
126 struct ieee80211_channel_sw_ie *sw_elem,
127 struct ieee80211_bss *bss)
128{
129 struct ieee80211_channel *new_ch;
130 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
131 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
132
133 /* FIXME: Handle ADHOC later */
134 if (sdata->vif.type != NL80211_IFTYPE_STATION)
135 return;
136
137 if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATED)
138 return;
139
140 if (sdata->local->sw_scanning || sdata->local->hw_scanning)
141 return;
142
143 /* Disregard subsequent beacons if we are already running a timer
144 processing a CSA */
145
146 if (ifsta->flags & IEEE80211_STA_CSA_RECEIVED)
147 return;
148
149 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
150 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
151 return;
152
153 sdata->local->csa_channel = new_ch;
154
155 if (sw_elem->count <= 1) {
156 queue_work(sdata->local->hw.workqueue, &ifsta->chswitch_work);
157 } else {
158 ieee80211_stop_queues_by_reason(&sdata->local->hw,
159 IEEE80211_QUEUE_STOP_REASON_CSA);
160 ifsta->flags |= IEEE80211_STA_CSA_RECEIVED;
161 mod_timer(&ifsta->chswitch_timer,
162 jiffies +
163 msecs_to_jiffies(sw_elem->count *
164 bss->cbss.beacon_interval));
165 }
166}
167
168void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
169 u16 capab_info, u8 *pwr_constr_elem,
170 u8 pwr_constr_elem_len)
171{
172 struct ieee80211_conf *conf = &sdata->local->hw.conf;
173
174 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
175 return;
176
177 /* Power constraint IE length should be 1 octet */
178 if (pwr_constr_elem_len != 1)
179 return;
180
181 if ((*pwr_constr_elem <= conf->channel->max_power) &&
182 (*pwr_constr_elem != sdata->local->power_constr_level)) {
183 sdata->local->power_constr_level = *pwr_constr_elem;
184 ieee80211_hw_config(sdata->local, 0);
185 }
186}
187
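[Editor's note] The channel-switch handling added above defers the actual switch by "count" beacon intervals, where the beacon interval is expressed in TUs (1 TU = 1024 microseconds). The patch simply passes count * beacon_interval to msecs_to_jiffies(), i.e. it treats 1 TU as roughly 1 ms, which is close enough for a countdown; the small sketch below shows the exact arithmetic for comparison. The helper name and example values are illustrative, not taken from the patch.

/* Editor's sketch: exact CSA countdown, count beacon intervals (in TUs)
 * converted to milliseconds. */
#include <stdio.h>

static unsigned long csa_delay_ms_exact(unsigned int count,
					 unsigned int beacon_int_tu)
{
	unsigned long usecs = (unsigned long)count * beacon_int_tu * 1024UL;

	return usecs / 1000UL;
}

int main(void)
{
	/* e.g. count = 5 beacons at a 100 TU beacon interval -> ~512 ms */
	printf("switch in ~%lu ms\n", csa_delay_ms_exact(5, 100));
	return 0;
}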
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 10c5539c20a..634f65c0130 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -194,12 +194,41 @@ void sta_info_destroy(struct sta_info *sta)
194 dev_kfree_skb_any(skb); 194 dev_kfree_skb_any(skb);
195 195
196 for (i = 0; i < STA_TID_NUM; i++) { 196 for (i = 0; i < STA_TID_NUM; i++) {
197 struct tid_ampdu_rx *tid_rx;
198 struct tid_ampdu_tx *tid_tx;
199
197 spin_lock_bh(&sta->lock); 200 spin_lock_bh(&sta->lock);
198 if (sta->ampdu_mlme.tid_rx[i]) 201 tid_rx = sta->ampdu_mlme.tid_rx[i];
199 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer); 202 /* Make sure timer won't free the tid_rx struct, see below */
200 if (sta->ampdu_mlme.tid_tx[i]) 203 if (tid_rx)
201 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer); 204 tid_rx->shutdown = true;
202 spin_unlock_bh(&sta->lock); 205 spin_unlock_bh(&sta->lock);
206
207 /*
208 * Outside spinlock - shutdown is true now so that the timer
209 * won't free tid_rx, we have to do that now. Can't let the
210 * timer do it because we have to sync the timer outside the
211 * lock that it takes itself.
212 */
213 if (tid_rx) {
214 del_timer_sync(&tid_rx->session_timer);
215 kfree(tid_rx);
216 }
217
218 /*
219 * No need to do such complications for TX agg sessions, the
220 * path leading to freeing the tid_tx struct goes via a call
221 * from the driver, and thus needs to look up the sta struct
222 * again, which cannot be found when we get here. Hence, we
223 * just need to delete the timer and free the aggregation
224 * info; we won't be telling the peer about it then but that
225 * doesn't matter if we're not talking to it again anyway.
226 */
227 tid_tx = sta->ampdu_mlme.tid_tx[i];
228 if (tid_tx) {
229 del_timer_sync(&tid_tx->addba_resp_timer);
230 kfree(tid_tx);
231 }
203 } 232 }
204 233
205 __sta_info_free(local, sta); 234 __sta_info_free(local, sta);
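[Editor's note] The sta_info.c hunk above works around a timer/lock teardown race: the Rx aggregation state is flagged as shutting down while holding the lock, and the timer is then synced and the struct freed outside the lock, so del_timer_sync() is never called under a lock that the timer handler itself takes. A generic pthreads analogue of the same pattern is sketched below; all names are invented for illustration and the worker thread stands in for the session timer.

/* Editor's sketch of the teardown pattern used for tid_rx above:
 * 1) set a shutdown flag under the lock, 2) sync with the asynchronous
 * worker outside the lock, 3) free the object. */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct session {
	pthread_mutex_t lock;
	bool shutdown;              /* counterpart of tid_rx->shutdown */
	int reorder_buf[64];
};

static void *session_worker(void *arg)     /* stands in for the session timer */
{
	struct session *s = arg;

	pthread_mutex_lock(&s->lock);
	if (!s->shutdown)
		s->reorder_buf[0]++;        /* only touch the object if live */
	pthread_mutex_unlock(&s->lock);
	return NULL;
}

static void session_destroy(struct session *s, pthread_t worker)
{
	pthread_mutex_lock(&s->lock);
	s->shutdown = true;                 /* 1) flag under the lock */
	pthread_mutex_unlock(&s->lock);

	pthread_join(worker, NULL);         /* 2) sync outside the lock
					       (the kernel uses del_timer_sync) */
	pthread_mutex_destroy(&s->lock);
	free(s);                            /* 3) nothing can reference it now */
}

int main(void)
{
	struct session *s = calloc(1, sizeof(*s));
	pthread_t t;

	if (!s)
		return 1;
	pthread_mutex_init(&s->lock, NULL);
	pthread_create(&t, NULL, session_worker, s);
	session_destroy(s, t);
	puts("torn down without deadlock");
	return 0;
}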
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index e49a5b99cf1..d9653231992 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -34,6 +34,7 @@
34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the 34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next 35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted. 36 * frame to this station is transmitted.
37 * @WLAN_STA_MFP: Management frame protection is used with this STA.
37 */ 38 */
38enum ieee80211_sta_info_flags { 39enum ieee80211_sta_info_flags {
39 WLAN_STA_AUTH = 1<<0, 40 WLAN_STA_AUTH = 1<<0,
@@ -46,6 +47,7 @@ enum ieee80211_sta_info_flags {
46 WLAN_STA_WDS = 1<<7, 47 WLAN_STA_WDS = 1<<7,
47 WLAN_STA_PSPOLL = 1<<8, 48 WLAN_STA_PSPOLL = 1<<8,
48 WLAN_STA_CLEAR_PS_FILT = 1<<9, 49 WLAN_STA_CLEAR_PS_FILT = 1<<9,
50 WLAN_STA_MFP = 1<<10,
49}; 51};
50 52
51#define STA_TID_NUM 16 53#define STA_TID_NUM 16
@@ -63,7 +65,6 @@ enum ieee80211_sta_info_flags {
63#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \ 65#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
64 HT_ADDBA_DRV_READY_MSK | \ 66 HT_ADDBA_DRV_READY_MSK | \
65 HT_ADDBA_RECEIVED_MSK) 67 HT_ADDBA_RECEIVED_MSK)
66#define HT_AGG_STATE_DEBUGFS_CTL BIT(7)
67 68
68/** 69/**
69 * struct tid_ampdu_tx - TID aggregation information (Tx). 70 * struct tid_ampdu_tx - TID aggregation information (Tx).
@@ -87,7 +88,7 @@ struct tid_ampdu_tx {
87 * @stored_mpdu_num: number of MPDUs in reordering buffer 88 * @stored_mpdu_num: number of MPDUs in reordering buffer
88 * @ssn: Starting Sequence Number expected to be aggregated. 89 * @ssn: Starting Sequence Number expected to be aggregated.
89 * @buf_size: buffer size for incoming A-MPDUs 90 * @buf_size: buffer size for incoming A-MPDUs
90 * @timeout: reset timer value. 91 * @timeout: reset timer value (in TUs).
91 * @dialog_token: dialog token for aggregation session 92 * @dialog_token: dialog token for aggregation session
92 */ 93 */
93struct tid_ampdu_rx { 94struct tid_ampdu_rx {
@@ -99,6 +100,7 @@ struct tid_ampdu_rx {
99 u16 buf_size; 100 u16 buf_size;
100 u16 timeout; 101 u16 timeout;
101 u8 dialog_token; 102 u8 dialog_token;
103 bool shutdown;
102}; 104};
103 105
104/** 106/**
@@ -382,8 +384,6 @@ static inline u32 get_sta_flags(struct sta_info *sta)
382} 384}
383 385
384 386
385/* Maximum number of concurrently registered stations */
386#define MAX_STA_COUNT 2007
387 387
388#define STA_HASH_SIZE 256 388#define STA_HASH_SIZE 256
389#define STA_HASH(sta) (sta[5]) 389#define STA_HASH(sta) (sta[5])
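[Editor's note] sta_info.h grows a new per-station flag (WLAN_STA_MFP) in the existing one-bit-per-flag enum. For readers unfamiliar with the idiom, a minimal sketch of declaring and testing such flag bits; the enum values and helper name below only mirror the kernel style and are not the real definitions.

/* Editor's sketch: one-bit-per-flag enum plus a test helper, in the style
 * of the WLAN_STA_* flags. Purely illustrative. */
#include <stdio.h>

enum sta_flags {
	STA_AUTH  = 1 << 0,
	STA_ASSOC = 1 << 1,
	STA_MFP   = 1 << 10,    /* management frame protection in use */
};

static int test_flags(unsigned int flags, unsigned int mask)
{
	return (flags & mask) != 0;
}

int main(void)
{
	unsigned int flags = STA_AUTH | STA_MFP;

	printf("MFP in use: %d\n", test_flags(flags, STA_MFP));
	printf("associated: %d\n", test_flags(flags, STA_ASSOC));
	return 0;
}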
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 94de5033f0b..33926831c64 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -35,6 +35,7 @@
35#define IEEE80211_TX_OK 0 35#define IEEE80211_TX_OK 0
36#define IEEE80211_TX_AGAIN 1 36#define IEEE80211_TX_AGAIN 1
37#define IEEE80211_TX_FRAG_AGAIN 2 37#define IEEE80211_TX_FRAG_AGAIN 2
38#define IEEE80211_TX_PENDING 3
38 39
39/* misc utils */ 40/* misc utils */
40 41
@@ -330,6 +331,22 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
330 return TX_CONTINUE; 331 return TX_CONTINUE;
331} 332}
332 333
334static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
335 struct sk_buff *skb)
336{
337 if (!ieee80211_is_mgmt(fc))
338 return 0;
339
340 if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
341 return 0;
342
343 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
344 skb->data))
345 return 0;
346
347 return 1;
348}
349
333static ieee80211_tx_result 350static ieee80211_tx_result
334ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) 351ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
335{ 352{
@@ -409,11 +426,17 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
409 tx->key = NULL; 426 tx->key = NULL;
410 else if (tx->sta && (key = rcu_dereference(tx->sta->key))) 427 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
411 tx->key = key; 428 tx->key = key;
429 else if (ieee80211_is_mgmt(hdr->frame_control) &&
430 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
431 tx->key = key;
412 else if ((key = rcu_dereference(tx->sdata->default_key))) 432 else if ((key = rcu_dereference(tx->sdata->default_key)))
413 tx->key = key; 433 tx->key = key;
414 else if (tx->sdata->drop_unencrypted && 434 else if (tx->sdata->drop_unencrypted &&
415 (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) && 435 (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) &&
416 !(info->flags & IEEE80211_TX_CTL_INJECTED)) { 436 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
437 (!ieee80211_is_robust_mgmt_frame(hdr) ||
438 (ieee80211_is_action(hdr->frame_control) &&
439 tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
417 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 440 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
418 return TX_DROP; 441 return TX_DROP;
419 } else 442 } else
@@ -428,10 +451,19 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
428 if (ieee80211_is_auth(hdr->frame_control)) 451 if (ieee80211_is_auth(hdr->frame_control))
429 break; 452 break;
430 case ALG_TKIP: 453 case ALG_TKIP:
431 case ALG_CCMP:
432 if (!ieee80211_is_data_present(hdr->frame_control)) 454 if (!ieee80211_is_data_present(hdr->frame_control))
433 tx->key = NULL; 455 tx->key = NULL;
434 break; 456 break;
457 case ALG_CCMP:
458 if (!ieee80211_is_data_present(hdr->frame_control) &&
459 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
460 tx->skb))
461 tx->key = NULL;
462 break;
463 case ALG_AES_CMAC:
464 if (!ieee80211_is_mgmt(hdr->frame_control))
465 tx->key = NULL;
466 break;
435 } 467 }
436 } 468 }
437 469
@@ -787,6 +819,8 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
787 return ieee80211_crypto_tkip_encrypt(tx); 819 return ieee80211_crypto_tkip_encrypt(tx);
788 case ALG_CCMP: 820 case ALG_CCMP:
789 return ieee80211_crypto_ccmp_encrypt(tx); 821 return ieee80211_crypto_ccmp_encrypt(tx);
822 case ALG_AES_CMAC:
823 return ieee80211_crypto_aes_cmac_encrypt(tx);
790 } 824 }
791 825
792 /* not reached */ 826 /* not reached */
@@ -1052,8 +1086,7 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1052 1086
1053 if (skb) { 1087 if (skb) {
1054 if (netif_subqueue_stopped(local->mdev, skb)) 1088 if (netif_subqueue_stopped(local->mdev, skb))
1055 return IEEE80211_TX_AGAIN; 1089 return IEEE80211_TX_PENDING;
1056 info = IEEE80211_SKB_CB(skb);
1057 1090
1058 ret = local->ops->tx(local_to_hw(local), skb); 1091 ret = local->ops->tx(local_to_hw(local), skb);
1059 if (ret) 1092 if (ret)
@@ -1179,8 +1212,9 @@ retry:
1179 * queues, there's no reason for a driver to reject 1212 * queues, there's no reason for a driver to reject
1180 * a frame there, warn and drop it. 1213 * a frame there, warn and drop it.
1181 */ 1214 */
1182 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) 1215 if (ret != IEEE80211_TX_PENDING)
1183 goto drop; 1216 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
1217 goto drop;
1184 1218
1185 store = &local->pending_packet[queue]; 1219 store = &local->pending_packet[queue];
1186 1220
@@ -1296,6 +1330,19 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1296 return 0; 1330 return 0;
1297 } 1331 }
1298 1332
1333 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
1334 local->hw.conf.dynamic_ps_timeout > 0) {
1335 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1336 ieee80211_stop_queues_by_reason(&local->hw,
1337 IEEE80211_QUEUE_STOP_REASON_PS);
1338 queue_work(local->hw.workqueue,
1339 &local->dynamic_ps_disable_work);
1340 }
1341
1342 mod_timer(&local->dynamic_ps_timer, jiffies +
1343 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1344 }
1345
1299 memset(info, 0, sizeof(*info)); 1346 memset(info, 0, sizeof(*info));
1300 1347
1301 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 1348 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
@@ -1390,10 +1437,31 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1390 struct net_device *dev) 1437 struct net_device *dev)
1391{ 1438{
1392 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1439 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1440 struct ieee80211_channel *chan = local->hw.conf.channel;
1393 struct ieee80211_radiotap_header *prthdr = 1441 struct ieee80211_radiotap_header *prthdr =
1394 (struct ieee80211_radiotap_header *)skb->data; 1442 (struct ieee80211_radiotap_header *)skb->data;
1395 u16 len_rthdr; 1443 u16 len_rthdr;
1396 1444
1445 /*
1446 * Frame injection is not allowed if beaconing is not allowed
1447 * or if we need radar detection. Beaconing is usually not allowed when
1448 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
1449 * Passive scan is also used in world regulatory domains where
1450 * your country is not known and as such it should be treated as
1451 * NO TX unless the channel is explicitly allowed in which case
1452 * your current regulatory domain would not have the passive scan
1453 * flag.
1454 *
1455 * Since AP mode uses monitor interfaces to inject/TX management
1456 * frames we can make AP mode the exception to this rule once it
1457 * supports radar detection as its implementation can deal with
1458 * radar detection by itself. We can do that later by adding a
1459 * monitor flag interfaces used for AP support.
1460 */
1461 if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
1462 IEEE80211_CHAN_PASSIVE_SCAN)))
1463 goto fail;
1464
1397 /* check for not even having the fixed radiotap header part */ 1465 /* check for not even having the fixed radiotap header part */
1398 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) 1466 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
1399 goto fail; /* too short to be possibly valid */ 1467 goto fail; /* too short to be possibly valid */
@@ -1477,19 +1545,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1477 goto fail; 1545 goto fail;
1478 } 1546 }
1479 1547
1480 if (!(local->hw.flags & IEEE80211_HW_NO_STACK_DYNAMIC_PS) &&
1481 local->dynamic_ps_timeout > 0) {
1482 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1483 ieee80211_stop_queues_by_reason(&local->hw,
1484 IEEE80211_QUEUE_STOP_REASON_PS);
1485 queue_work(local->hw.workqueue,
1486 &local->dynamic_ps_disable_work);
1487 }
1488
1489 mod_timer(&local->dynamic_ps_timer, jiffies +
1490 msecs_to_jiffies(local->dynamic_ps_timeout));
1491 }
1492
1493 nh_pos = skb_network_header(skb) - skb->data; 1548 nh_pos = skb_network_header(skb) - skb->data;
1494 h_pos = skb_transport_header(skb) - skb->data; 1549 h_pos = skb_transport_header(skb) - skb->data;
1495 1550
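[Editor's note] The tx.c key-selection change above lets protected management frames use a dedicated default management key: the per-station key wins if present, then the default management key is tried for management frames, then the ordinary default key. A hedged sketch of that precedence follows; the structs and field names are stand-ins for illustration, not the mac80211 types.

/* Editor's sketch of the precedence added in ieee80211_tx_h_select_key():
 * sta key -> default mgmt key (mgmt frames only) -> default key. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct key { const char *name; };

struct tx_ctx {
	struct key *sta_key;            /* pairwise key for the peer, if any */
	struct key *default_mgmt_key;   /* e.g. the BIP/IGTK key */
	struct key *default_key;        /* ordinary group/default key */
};

static struct key *select_key(struct tx_ctx *tx, bool is_mgmt)
{
	if (tx->sta_key)
		return tx->sta_key;
	if (is_mgmt && tx->default_mgmt_key)
		return tx->default_mgmt_key;
	return tx->default_key;         /* may be NULL: frame is unprotected */
}

int main(void)
{
	struct key igtk = { "igtk" }, gtk = { "gtk" };
	struct tx_ctx tx = { NULL, &igtk, &gtk };

	printf("mgmt frame uses: %s\n", select_key(&tx, true)->name);
	printf("data frame uses: %s\n", select_key(&tx, false)->name);
	return 0;
}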
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index fb89e1d0aa0..73c7d7345ab 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -41,6 +41,15 @@ const unsigned char rfc1042_header[] __aligned(2) =
41const unsigned char bridge_tunnel_header[] __aligned(2) = 41const unsigned char bridge_tunnel_header[] __aligned(2) =
42 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; 42 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
43 43
44struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
45{
46 struct ieee80211_local *local;
47 BUG_ON(!wiphy);
48
49 local = wiphy_priv(wiphy);
50 return &local->hw;
51}
52EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
44 53
45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 54u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
46 enum nl80211_iftype type) 55 enum nl80211_iftype type)
@@ -352,8 +361,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
352 } 361 }
353} 362}
354 363
355void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 364static void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
356 enum queue_stop_reason reason) 365 enum queue_stop_reason reason)
357{ 366{
358 struct ieee80211_local *local = hw_to_local(hw); 367 struct ieee80211_local *local = hw_to_local(hw);
359 unsigned long flags; 368 unsigned long flags;
@@ -382,8 +391,8 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
382 netif_stop_subqueue(local->mdev, queue); 391 netif_stop_subqueue(local->mdev, queue);
383} 392}
384 393
385void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, 394static void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
386 enum queue_stop_reason reason) 395 enum queue_stop_reason reason)
387{ 396{
388 struct ieee80211_local *local = hw_to_local(hw); 397 struct ieee80211_local *local = hw_to_local(hw);
389 unsigned long flags; 398 unsigned long flags;
@@ -459,7 +468,7 @@ void ieee80211_iterate_active_interfaces(
459 struct ieee80211_local *local = hw_to_local(hw); 468 struct ieee80211_local *local = hw_to_local(hw);
460 struct ieee80211_sub_if_data *sdata; 469 struct ieee80211_sub_if_data *sdata;
461 470
462 rtnl_lock(); 471 mutex_lock(&local->iflist_mtx);
463 472
464 list_for_each_entry(sdata, &local->interfaces, list) { 473 list_for_each_entry(sdata, &local->interfaces, list) {
465 switch (sdata->vif.type) { 474 switch (sdata->vif.type) {
@@ -480,7 +489,7 @@ void ieee80211_iterate_active_interfaces(
480 &sdata->vif); 489 &sdata->vif);
481 } 490 }
482 491
483 rtnl_unlock(); 492 mutex_unlock(&local->iflist_mtx);
484} 493}
485EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); 494EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
486 495
@@ -653,6 +662,10 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
653 elems->pwr_constr_elem = pos; 662 elems->pwr_constr_elem = pos;
654 elems->pwr_constr_elem_len = elen; 663 elems->pwr_constr_elem_len = elen;
655 break; 664 break;
665 case WLAN_EID_TIMEOUT_INTERVAL:
666 elems->timeout_int = pos;
667 elems->timeout_int_len = elen;
668 break;
656 default: 669 default:
657 break; 670 break;
658 } 671 }
@@ -727,12 +740,12 @@ int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz)
727 return ret; 740 return ret;
728} 741}
729 742
730u64 ieee80211_mandatory_rates(struct ieee80211_local *local, 743u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
731 enum ieee80211_band band) 744 enum ieee80211_band band)
732{ 745{
733 struct ieee80211_supported_band *sband; 746 struct ieee80211_supported_band *sband;
734 struct ieee80211_rate *bitrates; 747 struct ieee80211_rate *bitrates;
735 u64 mandatory_rates; 748 u32 mandatory_rates;
736 enum ieee80211_rate_flags mandatory_flag; 749 enum ieee80211_rate_flags mandatory_flag;
737 int i; 750 int i;
738 751
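[Editor's note] util.c narrows ieee80211_mandatory_rates() from u64 to u32: the result is a per-band bitmap with one bit per rate index, and a band never carries more than 32 rates. A small sketch of building such a bitmap from a rate table; the flag constant and example rates below are illustrative, not the real mandatory-rate flags.

/* Editor's sketch: build a one-bit-per-rate "mandatory" bitmap, the shape
 * of value ieee80211_mandatory_rates() now returns as u32. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define RATE_MANDATORY 0x1      /* stand-in for the mandatory-rate flag */

struct rate { int bitrate_100kbps; unsigned flags; };

static uint32_t mandatory_rates(const struct rate *rates, size_t n,
				unsigned flag)
{
	uint32_t mask = 0;

	for (size_t i = 0; i < n && i < 32; i++)
		if (rates[i].flags & flag)
			mask |= 1u << i;   /* bit i <=> rates[i] is mandatory */
	return mask;
}

int main(void)
{
	struct rate rates[] = {
		{ 10,  RATE_MANDATORY },   /* 1 Mb/s   */
		{ 20,  RATE_MANDATORY },   /* 2 Mb/s   */
		{ 55,  0 },                /* 5.5 Mb/s */
		{ 110, 0 },                /* 11 Mb/s  */
	};

	printf("mandatory mask: 0x%x\n",
	       mandatory_rates(rates, 4, RATE_MANDATORY));
	return 0;
}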
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 7162d5816f3..2b023dce8b2 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -37,7 +37,14 @@ static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta
37 struct ieee80211_key *key; 37 struct ieee80211_key *key;
38 int err; 38 int err;
39 39
40 if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { 40 if (alg == ALG_AES_CMAC) {
41 if (idx < NUM_DEFAULT_KEYS ||
42 idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) {
43 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d "
44 "(BIP)\n", sdata->dev->name, idx);
45 return -EINVAL;
46 }
47 } else if (idx < 0 || idx >= NUM_DEFAULT_KEYS) {
41 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", 48 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n",
42 sdata->dev->name, idx); 49 sdata->dev->name, idx);
43 return -EINVAL; 50 return -EINVAL;
@@ -103,6 +110,9 @@ static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta
103 110
104 if (set_tx_key || (!sta && !sdata->default_key && key)) 111 if (set_tx_key || (!sta && !sdata->default_key && key))
105 ieee80211_set_default_key(sdata, idx); 112 ieee80211_set_default_key(sdata, idx);
113 if (alg == ALG_AES_CMAC &&
114 (set_tx_key || (!sta && !sdata->default_mgmt_key && key)))
115 ieee80211_set_default_mgmt_key(sdata, idx);
106 } 116 }
107 117
108 out_unlock: 118 out_unlock:
@@ -135,6 +145,21 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
135 return -EOPNOTSUPP; 145 return -EOPNOTSUPP;
136} 146}
137 147
148static u8 ieee80211_get_wstats_flags(struct ieee80211_local *local)
149{
150 u8 wstats_flags = 0;
151
152 wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
153 IEEE80211_HW_SIGNAL_DBM) ?
154 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
155 wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
156 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
157 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
158 wstats_flags |= IW_QUAL_DBM;
159
160 return wstats_flags;
161}
162
138static int ieee80211_ioctl_giwrange(struct net_device *dev, 163static int ieee80211_ioctl_giwrange(struct net_device *dev,
139 struct iw_request_info *info, 164 struct iw_request_info *info,
140 struct iw_point *data, char *extra) 165 struct iw_point *data, char *extra)
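[Editor's note] The ieee80211_get_wstats_flags() helper added above derives the wireless-extensions quality "updated" flags on demand from the hardware capability flags instead of relying on a cached local->wstats_flags. A hedged sketch of that capability-to-status mapping; every constant below is invented, not the real IW_QUAL_* or IEEE80211_HW_* value.

/* Editor's sketch: derive status flags from capability bits, in the spirit
 * of ieee80211_get_wstats_flags(). */
#include <stdio.h>

#define HW_SIGNAL_DBM      0x01
#define HW_SIGNAL_UNSPEC   0x02
#define HW_NOISE_DBM       0x04

#define QUAL_QUAL_UPDATED  0x010
#define QUAL_QUAL_INVALID  0x020
#define QUAL_NOISE_UPDATED 0x040
#define QUAL_NOISE_INVALID 0x080
#define QUAL_DBM           0x100

static unsigned wstats_flags(unsigned hw_flags)
{
	unsigned f = 0;

	f |= (hw_flags & (HW_SIGNAL_DBM | HW_SIGNAL_UNSPEC)) ?
	     QUAL_QUAL_UPDATED : QUAL_QUAL_INVALID;
	f |= (hw_flags & HW_NOISE_DBM) ?
	     QUAL_NOISE_UPDATED : QUAL_NOISE_INVALID;
	if (hw_flags & HW_SIGNAL_DBM)
		f |= QUAL_DBM;
	return f;
}

int main(void)
{
	printf("flags: 0x%x\n", wstats_flags(HW_SIGNAL_DBM | HW_NOISE_DBM));
	return 0;
}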
@@ -163,9 +188,9 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
163 range->num_encoding_sizes = 2; 188 range->num_encoding_sizes = 2;
164 range->max_encoding_tokens = NUM_DEFAULT_KEYS; 189 range->max_encoding_tokens = NUM_DEFAULT_KEYS;
165 190
166 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC || 191 /* cfg80211 requires this, and enforces 0..100 */
167 local->hw.flags & IEEE80211_HW_SIGNAL_DB) 192 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
168 range->max_qual.level = local->hw.max_signal; 193 range->max_qual.level = 100;
169 else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 194 else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
170 range->max_qual.level = -110; 195 range->max_qual.level = -110;
171 else 196 else
@@ -177,13 +202,13 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
177 range->max_qual.noise = 0; 202 range->max_qual.noise = 0;
178 203
179 range->max_qual.qual = 100; 204 range->max_qual.qual = 100;
180 range->max_qual.updated = local->wstats_flags; 205 range->max_qual.updated = ieee80211_get_wstats_flags(local);
181 206
182 range->avg_qual.qual = 50; 207 range->avg_qual.qual = 50;
183 /* not always true but better than nothing */ 208 /* not always true but better than nothing */
184 range->avg_qual.level = range->max_qual.level / 2; 209 range->avg_qual.level = range->max_qual.level / 2;
185 range->avg_qual.noise = range->max_qual.noise / 2; 210 range->avg_qual.noise = range->max_qual.noise / 2;
186 range->avg_qual.updated = local->wstats_flags; 211 range->avg_qual.updated = ieee80211_get_wstats_flags(local);
187 212
188 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 213 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
189 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 214 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
@@ -230,13 +255,15 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
230{ 255{
231 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 256 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
232 257
233 if (sdata->vif.type == NL80211_IFTYPE_STATION) 258 if (sdata->vif.type == NL80211_IFTYPE_ADHOC ||
259 sdata->vif.type == NL80211_IFTYPE_STATION)
234 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; 260 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL;
235 261
236 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ 262 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */
237 if (freq->e == 0) { 263 if (freq->e == 0) {
238 if (freq->m < 0) { 264 if (freq->m < 0) {
239 if (sdata->vif.type == NL80211_IFTYPE_STATION) 265 if (sdata->vif.type == NL80211_IFTYPE_ADHOC ||
266 sdata->vif.type == NL80211_IFTYPE_STATION)
240 sdata->u.sta.flags |= 267 sdata->u.sta.flags |=
241 IEEE80211_STA_AUTO_CHANNEL_SEL; 268 IEEE80211_STA_AUTO_CHANNEL_SEL;
242 return 0; 269 return 0;
@@ -404,58 +431,6 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
404} 431}
405 432
406 433
407static int ieee80211_ioctl_siwscan(struct net_device *dev,
408 struct iw_request_info *info,
409 union iwreq_data *wrqu, char *extra)
410{
411 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
412 struct iw_scan_req *req = NULL;
413 u8 *ssid = NULL;
414 size_t ssid_len = 0;
415
416 if (!netif_running(dev))
417 return -ENETDOWN;
418
419 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
420 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
421 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
422 return -EOPNOTSUPP;
423
424 /* if SSID was specified explicitly then use that */
425 if (wrqu->data.length == sizeof(struct iw_scan_req) &&
426 wrqu->data.flags & IW_SCAN_THIS_ESSID) {
427 req = (struct iw_scan_req *)extra;
428 ssid = req->essid;
429 ssid_len = req->essid_len;
430 }
431
432 return ieee80211_request_scan(sdata, ssid, ssid_len);
433}
434
435
436static int ieee80211_ioctl_giwscan(struct net_device *dev,
437 struct iw_request_info *info,
438 struct iw_point *data, char *extra)
439{
440 int res;
441 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
442 struct ieee80211_sub_if_data *sdata;
443
444 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
445
446 if (local->sw_scanning || local->hw_scanning)
447 return -EAGAIN;
448
449 res = ieee80211_scan_results(local, info, extra, data->length);
450 if (res >= 0) {
451 data->length = res;
452 return 0;
453 }
454 data->length = 0;
455 return res;
456}
457
458
459static int ieee80211_ioctl_siwrate(struct net_device *dev, 434static int ieee80211_ioctl_siwrate(struct net_device *dev,
460 struct iw_request_info *info, 435 struct iw_request_info *info,
461 struct iw_param *rate, char *extra) 436 struct iw_param *rate, char *extra)
@@ -549,10 +524,9 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
549 else /* Automatic power level setting */ 524 else /* Automatic power level setting */
550 new_power_level = chan->max_power; 525 new_power_level = chan->max_power;
551 526
552 if (local->hw.conf.power_level != new_power_level) { 527 local->user_power_level = new_power_level;
553 local->hw.conf.power_level = new_power_level; 528 if (local->hw.conf.power_level != new_power_level)
554 reconf_flags |= IEEE80211_CONF_CHANGE_POWER; 529 reconf_flags |= IEEE80211_CONF_CHANGE_POWER;
555 }
556 530
557 if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) { 531 if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
558 local->hw.conf.radio_enabled = !(data->txpower.disabled); 532 local->hw.conf.radio_enabled = !(data->txpower.disabled);
@@ -836,6 +810,9 @@ static int ieee80211_ioctl_siwpower(struct net_device *dev,
836 int ret = 0, timeout = 0; 810 int ret = 0, timeout = 0;
837 bool ps; 811 bool ps;
838 812
813 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
814 return -EOPNOTSUPP;
815
839 if (sdata->vif.type != NL80211_IFTYPE_STATION) 816 if (sdata->vif.type != NL80211_IFTYPE_STATION)
840 return -EINVAL; 817 return -EINVAL;
841 818
@@ -852,31 +829,49 @@ static int ieee80211_ioctl_siwpower(struct net_device *dev,
852 ps = true; 829 ps = true;
853 break; 830 break;
854 default: /* Otherwise we ignore */ 831 default: /* Otherwise we ignore */
855 break; 832 return -EINVAL;
856 } 833 }
857 834
835 if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT))
836 return -EINVAL;
837
858 if (wrq->flags & IW_POWER_TIMEOUT) 838 if (wrq->flags & IW_POWER_TIMEOUT)
859 timeout = wrq->value / 1000; 839 timeout = wrq->value / 1000;
860 840
861set: 841 set:
862 if (ps == local->powersave && timeout == local->dynamic_ps_timeout) 842 if (ps == local->powersave && timeout == conf->dynamic_ps_timeout)
863 return ret; 843 return ret;
864 844
865 local->powersave = ps; 845 local->powersave = ps;
866 local->dynamic_ps_timeout = timeout; 846 conf->dynamic_ps_timeout = timeout;
867 847
868 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { 848 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
869 if (!(local->hw.flags & IEEE80211_HW_NO_STACK_DYNAMIC_PS) && 849 ret = ieee80211_hw_config(local,
870 local->dynamic_ps_timeout > 0) 850 IEEE80211_CONF_CHANGE_DYNPS_TIMEOUT);
871 mod_timer(&local->dynamic_ps_timer, jiffies + 851
872 msecs_to_jiffies(local->dynamic_ps_timeout)); 852 if (!(sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED))
873 else { 853 return ret;
874 if (local->powersave) 854
875 conf->flags |= IEEE80211_CONF_PS; 855 if (conf->dynamic_ps_timeout > 0 &&
876 else 856 !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) {
877 conf->flags &= ~IEEE80211_CONF_PS; 857 mod_timer(&local->dynamic_ps_timer, jiffies +
858 msecs_to_jiffies(conf->dynamic_ps_timeout));
859 } else {
860 if (local->powersave) {
861 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
862 ieee80211_send_nullfunc(local, sdata, 1);
863 conf->flags |= IEEE80211_CONF_PS;
864 ret = ieee80211_hw_config(local,
865 IEEE80211_CONF_CHANGE_PS);
866 } else {
867 conf->flags &= ~IEEE80211_CONF_PS;
868 ret = ieee80211_hw_config(local,
869 IEEE80211_CONF_CHANGE_PS);
870 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
871 ieee80211_send_nullfunc(local, sdata, 0);
872 del_timer_sync(&local->dynamic_ps_timer);
873 cancel_work_sync(&local->dynamic_ps_enable_work);
878 } 874 }
879 ret = ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
880 } 875 }
881 876
882 return ret; 877 return ret;
@@ -903,11 +898,22 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
903 898
904 switch (data->flags & IW_AUTH_INDEX) { 899 switch (data->flags & IW_AUTH_INDEX) {
905 case IW_AUTH_WPA_VERSION: 900 case IW_AUTH_WPA_VERSION:
906 case IW_AUTH_CIPHER_PAIRWISE:
907 case IW_AUTH_CIPHER_GROUP: 901 case IW_AUTH_CIPHER_GROUP:
908 case IW_AUTH_WPA_ENABLED: 902 case IW_AUTH_WPA_ENABLED:
909 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 903 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
910 case IW_AUTH_KEY_MGMT: 904 case IW_AUTH_KEY_MGMT:
905 case IW_AUTH_CIPHER_GROUP_MGMT:
906 break;
907 case IW_AUTH_CIPHER_PAIRWISE:
908 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
909 if (data->value & (IW_AUTH_CIPHER_WEP40 |
910 IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_TKIP))
911 sdata->u.sta.flags |=
912 IEEE80211_STA_TKIP_WEP_USED;
913 else
914 sdata->u.sta.flags &=
915 ~IEEE80211_STA_TKIP_WEP_USED;
916 }
911 break; 917 break;
912 case IW_AUTH_DROP_UNENCRYPTED: 918 case IW_AUTH_DROP_UNENCRYPTED:
913 sdata->drop_unencrypted = !!data->value; 919 sdata->drop_unencrypted = !!data->value;
@@ -934,6 +940,29 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
934 else 940 else
935 ret = -EOPNOTSUPP; 941 ret = -EOPNOTSUPP;
936 break; 942 break;
943 case IW_AUTH_MFP:
944 if (!(sdata->local->hw.flags & IEEE80211_HW_MFP_CAPABLE)) {
945 ret = -EOPNOTSUPP;
946 break;
947 }
948 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
949 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
950 switch (data->value) {
951 case IW_AUTH_MFP_DISABLED:
952 sdata->u.sta.mfp = IEEE80211_MFP_DISABLED;
953 break;
954 case IW_AUTH_MFP_OPTIONAL:
955 sdata->u.sta.mfp = IEEE80211_MFP_OPTIONAL;
956 break;
957 case IW_AUTH_MFP_REQUIRED:
958 sdata->u.sta.mfp = IEEE80211_MFP_REQUIRED;
959 break;
960 default:
961 ret = -EINVAL;
962 }
963 } else
964 ret = -EOPNOTSUPP;
965 break;
937 default: 966 default:
938 ret = -EOPNOTSUPP; 967 ret = -EOPNOTSUPP;
939 break; 968 break;
@@ -965,7 +994,7 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
965 wstats->qual.level = sta->last_signal; 994 wstats->qual.level = sta->last_signal;
966 wstats->qual.qual = sta->last_qual; 995 wstats->qual.qual = sta->last_qual;
967 wstats->qual.noise = sta->last_noise; 996 wstats->qual.noise = sta->last_noise;
968 wstats->qual.updated = local->wstats_flags; 997 wstats->qual.updated = ieee80211_get_wstats_flags(local);
969 } 998 }
970 999
971 rcu_read_unlock(); 1000 rcu_read_unlock();
@@ -1017,6 +1046,9 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev,
1017 case IW_ENCODE_ALG_CCMP: 1046 case IW_ENCODE_ALG_CCMP:
1018 alg = ALG_CCMP; 1047 alg = ALG_CCMP;
1019 break; 1048 break;
1049 case IW_ENCODE_ALG_AES_CMAC:
1050 alg = ALG_AES_CMAC;
1051 break;
1020 default: 1052 default:
1021 return -EOPNOTSUPP; 1053 return -EOPNOTSUPP;
1022 } 1054 }
@@ -1025,20 +1057,41 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev,
1025 remove = 1; 1057 remove = 1;
1026 1058
1027 idx = erq->flags & IW_ENCODE_INDEX; 1059 idx = erq->flags & IW_ENCODE_INDEX;
1028 if (idx < 1 || idx > 4) { 1060 if (alg == ALG_AES_CMAC) {
1029 idx = -1; 1061 if (idx < NUM_DEFAULT_KEYS + 1 ||
1030 if (!sdata->default_key) 1062 idx > NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) {
1031 idx = 0; 1063 idx = -1;
1032 else for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1064 if (!sdata->default_mgmt_key)
1033 if (sdata->default_key == sdata->keys[i]) { 1065 idx = 0;
1034 idx = i; 1066 else for (i = NUM_DEFAULT_KEYS;
1035 break; 1067 i < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1068 i++) {
1069 if (sdata->default_mgmt_key == sdata->keys[i])
1070 {
1071 idx = i;
1072 break;
1073 }
1036 } 1074 }
1037 } 1075 if (idx < 0)
1038 if (idx < 0) 1076 return -EINVAL;
1039 return -EINVAL; 1077 } else
1040 } else 1078 idx--;
1041 idx--; 1079 } else {
1080 if (idx < 1 || idx > 4) {
1081 idx = -1;
1082 if (!sdata->default_key)
1083 idx = 0;
1084 else for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1085 if (sdata->default_key == sdata->keys[i]) {
1086 idx = i;
1087 break;
1088 }
1089 }
1090 if (idx < 0)
1091 return -EINVAL;
1092 } else
1093 idx--;
1094 }
1042 1095
1043 return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg, 1096 return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg,
1044 remove, 1097 remove,
@@ -1076,8 +1129,8 @@ static const iw_handler ieee80211_handler[] =
1076 (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */ 1129 (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */
1077 (iw_handler) ieee80211_ioctl_siwmlme, /* SIOCSIWMLME */ 1130 (iw_handler) ieee80211_ioctl_siwmlme, /* SIOCSIWMLME */
1078 (iw_handler) NULL, /* SIOCGIWAPLIST */ 1131 (iw_handler) NULL, /* SIOCGIWAPLIST */
1079 (iw_handler) ieee80211_ioctl_siwscan, /* SIOCSIWSCAN */ 1132 (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */
1080 (iw_handler) ieee80211_ioctl_giwscan, /* SIOCGIWSCAN */ 1133 (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */
1081 (iw_handler) ieee80211_ioctl_siwessid, /* SIOCSIWESSID */ 1134 (iw_handler) ieee80211_ioctl_siwessid, /* SIOCSIWESSID */
1082 (iw_handler) ieee80211_ioctl_giwessid, /* SIOCGIWESSID */ 1135 (iw_handler) ieee80211_ioctl_giwessid, /* SIOCGIWESSID */
1083 (iw_handler) NULL, /* SIOCSIWNICKN */ 1136 (iw_handler) NULL, /* SIOCSIWNICKN */
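[Editor's note] The siwencodeext/set_encryption changes above reserve a separate index range for BIP (AES-CMAC) keys: ordinary keys use indices 0..NUM_DEFAULT_KEYS-1, management keys NUM_DEFAULT_KEYS..NUM_DEFAULT_KEYS+NUM_DEFAULT_MGMT_KEYS-1. A small sketch of that validation; the counts 4 and 2 match the usual mac80211 values but are hard-coded here purely for illustration.

/* Editor's sketch of the key-index split introduced for BIP: pairwise/group
 * keys live at indices 0..3, management (IGTK) keys at 4..5. */
#include <stdio.h>
#include <stdbool.h>

#define NUM_DEFAULT_KEYS      4
#define NUM_DEFAULT_MGMT_KEYS 2

static bool key_idx_valid(int idx, bool is_mgmt_key)
{
	if (is_mgmt_key)
		return idx >= NUM_DEFAULT_KEYS &&
		       idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
	return idx >= 0 && idx < NUM_DEFAULT_KEYS;
}

int main(void)
{
	printf("idx 4 as IGTK: %d\n", key_idx_valid(4, true));            /* 1 */
	printf("idx 4 as WEP/TKIP/CCMP key: %d\n", key_idx_valid(4, false)); /* 0 */
	return 0;
}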
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 7aa63caf8d5..9101b48ec2a 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright 2002-2004, Instant802 Networks, Inc. 2 * Copyright 2002-2004, Instant802 Networks, Inc.
3 * Copyright 2008, Jouni Malinen <j@w1.fi>
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
@@ -19,6 +20,7 @@
19#include "michael.h" 20#include "michael.h"
20#include "tkip.h" 21#include "tkip.h"
21#include "aes_ccm.h" 22#include "aes_ccm.h"
23#include "aes_cmac.h"
22#include "wpa.h" 24#include "wpa.h"
23 25
24ieee80211_tx_result 26ieee80211_tx_result
@@ -266,7 +268,7 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
266 int encrypted) 268 int encrypted)
267{ 269{
268 __le16 mask_fc; 270 __le16 mask_fc;
269 int a4_included; 271 int a4_included, mgmt;
270 u8 qos_tid; 272 u8 qos_tid;
271 u8 *b_0, *aad; 273 u8 *b_0, *aad;
272 u16 data_len, len_a; 274 u16 data_len, len_a;
@@ -277,12 +279,15 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
277 aad = scratch + 4 * AES_BLOCK_LEN; 279 aad = scratch + 4 * AES_BLOCK_LEN;
278 280
279 /* 281 /*
280 * Mask FC: zero subtype b4 b5 b6 282 * Mask FC: zero subtype b4 b5 b6 (if not mgmt)
281 * Retry, PwrMgt, MoreData; set Protected 283 * Retry, PwrMgt, MoreData; set Protected
282 */ 284 */
285 mgmt = ieee80211_is_mgmt(hdr->frame_control);
283 mask_fc = hdr->frame_control; 286 mask_fc = hdr->frame_control;
284 mask_fc &= ~cpu_to_le16(0x0070 | IEEE80211_FCTL_RETRY | 287 mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
285 IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); 288 IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
289 if (!mgmt)
290 mask_fc &= ~cpu_to_le16(0x0070);
286 mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 291 mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
287 292
288 hdrlen = ieee80211_hdrlen(hdr->frame_control); 293 hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -300,8 +305,10 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
300 305
301 /* First block, b_0 */ 306 /* First block, b_0 */
302 b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ 307 b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */
303 /* Nonce: QoS Priority | A2 | PN */ 308 /* Nonce: Nonce Flags | A2 | PN
304 b_0[1] = qos_tid; 309 * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
310 */
311 b_0[1] = qos_tid | (mgmt << 4);
305 memcpy(&b_0[2], hdr->addr2, ETH_ALEN); 312 memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
306 memcpy(&b_0[8], pn, CCMP_PN_LEN); 313 memcpy(&b_0[8], pn, CCMP_PN_LEN);
307 /* l(m) */ 314 /* l(m) */
@@ -360,9 +367,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
360 int hdrlen, len, tail; 367 int hdrlen, len, tail;
361 u8 *pos, *pn; 368 u8 *pos, *pn;
362 int i; 369 int i;
370 bool skip_hw;
371
372 skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT) &&
373 ieee80211_is_mgmt(hdr->frame_control);
363 374
364 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 375 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
365 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 376 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
377 !skip_hw) {
366 /* hwaccel - with no need for preallocated room for CCMP 378 /* hwaccel - with no need for preallocated room for CCMP
367 * header or MIC fields */ 379 * header or MIC fields */
368 info->control.hw_key = &tx->key->conf; 380 info->control.hw_key = &tx->key->conf;
@@ -397,7 +409,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
397 409
398 ccmp_pn2hdr(pos, pn, key->conf.keyidx); 410 ccmp_pn2hdr(pos, pn, key->conf.keyidx);
399 411
400 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 412 if ((key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && !skip_hw) {
401 /* hwaccel - with preallocated room for CCMP header */ 413 /* hwaccel - with preallocated room for CCMP header */
402 info->control.hw_key = &tx->key->conf; 414 info->control.hw_key = &tx->key->conf;
403 return 0; 415 return 0;
@@ -446,7 +458,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
446 458
447 hdrlen = ieee80211_hdrlen(hdr->frame_control); 459 hdrlen = ieee80211_hdrlen(hdr->frame_control);
448 460
449 if (!ieee80211_is_data(hdr->frame_control)) 461 if (!ieee80211_is_data(hdr->frame_control) &&
462 !ieee80211_is_robust_mgmt_frame(hdr))
450 return RX_CONTINUE; 463 return RX_CONTINUE;
451 464
452 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; 465 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN;
@@ -485,3 +498,126 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
485 498
486 return RX_CONTINUE; 499 return RX_CONTINUE;
487} 500}
501
502
503static void bip_aad(struct sk_buff *skb, u8 *aad)
504{
505 /* BIP AAD: FC(masked) || A1 || A2 || A3 */
506
507 /* FC type/subtype */
508 aad[0] = skb->data[0];
509 /* Mask FC Retry, PwrMgt, MoreData flags to zero */
510 aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6));
511 /* A1 || A2 || A3 */
512 memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN);
513}
514
515
516static inline void bip_ipn_swap(u8 *d, const u8 *s)
517{
518 *d++ = s[5];
519 *d++ = s[4];
520 *d++ = s[3];
521 *d++ = s[2];
522 *d++ = s[1];
523 *d = s[0];
524}
525
526
527ieee80211_tx_result
528ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
529{
530 struct sk_buff *skb = tx->skb;
531 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
532 struct ieee80211_key *key = tx->key;
533 struct ieee80211_mmie *mmie;
534 u8 *pn, aad[20];
535 int i;
536
537 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
538 /* hwaccel */
539 info->control.hw_key = &tx->key->conf;
540 return 0;
541 }
542
543 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
544 return TX_DROP;
545
546 mmie = (struct ieee80211_mmie *) skb_put(skb, sizeof(*mmie));
547 mmie->element_id = WLAN_EID_MMIE;
548 mmie->length = sizeof(*mmie) - 2;
549 mmie->key_id = cpu_to_le16(key->conf.keyidx);
550
551 /* PN = PN + 1 */
552 pn = key->u.aes_cmac.tx_pn;
553
554 for (i = sizeof(key->u.aes_cmac.tx_pn) - 1; i >= 0; i--) {
555 pn[i]++;
556 if (pn[i])
557 break;
558 }
559 bip_ipn_swap(mmie->sequence_number, pn);
560
561 bip_aad(skb, aad);
562
563 /*
564 * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64)
565 */
566 ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.tx_crypto_buf,
567 aad, skb->data + 24, skb->len - 24, mmie->mic);
568
569 return TX_CONTINUE;
570}
571
572
573ieee80211_rx_result
574ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
575{
576 struct sk_buff *skb = rx->skb;
577 struct ieee80211_key *key = rx->key;
578 struct ieee80211_mmie *mmie;
579 u8 aad[20], mic[8], ipn[6];
580 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
581
582 if (!ieee80211_is_mgmt(hdr->frame_control))
583 return RX_CONTINUE;
584
585 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
586 (rx->status->flag & RX_FLAG_IV_STRIPPED))
587 return RX_CONTINUE;
588
589 if (skb->len < 24 + sizeof(*mmie))
590 return RX_DROP_UNUSABLE;
591
592 mmie = (struct ieee80211_mmie *)
593 (skb->data + skb->len - sizeof(*mmie));
594 if (mmie->element_id != WLAN_EID_MMIE ||
595 mmie->length != sizeof(*mmie) - 2)
596 return RX_DROP_UNUSABLE; /* Invalid MMIE */
597
598 bip_ipn_swap(ipn, mmie->sequence_number);
599
600 if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
601 key->u.aes_cmac.replays++;
602 return RX_DROP_UNUSABLE;
603 }
604
605 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
606 /* hardware didn't decrypt/verify MIC */
607 bip_aad(skb, aad);
608 ieee80211_aes_cmac(key->u.aes_cmac.tfm,
609 key->u.aes_cmac.rx_crypto_buf, aad,
610 skb->data + 24, skb->len - 24, mic);
611 if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
612 key->u.aes_cmac.icverrors++;
613 return RX_DROP_UNUSABLE;
614 }
615 }
616
617 memcpy(key->u.aes_cmac.rx_pn, ipn, 6);
618
619 /* Remove MMIE */
620 skb_trim(skb, skb->len - sizeof(*mmie));
621
622 return RX_CONTINUE;
623}
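
[Editor's sketch — a self-contained userspace illustration of two details from the new BIP code above; names are illustrative only. The AAD is the masked frame control plus A1..A3, and the 6-byte IPN is kept in one byte order in the key and in the reverse order in the MMIE, which is why it is byte-swapped before the replay comparison.]

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

/* BIP AAD: FC(masked) || A1 || A2 || A3 */
static void demo_bip_aad(const uint8_t *hdr, uint8_t aad[20])
{
    aad[0] = hdr[0];                                   /* FC type/subtype kept */
    aad[1] = hdr[1] & (uint8_t)~((1 << 4) | (1 << 5) | (1 << 6)); /* clear Retry/PwrMgt/MoreData */
    memcpy(aad + 2, hdr + 4, 3 * ETH_ALEN);            /* A1 || A2 || A3 */
}

/* Reverse the 6-byte IPN between key storage and MMIE wire order */
static void demo_bip_ipn_swap(uint8_t d[6], const uint8_t s[6])
{
    for (int i = 0; i < 6; i++)
        d[i] = s[5 - i];
}

int main(void)
{
    uint8_t hdr[24] = { 0xd0, 0x71 };   /* mgmt frame with Retry/PwrMgt/MoreData set */
    uint8_t aad[20], ipn[6];
    uint8_t mmie_seq[6] = { 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 };

    demo_bip_aad(hdr, aad);
    demo_bip_ipn_swap(ipn, mmie_seq);
    printf("aad[1] = 0x%02x, ipn[0] = 0x%02x\n", aad[1], ipn[0]);
    return 0;
}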
diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h
index d42d221d8a1..baba0608313 100644
--- a/net/mac80211/wpa.h
+++ b/net/mac80211/wpa.h
@@ -28,4 +28,9 @@ ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx);
28ieee80211_rx_result 28ieee80211_rx_result
29ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx); 29ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx);
30 30
31ieee80211_tx_result
32ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx);
33ieee80211_rx_result
34ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx);
35
31#endif /* WPA_H */ 36#endif /* WPA_H */
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 6be5d4efa51..5c48378a852 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -149,8 +149,8 @@ static struct task_struct *sync_backup_thread;
149/* multicast addr */ 149/* multicast addr */
150static struct sockaddr_in mcast_addr = { 150static struct sockaddr_in mcast_addr = {
151 .sin_family = AF_INET, 151 .sin_family = AF_INET,
152 .sin_port = __constant_htons(IP_VS_SYNC_PORT), 152 .sin_port = cpu_to_be16(IP_VS_SYNC_PORT),
153 .sin_addr.s_addr = __constant_htonl(IP_VS_SYNC_GROUP), 153 .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
154}; 154};
155 155
156 156
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 4f8fcf49854..07d9d8857e5 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -177,7 +177,7 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = {
177 .me = THIS_MODULE, 177 .me = THIS_MODULE,
178 .help = amanda_help, 178 .help = amanda_help,
179 .tuple.src.l3num = AF_INET, 179 .tuple.src.l3num = AF_INET,
180 .tuple.src.u.udp.port = __constant_htons(10080), 180 .tuple.src.u.udp.port = cpu_to_be16(10080),
181 .tuple.dst.protonum = IPPROTO_UDP, 181 .tuple.dst.protonum = IPPROTO_UDP,
182 .expect_policy = &amanda_exp_policy, 182 .expect_policy = &amanda_exp_policy,
183 }, 183 },
@@ -186,7 +186,7 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = {
186 .me = THIS_MODULE, 186 .me = THIS_MODULE,
187 .help = amanda_help, 187 .help = amanda_help,
188 .tuple.src.l3num = AF_INET6, 188 .tuple.src.l3num = AF_INET6,
189 .tuple.src.u.udp.port = __constant_htons(10080), 189 .tuple.src.u.udp.port = cpu_to_be16(10080),
190 .tuple.dst.protonum = IPPROTO_UDP, 190 .tuple.dst.protonum = IPPROTO_UDP,
191 .expect_policy = &amanda_exp_policy, 191 .expect_policy = &amanda_exp_policy,
192 }, 192 },
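
[Editor's sketch — a userspace illustration of why the substitution above works: a byte-order conversion that reduces to a constant expression can sit in a static initializer, so a plain cpu_to_be16() replaces the old __constant_htons() in these helper tuples. CONST_BE16 and the struct below are made up for the demo, and the unconditional swap assumes a little-endian host.]

#include <stdint.h>
#include <stdio.h>

/* Constant-foldable 16-bit byte swap (demo only; not the kernel macro) */
#define CONST_BE16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))

struct helper_tuple {
    uint16_t src_port_be;   /* stored big-endian, like .tuple.src.u.udp.port */
};

static const struct helper_tuple amanda = {
    .src_port_be = CONST_BE16(10080),   /* folded at compile time */
};

int main(void)
{
    printf("0x%04x\n", amanda.src_port_be);
    return 0;
}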
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 687bd633c3d..66369490230 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1167,7 +1167,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
1167 .name = "Q.931", 1167 .name = "Q.931",
1168 .me = THIS_MODULE, 1168 .me = THIS_MODULE,
1169 .tuple.src.l3num = AF_INET, 1169 .tuple.src.l3num = AF_INET,
1170 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), 1170 .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT),
1171 .tuple.dst.protonum = IPPROTO_TCP, 1171 .tuple.dst.protonum = IPPROTO_TCP,
1172 .help = q931_help, 1172 .help = q931_help,
1173 .expect_policy = &q931_exp_policy, 1173 .expect_policy = &q931_exp_policy,
@@ -1176,7 +1176,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
1176 .name = "Q.931", 1176 .name = "Q.931",
1177 .me = THIS_MODULE, 1177 .me = THIS_MODULE,
1178 .tuple.src.l3num = AF_INET6, 1178 .tuple.src.l3num = AF_INET6,
1179 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), 1179 .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT),
1180 .tuple.dst.protonum = IPPROTO_TCP, 1180 .tuple.dst.protonum = IPPROTO_TCP,
1181 .help = q931_help, 1181 .help = q931_help,
1182 .expect_policy = &q931_exp_policy, 1182 .expect_policy = &q931_exp_policy,
@@ -1741,7 +1741,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
1741 .name = "RAS", 1741 .name = "RAS",
1742 .me = THIS_MODULE, 1742 .me = THIS_MODULE,
1743 .tuple.src.l3num = AF_INET, 1743 .tuple.src.l3num = AF_INET,
1744 .tuple.src.u.udp.port = __constant_htons(RAS_PORT), 1744 .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
1745 .tuple.dst.protonum = IPPROTO_UDP, 1745 .tuple.dst.protonum = IPPROTO_UDP,
1746 .help = ras_help, 1746 .help = ras_help,
1747 .expect_policy = &ras_exp_policy, 1747 .expect_policy = &ras_exp_policy,
@@ -1750,7 +1750,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
1750 .name = "RAS", 1750 .name = "RAS",
1751 .me = THIS_MODULE, 1751 .me = THIS_MODULE,
1752 .tuple.src.l3num = AF_INET6, 1752 .tuple.src.l3num = AF_INET6,
1753 .tuple.src.u.udp.port = __constant_htons(RAS_PORT), 1753 .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
1754 .tuple.dst.protonum = IPPROTO_UDP, 1754 .tuple.dst.protonum = IPPROTO_UDP,
1755 .help = ras_help, 1755 .help = ras_help,
1756 .expect_policy = &ras_exp_policy, 1756 .expect_policy = &ras_exp_policy,
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 5af4273b466..8a3875e36ec 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -105,7 +105,7 @@ static struct nf_conntrack_expect_policy exp_policy = {
105static struct nf_conntrack_helper helper __read_mostly = { 105static struct nf_conntrack_helper helper __read_mostly = {
106 .name = "netbios-ns", 106 .name = "netbios-ns",
107 .tuple.src.l3num = AF_INET, 107 .tuple.src.l3num = AF_INET,
108 .tuple.src.u.udp.port = __constant_htons(NMBD_PORT), 108 .tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT),
109 .tuple.dst.protonum = IPPROTO_UDP, 109 .tuple.dst.protonum = IPPROTO_UDP,
110 .me = THIS_MODULE, 110 .me = THIS_MODULE,
111 .help = help, 111 .help = help,
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 9e169ef2e85..72cca638a82 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -591,7 +591,7 @@ static struct nf_conntrack_helper pptp __read_mostly = {
591 .name = "pptp", 591 .name = "pptp",
592 .me = THIS_MODULE, 592 .me = THIS_MODULE,
593 .tuple.src.l3num = AF_INET, 593 .tuple.src.l3num = AF_INET,
594 .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT), 594 .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT),
595 .tuple.dst.protonum = IPPROTO_TCP, 595 .tuple.dst.protonum = IPPROTO_TCP,
596 .help = conntrack_pptp_help, 596 .help = conntrack_pptp_help,
597 .destroy = pptp_destroy_siblings, 597 .destroy = pptp_destroy_siblings,
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index cdc97f3105a..5490fc37c92 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -71,6 +71,7 @@ int
71nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 71nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
72{ 72{
73 if (inet_sk(sk)->transparent) { 73 if (inet_sk(sk)->transparent) {
74 skb_orphan(skb);
74 skb->sk = sk; 75 skb->sk = sk;
75 skb->destructor = nf_tproxy_destructor; 76 skb->destructor = nf_tproxy_destructor;
76 return 1; 77 return 1;
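
[Editor's sketch — a generic userspace analogy for the skb_orphan() line added above; the types and functions are invented for illustration. The point is to release the buffer's current owner and destructor before attaching a new one, so the previous owner's reference is not leaked.]

#include <stdio.h>

struct buf {
    void *owner;
    void (*destructor)(struct buf *b);
};

static void old_destructor(struct buf *b) { (void)b; printf("old owner released\n"); }
static void new_destructor(struct buf *b) { (void)b; printf("new owner released\n"); }

/* Analogue of skb_orphan(): drop the current owner, if any */
static void buf_orphan(struct buf *b)
{
    if (b->destructor)
        b->destructor(b);
    b->owner = NULL;
    b->destructor = NULL;
}

static void buf_assign(struct buf *b, void *owner, void (*dtor)(struct buf *))
{
    buf_orphan(b);          /* mirrors the added skb_orphan(skb) call */
    b->owner = owner;
    b->destructor = dtor;
}

int main(void)
{
    int old_sock, new_sock;
    struct buf b = { &old_sock, old_destructor };

    buf_assign(&b, &new_sock, new_destructor);
    buf_orphan(&b);
    return 0;
}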
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9eb895c7a2a..6ee69c27f80 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -950,6 +950,7 @@ struct netlink_broadcast_data {
950 u32 pid; 950 u32 pid;
951 u32 group; 951 u32 group;
952 int failure; 952 int failure;
953 int delivery_failure;
953 int congested; 954 int congested;
954 int delivered; 955 int delivered;
955 gfp_t allocation; 956 gfp_t allocation;
@@ -999,6 +1000,7 @@ static inline int do_one_broadcast(struct sock *sk,
999 p->skb2 = NULL; 1000 p->skb2 = NULL;
1000 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { 1001 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1001 netlink_overrun(sk); 1002 netlink_overrun(sk);
1003 p->delivery_failure = 1;
1002 } else { 1004 } else {
1003 p->congested |= val; 1005 p->congested |= val;
1004 p->delivered = 1; 1006 p->delivered = 1;
@@ -1025,6 +1027,7 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
1025 info.pid = pid; 1027 info.pid = pid;
1026 info.group = group; 1028 info.group = group;
1027 info.failure = 0; 1029 info.failure = 0;
1030 info.delivery_failure = 0;
1028 info.congested = 0; 1031 info.congested = 0;
1029 info.delivered = 0; 1032 info.delivered = 0;
1030 info.allocation = allocation; 1033 info.allocation = allocation;
@@ -1045,13 +1048,14 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
1045 if (info.skb2) 1048 if (info.skb2)
1046 kfree_skb(info.skb2); 1049 kfree_skb(info.skb2);
1047 1050
1051 if (info.delivery_failure || info.failure)
1052 return -ENOBUFS;
1053
1048 if (info.delivered) { 1054 if (info.delivered) {
1049 if (info.congested && (allocation & __GFP_WAIT)) 1055 if (info.congested && (allocation & __GFP_WAIT))
1050 yield(); 1056 yield();
1051 return 0; 1057 return 0;
1052 } 1058 }
1053 if (info.failure)
1054 return -ENOBUFS;
1055 return -ESRCH; 1059 return -ESRCH;
1056} 1060}
1057EXPORT_SYMBOL(netlink_broadcast); 1061EXPORT_SYMBOL(netlink_broadcast);
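
[Editor's sketch — a userspace model of the return-value policy after the netlink_broadcast() change above; struct and function names are illustrative. Any allocation or delivery failure now wins and maps to -ENOBUFS, otherwise at least one delivery means success, and no listeners at all means -ESRCH.]

#include <errno.h>
#include <stdio.h>

struct bcast_state {
    int failure;            /* skb clone/alloc failed for some receiver */
    int delivery_failure;   /* a receiver's queue overran */
    int delivered;          /* at least one receiver got the message */
};

static int bcast_result(const struct bcast_state *s)
{
    if (s->failure || s->delivery_failure)
        return -ENOBUFS;
    if (s->delivered)
        return 0;
    return -ESRCH;
}

int main(void)
{
    struct bcast_state s = { .delivered = 1, .delivery_failure = 1 };

    printf("%d\n", bcast_result(&s));   /* -ENOBUFS: failure takes precedence */
    return 0;
}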
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index e9c05b8f4f4..cba7849de98 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1432,7 +1432,7 @@ static int __init nr_proto_init(void)
1432 struct net_device *dev; 1432 struct net_device *dev;
1433 1433
1434 sprintf(name, "nr%d", i); 1434 sprintf(name, "nr%d", i);
1435 dev = alloc_netdev(sizeof(struct nr_private), name, nr_setup); 1435 dev = alloc_netdev(0, name, nr_setup);
1436 if (!dev) { 1436 if (!dev) {
1437 printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); 1437 printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
1438 goto fail; 1438 goto fail;
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 6caf459665f..351372463fe 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -42,7 +42,7 @@
42 42
43int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) 43int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
44{ 44{
45 struct net_device_stats *stats = netdev_priv(dev); 45 struct net_device_stats *stats = &dev->stats;
46 46
47 if (!netif_running(dev)) { 47 if (!netif_running(dev)) {
48 stats->rx_dropped++; 48 stats->rx_dropped++;
@@ -171,8 +171,7 @@ static int nr_close(struct net_device *dev)
171 171
172static int nr_xmit(struct sk_buff *skb, struct net_device *dev) 172static int nr_xmit(struct sk_buff *skb, struct net_device *dev)
173{ 173{
174 struct nr_private *nr = netdev_priv(dev); 174 struct net_device_stats *stats = &dev->stats;
175 struct net_device_stats *stats = &nr->stats;
176 unsigned int len = skb->len; 175 unsigned int len = skb->len;
177 176
178 if (!nr_route_frame(skb, NULL)) { 177 if (!nr_route_frame(skb, NULL)) {
@@ -187,34 +186,27 @@ static int nr_xmit(struct sk_buff *skb, struct net_device *dev)
187 return 0; 186 return 0;
188} 187}
189 188
190static struct net_device_stats *nr_get_stats(struct net_device *dev)
191{
192 struct nr_private *nr = netdev_priv(dev);
193
194 return &nr->stats;
195}
196
197static const struct header_ops nr_header_ops = { 189static const struct header_ops nr_header_ops = {
198 .create = nr_header, 190 .create = nr_header,
199 .rebuild= nr_rebuild_header, 191 .rebuild= nr_rebuild_header,
200}; 192};
201 193
194static const struct net_device_ops nr_netdev_ops = {
195 .ndo_open = nr_open,
196 .ndo_stop = nr_close,
197 .ndo_start_xmit = nr_xmit,
198 .ndo_set_mac_address = nr_set_mac_address,
199};
202 200
203void nr_setup(struct net_device *dev) 201void nr_setup(struct net_device *dev)
204{ 202{
205 dev->mtu = NR_MAX_PACKET_SIZE; 203 dev->mtu = NR_MAX_PACKET_SIZE;
206 dev->hard_start_xmit = nr_xmit; 204 dev->netdev_ops = &nr_netdev_ops;
207 dev->open = nr_open;
208 dev->stop = nr_close;
209
210 dev->header_ops = &nr_header_ops; 205 dev->header_ops = &nr_header_ops;
211 dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; 206 dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;
212 dev->addr_len = AX25_ADDR_LEN; 207 dev->addr_len = AX25_ADDR_LEN;
213 dev->type = ARPHRD_NETROM; 208 dev->type = ARPHRD_NETROM;
214 dev->set_mac_address = nr_set_mac_address;
215 209
216 /* New-style flags. */ 210 /* New-style flags. */
217 dev->flags = IFF_NOARP; 211 dev->flags = IFF_NOARP;
218
219 dev->get_stats = nr_get_stats;
220} 212}
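
[Editor's sketch — a userspace illustration of the old-style callbacks versus the net_device_ops conversion shown above; the struct and function names are invented. Bundling the callbacks in one const ops table lets every device of a type share a single read-only structure instead of carrying per-device function pointers.]

#include <stdio.h>

struct device;

struct device_ops {
    int (*open)(struct device *dev);
    int (*stop)(struct device *dev);
};

struct device {
    const char *name;
    const struct device_ops *ops;   /* replaces dev->open, dev->stop, ... */
};

static int demo_open(struct device *dev) { printf("%s up\n", dev->name); return 0; }
static int demo_stop(struct device *dev) { printf("%s down\n", dev->name); return 0; }

static const struct device_ops demo_ops = {
    .open = demo_open,
    .stop = demo_stop,
};

int main(void)
{
    struct device d = { .name = "nr0", .ops = &demo_ops };

    d.ops->open(&d);
    d.ops->stop(&d);
    return 0;
}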
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 13cb323f8c3..81795ea8779 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -275,8 +275,6 @@ static inline int can_respond(struct sk_buff *skb)
275 return 0; 275 return 0;
276 276
277 ph = pn_hdr(skb); 277 ph = pn_hdr(skb);
278 if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev)
279 return 0; /* we are not the destination */
280 if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) 278 if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5))
281 return 0; 279 return 0;
282 if (ph->pn_res == PN_COMMGR) /* indications */ 280 if (ph->pn_res == PN_COMMGR) /* indications */
@@ -344,8 +342,8 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
344 struct packet_type *pkttype, 342 struct packet_type *pkttype,
345 struct net_device *orig_dev) 343 struct net_device *orig_dev)
346{ 344{
345 struct net *net = dev_net(dev);
347 struct phonethdr *ph; 346 struct phonethdr *ph;
348 struct sock *sk;
349 struct sockaddr_pn sa; 347 struct sockaddr_pn sa;
350 u16 len; 348 u16 len;
351 349
@@ -364,28 +362,28 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
364 skb_reset_transport_header(skb); 362 skb_reset_transport_header(skb);
365 363
366 pn_skb_get_dst_sockaddr(skb, &sa); 364 pn_skb_get_dst_sockaddr(skb, &sa);
367 if (pn_sockaddr_get_addr(&sa) == 0)
368 goto out; /* currently, we cannot be device 0 */
369 365
370 sk = pn_find_sock_by_sa(dev_net(dev), &sa); 366 /* check if we are the destination */
371 if (sk == NULL) { 367 if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
368 /* Phonet packet input */
369 struct sock *sk = pn_find_sock_by_sa(net, &sa);
370
371 if (sk)
372 return sk_receive_skb(sk, skb, 0);
373
372 if (can_respond(skb)) { 374 if (can_respond(skb)) {
373 send_obj_unreachable(skb); 375 send_obj_unreachable(skb);
374 send_reset_indications(skb); 376 send_reset_indications(skb);
375 } 377 }
376 goto out;
377 } 378 }
378 379
379 /* Push data to the socket (or other sockets connected to it). */
380 return sk_receive_skb(sk, skb, 0);
381
382out: 380out:
383 kfree_skb(skb); 381 kfree_skb(skb);
384 return NET_RX_DROP; 382 return NET_RX_DROP;
385} 383}
386 384
387static struct packet_type phonet_packet_type = { 385static struct packet_type phonet_packet_type = {
388 .type = __constant_htons(ETH_P_PHONET), 386 .type = cpu_to_be16(ETH_P_PHONET),
389 .dev = NULL, 387 .dev = NULL,
390 .func = phonet_rcv, 388 .func = phonet_rcv,
391}; 389};
@@ -428,16 +426,18 @@ static int __init phonet_init(void)
428{ 426{
429 int err; 427 int err;
430 428
429 err = phonet_device_init();
430 if (err)
431 return err;
432
431 err = sock_register(&phonet_proto_family); 433 err = sock_register(&phonet_proto_family);
432 if (err) { 434 if (err) {
433 printk(KERN_ALERT 435 printk(KERN_ALERT
434 "phonet protocol family initialization failed\n"); 436 "phonet protocol family initialization failed\n");
435 return err; 437 goto err_sock;
436 } 438 }
437 439
438 phonet_device_init();
439 dev_add_pack(&phonet_packet_type); 440 dev_add_pack(&phonet_packet_type);
440 phonet_netlink_register();
441 phonet_sysctl_init(); 441 phonet_sysctl_init();
442 442
443 err = isi_register(); 443 err = isi_register();
@@ -449,6 +449,7 @@ err:
449 phonet_sysctl_exit(); 449 phonet_sysctl_exit();
450 sock_unregister(PF_PHONET); 450 sock_unregister(PF_PHONET);
451 dev_remove_pack(&phonet_packet_type); 451 dev_remove_pack(&phonet_packet_type);
452err_sock:
452 phonet_device_exit(); 453 phonet_device_exit();
453 return err; 454 return err;
454} 455}
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 5491bf5e354..80a322d7790 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -28,32 +28,41 @@
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/phonet.h> 29#include <linux/phonet.h>
30#include <net/sock.h> 30#include <net/sock.h>
31#include <net/netns/generic.h>
31#include <net/phonet/pn_dev.h> 32#include <net/phonet/pn_dev.h>
32 33
33/* when accessing, remember to lock with spin_lock(&pndevs.lock); */ 34struct phonet_net {
34struct phonet_device_list pndevs = { 35 struct phonet_device_list pndevs;
35 .list = LIST_HEAD_INIT(pndevs.list),
36 .lock = __SPIN_LOCK_UNLOCKED(pndevs.lock),
37}; 36};
38 37
38int phonet_net_id;
39
40struct phonet_device_list *phonet_device_list(struct net *net)
41{
42 struct phonet_net *pnn = net_generic(net, phonet_net_id);
43 return &pnn->pndevs;
44}
45
39/* Allocate new Phonet device. */ 46/* Allocate new Phonet device. */
40static struct phonet_device *__phonet_device_alloc(struct net_device *dev) 47static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
41{ 48{
49 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
42 struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC); 50 struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
43 if (pnd == NULL) 51 if (pnd == NULL)
44 return NULL; 52 return NULL;
45 pnd->netdev = dev; 53 pnd->netdev = dev;
46 bitmap_zero(pnd->addrs, 64); 54 bitmap_zero(pnd->addrs, 64);
47 55
48 list_add(&pnd->list, &pndevs.list); 56 list_add(&pnd->list, &pndevs->list);
49 return pnd; 57 return pnd;
50} 58}
51 59
52static struct phonet_device *__phonet_get(struct net_device *dev) 60static struct phonet_device *__phonet_get(struct net_device *dev)
53{ 61{
62 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
54 struct phonet_device *pnd; 63 struct phonet_device *pnd;
55 64
56 list_for_each_entry(pnd, &pndevs.list, list) { 65 list_for_each_entry(pnd, &pndevs->list, list) {
57 if (pnd->netdev == dev) 66 if (pnd->netdev == dev)
58 return pnd; 67 return pnd;
59 } 68 }
@@ -68,32 +77,33 @@ static void __phonet_device_free(struct phonet_device *pnd)
68 77
69struct net_device *phonet_device_get(struct net *net) 78struct net_device *phonet_device_get(struct net *net)
70{ 79{
80 struct phonet_device_list *pndevs = phonet_device_list(net);
71 struct phonet_device *pnd; 81 struct phonet_device *pnd;
72 struct net_device *dev; 82 struct net_device *dev;
73 83
74 spin_lock_bh(&pndevs.lock); 84 spin_lock_bh(&pndevs->lock);
75 list_for_each_entry(pnd, &pndevs.list, list) { 85 list_for_each_entry(pnd, &pndevs->list, list) {
76 dev = pnd->netdev; 86 dev = pnd->netdev;
77 BUG_ON(!dev); 87 BUG_ON(!dev);
78 88
79 if (net_eq(dev_net(dev), net) && 89 if ((dev->reg_state == NETREG_REGISTERED) &&
80 (dev->reg_state == NETREG_REGISTERED) &&
81 ((pnd->netdev->flags & IFF_UP)) == IFF_UP) 90 ((pnd->netdev->flags & IFF_UP)) == IFF_UP)
82 break; 91 break;
83 dev = NULL; 92 dev = NULL;
84 } 93 }
85 if (dev) 94 if (dev)
86 dev_hold(dev); 95 dev_hold(dev);
87 spin_unlock_bh(&pndevs.lock); 96 spin_unlock_bh(&pndevs->lock);
88 return dev; 97 return dev;
89} 98}
90 99
91int phonet_address_add(struct net_device *dev, u8 addr) 100int phonet_address_add(struct net_device *dev, u8 addr)
92{ 101{
102 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
93 struct phonet_device *pnd; 103 struct phonet_device *pnd;
94 int err = 0; 104 int err = 0;
95 105
96 spin_lock_bh(&pndevs.lock); 106 spin_lock_bh(&pndevs->lock);
97 /* Find or create Phonet-specific device data */ 107 /* Find or create Phonet-specific device data */
98 pnd = __phonet_get(dev); 108 pnd = __phonet_get(dev);
99 if (pnd == NULL) 109 if (pnd == NULL)
@@ -102,31 +112,33 @@ int phonet_address_add(struct net_device *dev, u8 addr)
102 err = -ENOMEM; 112 err = -ENOMEM;
103 else if (test_and_set_bit(addr >> 2, pnd->addrs)) 113 else if (test_and_set_bit(addr >> 2, pnd->addrs))
104 err = -EEXIST; 114 err = -EEXIST;
105 spin_unlock_bh(&pndevs.lock); 115 spin_unlock_bh(&pndevs->lock);
106 return err; 116 return err;
107} 117}
108 118
109int phonet_address_del(struct net_device *dev, u8 addr) 119int phonet_address_del(struct net_device *dev, u8 addr)
110{ 120{
121 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
111 struct phonet_device *pnd; 122 struct phonet_device *pnd;
112 int err = 0; 123 int err = 0;
113 124
114 spin_lock_bh(&pndevs.lock); 125 spin_lock_bh(&pndevs->lock);
115 pnd = __phonet_get(dev); 126 pnd = __phonet_get(dev);
116 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) 127 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
117 err = -EADDRNOTAVAIL; 128 err = -EADDRNOTAVAIL;
118 else if (bitmap_empty(pnd->addrs, 64)) 129 else if (bitmap_empty(pnd->addrs, 64))
119 __phonet_device_free(pnd); 130 __phonet_device_free(pnd);
120 spin_unlock_bh(&pndevs.lock); 131 spin_unlock_bh(&pndevs->lock);
121 return err; 132 return err;
122} 133}
123 134
124/* Gets a source address toward a destination, through a interface. */ 135/* Gets a source address toward a destination, through a interface. */
125u8 phonet_address_get(struct net_device *dev, u8 addr) 136u8 phonet_address_get(struct net_device *dev, u8 addr)
126{ 137{
138 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
127 struct phonet_device *pnd; 139 struct phonet_device *pnd;
128 140
129 spin_lock_bh(&pndevs.lock); 141 spin_lock_bh(&pndevs->lock);
130 pnd = __phonet_get(dev); 142 pnd = __phonet_get(dev);
131 if (pnd) { 143 if (pnd) {
132 BUG_ON(bitmap_empty(pnd->addrs, 64)); 144 BUG_ON(bitmap_empty(pnd->addrs, 64));
@@ -136,30 +148,31 @@ u8 phonet_address_get(struct net_device *dev, u8 addr)
136 addr = find_first_bit(pnd->addrs, 64) << 2; 148 addr = find_first_bit(pnd->addrs, 64) << 2;
137 } else 149 } else
138 addr = PN_NO_ADDR; 150 addr = PN_NO_ADDR;
139 spin_unlock_bh(&pndevs.lock); 151 spin_unlock_bh(&pndevs->lock);
140 return addr; 152 return addr;
141} 153}
142 154
143int phonet_address_lookup(struct net *net, u8 addr) 155int phonet_address_lookup(struct net *net, u8 addr)
144{ 156{
157 struct phonet_device_list *pndevs = phonet_device_list(net);
145 struct phonet_device *pnd; 158 struct phonet_device *pnd;
159 int err = -EADDRNOTAVAIL;
146 160
147 spin_lock_bh(&pndevs.lock); 161 spin_lock_bh(&pndevs->lock);
148 list_for_each_entry(pnd, &pndevs.list, list) { 162 list_for_each_entry(pnd, &pndevs->list, list) {
149 if (!net_eq(dev_net(pnd->netdev), net))
150 continue;
151 /* Don't allow unregistering devices! */ 163 /* Don't allow unregistering devices! */
152 if ((pnd->netdev->reg_state != NETREG_REGISTERED) || 164 if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
153 ((pnd->netdev->flags & IFF_UP)) != IFF_UP) 165 ((pnd->netdev->flags & IFF_UP)) != IFF_UP)
154 continue; 166 continue;
155 167
156 if (test_bit(addr >> 2, pnd->addrs)) { 168 if (test_bit(addr >> 2, pnd->addrs)) {
157 spin_unlock_bh(&pndevs.lock); 169 err = 0;
158 return 0; 170 goto found;
159 } 171 }
160 } 172 }
161 spin_unlock_bh(&pndevs.lock); 173found:
162 return -EADDRNOTAVAIL; 174 spin_unlock_bh(&pndevs->lock);
175 return err;
163} 176}
164 177
165/* notify Phonet of device events */ 178/* notify Phonet of device events */
@@ -169,14 +182,16 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what,
169 struct net_device *dev = arg; 182 struct net_device *dev = arg;
170 183
171 if (what == NETDEV_UNREGISTER) { 184 if (what == NETDEV_UNREGISTER) {
185 struct phonet_device_list *pndevs;
172 struct phonet_device *pnd; 186 struct phonet_device *pnd;
173 187
174 /* Destroy phonet-specific device data */ 188 /* Destroy phonet-specific device data */
175 spin_lock_bh(&pndevs.lock); 189 pndevs = phonet_device_list(dev_net(dev));
190 spin_lock_bh(&pndevs->lock);
176 pnd = __phonet_get(dev); 191 pnd = __phonet_get(dev);
177 if (pnd) 192 if (pnd)
178 __phonet_device_free(pnd); 193 __phonet_device_free(pnd);
179 spin_unlock_bh(&pndevs.lock); 194 spin_unlock_bh(&pndevs->lock);
180 } 195 }
181 return 0; 196 return 0;
182 197
@@ -187,24 +202,52 @@ static struct notifier_block phonet_device_notifier = {
187 .priority = 0, 202 .priority = 0,
188}; 203};
189 204
190/* Initialize Phonet devices list */ 205/* Per-namespace Phonet devices handling */
191void phonet_device_init(void) 206static int phonet_init_net(struct net *net)
192{ 207{
193 register_netdevice_notifier(&phonet_device_notifier); 208 struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL);
209 if (!pnn)
210 return -ENOMEM;
211
212 INIT_LIST_HEAD(&pnn->pndevs.list);
213 spin_lock_init(&pnn->pndevs.lock);
214 net_assign_generic(net, phonet_net_id, pnn);
215 return 0;
194} 216}
195 217
196void phonet_device_exit(void) 218static void phonet_exit_net(struct net *net)
197{ 219{
220 struct phonet_net *pnn = net_generic(net, phonet_net_id);
198 struct phonet_device *pnd, *n; 221 struct phonet_device *pnd, *n;
199 222
200 rtnl_unregister_all(PF_PHONET); 223 list_for_each_entry_safe(pnd, n, &pnn->pndevs.list, list)
201 rtnl_lock();
202 spin_lock_bh(&pndevs.lock);
203
204 list_for_each_entry_safe(pnd, n, &pndevs.list, list)
205 __phonet_device_free(pnd); 224 __phonet_device_free(pnd);
206 225
207 spin_unlock_bh(&pndevs.lock); 226 kfree(pnn);
208 rtnl_unlock(); 227}
228
229static struct pernet_operations phonet_net_ops = {
230 .init = phonet_init_net,
231 .exit = phonet_exit_net,
232};
233
234/* Initialize Phonet devices list */
235int __init phonet_device_init(void)
236{
237 int err = register_pernet_gen_device(&phonet_net_id, &phonet_net_ops);
238 if (err)
239 return err;
240
241 register_netdevice_notifier(&phonet_device_notifier);
242 err = phonet_netlink_register();
243 if (err)
244 phonet_device_exit();
245 return err;
246}
247
248void phonet_device_exit(void)
249{
250 rtnl_unregister_all(PF_PHONET);
209 unregister_netdevice_notifier(&phonet_device_notifier); 251 unregister_netdevice_notifier(&phonet_device_notifier);
252 unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops);
210} 253}
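
[Editor's sketch — a userspace model of the per-namespace Phonet device list introduced above; it stands in for net_generic()/register_pernet_gen_device() and uses invented names. Each namespace owns its own private device-list state instead of sharing one global list.]

#include <stdlib.h>
#include <stdio.h>

struct pn_dev_list {
    int count;                      /* stand-in for the real list head + lock */
};

struct net_ns {
    struct pn_dev_list *pndevs;     /* per-namespace private data */
};

static int demo_init_net(struct net_ns *ns)
{
    ns->pndevs = calloc(1, sizeof(*ns->pndevs));
    return ns->pndevs ? 0 : -1;
}

static void demo_exit_net(struct net_ns *ns)
{
    free(ns->pndevs);
    ns->pndevs = NULL;
}

int main(void)
{
    struct net_ns init_net = { 0 }, other_net = { 0 };

    demo_init_net(&init_net);
    demo_init_net(&other_net);
    init_net.pndevs->count = 2;     /* devices in one ns are invisible to the other */
    printf("%d %d\n", init_net.pndevs->count, other_net.pndevs->count);
    demo_exit_net(&other_net);
    demo_exit_net(&init_net);
    return 0;
}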
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 242fe8f8c32..1ceea1f9241 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -123,17 +123,16 @@ nla_put_failure:
123 123
124static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 124static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
125{ 125{
126 struct net *net = sock_net(skb->sk); 126 struct phonet_device_list *pndevs;
127 struct phonet_device *pnd; 127 struct phonet_device *pnd;
128 int dev_idx = 0, dev_start_idx = cb->args[0]; 128 int dev_idx = 0, dev_start_idx = cb->args[0];
129 int addr_idx = 0, addr_start_idx = cb->args[1]; 129 int addr_idx = 0, addr_start_idx = cb->args[1];
130 130
131 spin_lock_bh(&pndevs.lock); 131 pndevs = phonet_device_list(sock_net(skb->sk));
132 list_for_each_entry(pnd, &pndevs.list, list) { 132 spin_lock_bh(&pndevs->lock);
133 list_for_each_entry(pnd, &pndevs->list, list) {
133 u8 addr; 134 u8 addr;
134 135
135 if (!net_eq(dev_net(pnd->netdev), net))
136 continue;
137 if (dev_idx > dev_start_idx) 136 if (dev_idx > dev_start_idx)
138 addr_start_idx = 0; 137 addr_start_idx = 0;
139 if (dev_idx++ < dev_start_idx) 138 if (dev_idx++ < dev_start_idx)
@@ -153,16 +152,21 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
153 } 152 }
154 153
155out: 154out:
156 spin_unlock_bh(&pndevs.lock); 155 spin_unlock_bh(&pndevs->lock);
157 cb->args[0] = dev_idx; 156 cb->args[0] = dev_idx;
158 cb->args[1] = addr_idx; 157 cb->args[1] = addr_idx;
159 158
160 return skb->len; 159 return skb->len;
161} 160}
162 161
163void __init phonet_netlink_register(void) 162int __init phonet_netlink_register(void)
164{ 163{
165 rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); 164 int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL);
166 rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); 165 if (err)
167 rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); 166 return err;
167
168 /* Further __rtnl_register() cannot fail */
169 __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL);
170 __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit);
171 return 0;
168} 172}
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 01392649b46..65013962658 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1587,8 +1587,7 @@ static int __init rose_proto_init(void)
1587 char name[IFNAMSIZ]; 1587 char name[IFNAMSIZ];
1588 1588
1589 sprintf(name, "rose%d", i); 1589 sprintf(name, "rose%d", i);
1590 dev = alloc_netdev(sizeof(struct net_device_stats), 1590 dev = alloc_netdev(0, name, rose_setup);
1591 name, rose_setup);
1592 if (!dev) { 1591 if (!dev) {
1593 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); 1592 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1594 rc = -ENOMEM; 1593 rc = -ENOMEM;
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 12cfcf09556..7dcf2569613 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -57,7 +57,7 @@ static int rose_rebuild_header(struct sk_buff *skb)
57{ 57{
58#ifdef CONFIG_INET 58#ifdef CONFIG_INET
59 struct net_device *dev = skb->dev; 59 struct net_device *dev = skb->dev;
60 struct net_device_stats *stats = netdev_priv(dev); 60 struct net_device_stats *stats = &dev->stats;
61 unsigned char *bp = (unsigned char *)skb->data; 61 unsigned char *bp = (unsigned char *)skb->data;
62 struct sk_buff *skbn; 62 struct sk_buff *skbn;
63 unsigned int len; 63 unsigned int len;
@@ -133,7 +133,7 @@ static int rose_close(struct net_device *dev)
133 133
134static int rose_xmit(struct sk_buff *skb, struct net_device *dev) 134static int rose_xmit(struct sk_buff *skb, struct net_device *dev)
135{ 135{
136 struct net_device_stats *stats = netdev_priv(dev); 136 struct net_device_stats *stats = &dev->stats;
137 137
138 if (!netif_running(dev)) { 138 if (!netif_running(dev)) {
139 printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n"); 139 printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n");
@@ -144,30 +144,28 @@ static int rose_xmit(struct sk_buff *skb, struct net_device *dev)
144 return 0; 144 return 0;
145} 145}
146 146
147static struct net_device_stats *rose_get_stats(struct net_device *dev)
148{
149 return netdev_priv(dev);
150}
151
152static const struct header_ops rose_header_ops = { 147static const struct header_ops rose_header_ops = {
153 .create = rose_header, 148 .create = rose_header,
154 .rebuild= rose_rebuild_header, 149 .rebuild= rose_rebuild_header,
155}; 150};
156 151
152static const struct net_device_ops rose_netdev_ops = {
153 .ndo_open = rose_open,
154 .ndo_stop = rose_close,
155 .ndo_start_xmit = rose_xmit,
156 .ndo_set_mac_address = rose_set_mac_address,
157};
158
157void rose_setup(struct net_device *dev) 159void rose_setup(struct net_device *dev)
158{ 160{
159 dev->mtu = ROSE_MAX_PACKET_SIZE - 2; 161 dev->mtu = ROSE_MAX_PACKET_SIZE - 2;
160 dev->hard_start_xmit = rose_xmit; 162 dev->netdev_ops = &rose_netdev_ops;
161 dev->open = rose_open;
162 dev->stop = rose_close;
163 163
164 dev->header_ops = &rose_header_ops; 164 dev->header_ops = &rose_header_ops;
165 dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; 165 dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
166 dev->addr_len = ROSE_ADDR_LEN; 166 dev->addr_len = ROSE_ADDR_LEN;
167 dev->type = ARPHRD_ROSE; 167 dev->type = ARPHRD_ROSE;
168 dev->set_mac_address = rose_set_mac_address;
169 168
170 /* New-style flags. */ 169 /* New-style flags. */
171 dev->flags = IFF_NOARP; 170 dev->flags = IFF_NOARP;
172 dev->get_stats = rose_get_stats;
173} 171}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0fc4a18fd96..32009793307 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -444,6 +444,17 @@ out:
444} 444}
445EXPORT_SYMBOL(qdisc_calculate_pkt_len); 445EXPORT_SYMBOL(qdisc_calculate_pkt_len);
446 446
447void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
448{
449 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
450 printk(KERN_WARNING
451 "%s: %s qdisc %X: is non-work-conserving?\n",
452 txt, qdisc->ops->id, qdisc->handle >> 16);
453 qdisc->flags |= TCQ_F_WARN_NONWC;
454 }
455}
456EXPORT_SYMBOL(qdisc_warn_nonwc);
457
447static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) 458static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
448{ 459{
449 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, 460 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
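
[Editor's sketch — a userspace illustration of the warn-once pattern behind the new qdisc_warn_nonwc() helper above; the struct and flag names are illustrative. A flag bit ensures the message is printed only the first time the condition is hit.]

#include <stdio.h>

#define WARNED_NONWC 0x1

struct qdisc_demo {
    const char *id;
    unsigned int handle;
    unsigned int flags;
};

static void warn_nonwc(const char *txt, struct qdisc_demo *q)
{
    if (!(q->flags & WARNED_NONWC)) {
        fprintf(stderr, "%s: %s qdisc %X: is non-work-conserving?\n",
                txt, q->id, q->handle >> 16);
        q->flags |= WARNED_NONWC;
    }
}

int main(void)
{
    struct qdisc_demo q = { .id = "htb", .handle = 0x10000 };

    warn_nonwc("demo", &q);
    warn_nonwc("demo", &q);     /* second call is silent */
    return 0;
}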
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 45c31b1a4e1..74226b26552 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -887,8 +887,7 @@ qdisc_peek_len(struct Qdisc *sch)
887 887
888 skb = sch->ops->peek(sch); 888 skb = sch->ops->peek(sch);
889 if (skb == NULL) { 889 if (skb == NULL) {
890 if (net_ratelimit()) 890 qdisc_warn_nonwc("qdisc_peek_len", sch);
891 printk("qdisc_peek_len: non work-conserving qdisc ?\n");
892 return 0; 891 return 0;
893 } 892 }
894 len = qdisc_pkt_len(skb); 893 len = qdisc_pkt_len(skb);
@@ -1642,8 +1641,7 @@ hfsc_dequeue(struct Qdisc *sch)
1642 1641
1643 skb = qdisc_dequeue_peeked(cl->qdisc); 1642 skb = qdisc_dequeue_peeked(cl->qdisc);
1644 if (skb == NULL) { 1643 if (skb == NULL) {
1645 if (net_ratelimit()) 1644 qdisc_warn_nonwc("HFSC", cl->qdisc);
1646 printk("HFSC: Non-work-conserving qdisc ?\n");
1647 return NULL; 1645 return NULL;
1648 } 1646 }
1649 1647
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2f0f0b04d3f..355974f610c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -35,6 +35,7 @@
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/compiler.h> 36#include <linux/compiler.h>
37#include <linux/rbtree.h> 37#include <linux/rbtree.h>
38#include <linux/workqueue.h>
38#include <net/netlink.h> 39#include <net/netlink.h>
39#include <net/pkt_sched.h> 40#include <net/pkt_sched.h>
40 41
@@ -114,8 +115,6 @@ struct htb_class {
114 struct tcf_proto *filter_list; 115 struct tcf_proto *filter_list;
115 int filter_cnt; 116 int filter_cnt;
116 117
117 int warned; /* only one warning about non work conserving .. */
118
119 /* token bucket parameters */ 118 /* token bucket parameters */
120 struct qdisc_rate_table *rate; /* rate table of the class itself */ 119 struct qdisc_rate_table *rate; /* rate table of the class itself */
121 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ 120 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
@@ -155,6 +154,10 @@ struct htb_sched {
155 int direct_qlen; /* max qlen of above */ 154 int direct_qlen; /* max qlen of above */
156 155
157 long direct_pkts; 156 long direct_pkts;
157
158#define HTB_WARN_TOOMANYEVENTS 0x1
159 unsigned int warned; /* only one warning */
160 struct work_struct work;
158}; 161};
159 162
160/* find class in global hash table using given handle */ 163/* find class in global hash table using given handle */
@@ -658,7 +661,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
658 * htb_do_events - make mode changes to classes at the level 661 * htb_do_events - make mode changes to classes at the level
659 * 662 *
660 * Scans event queue for pending events and applies them. Returns time of 663 * Scans event queue for pending events and applies them. Returns time of
661 * next pending event (0 for no event in pq). 664 * next pending event (0 for no event in pq, q->now for too many events).
662 * Note: Applied are events whose have cl->pq_key <= q->now. 665 * Note: Applied are events whose have cl->pq_key <= q->now.
663 */ 666 */
664static psched_time_t htb_do_events(struct htb_sched *q, int level, 667static psched_time_t htb_do_events(struct htb_sched *q, int level,
@@ -686,8 +689,14 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
686 if (cl->cmode != HTB_CAN_SEND) 689 if (cl->cmode != HTB_CAN_SEND)
687 htb_add_to_wait_tree(q, cl, diff); 690 htb_add_to_wait_tree(q, cl, diff);
688 } 691 }
689 /* too much load - let's continue on next jiffie (including above) */ 692
690 return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ; 693 /* too much load - let's continue after a break for scheduling */
694 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
695 printk(KERN_WARNING "htb: too many events!\n");
696 q->warned |= HTB_WARN_TOOMANYEVENTS;
697 }
698
699 return q->now;
691} 700}
692 701
693/* Returns class->node+prio from id-tree where classe's id is >= id. NULL 702/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -809,13 +818,8 @@ next:
809 skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); 818 skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
810 if (likely(skb != NULL)) 819 if (likely(skb != NULL))
811 break; 820 break;
812 if (!cl->warned) {
813 printk(KERN_WARNING
814 "htb: class %X isn't work conserving ?!\n",
815 cl->common.classid);
816 cl->warned = 1;
817 }
818 821
822 qdisc_warn_nonwc("htb", cl->un.leaf.q);
819 htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> 823 htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
820 ptr[0]) + prio); 824 ptr[0]) + prio);
821 cl = htb_lookup_leaf(q->row[level] + prio, prio, 825 cl = htb_lookup_leaf(q->row[level] + prio, prio,
@@ -892,7 +896,10 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
892 } 896 }
893 } 897 }
894 sch->qstats.overlimits++; 898 sch->qstats.overlimits++;
895 qdisc_watchdog_schedule(&q->watchdog, next_event); 899 if (likely(next_event > q->now))
900 qdisc_watchdog_schedule(&q->watchdog, next_event);
901 else
902 schedule_work(&q->work);
896fin: 903fin:
897 return skb; 904 return skb;
898} 905}
@@ -962,6 +969,14 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
962 [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, 969 [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
963}; 970};
964 971
972static void htb_work_func(struct work_struct *work)
973{
974 struct htb_sched *q = container_of(work, struct htb_sched, work);
975 struct Qdisc *sch = q->watchdog.qdisc;
976
977 __netif_schedule(qdisc_root(sch));
978}
979
965static int htb_init(struct Qdisc *sch, struct nlattr *opt) 980static int htb_init(struct Qdisc *sch, struct nlattr *opt)
966{ 981{
967 struct htb_sched *q = qdisc_priv(sch); 982 struct htb_sched *q = qdisc_priv(sch);
@@ -996,6 +1011,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
996 INIT_LIST_HEAD(q->drops + i); 1011 INIT_LIST_HEAD(q->drops + i);
997 1012
998 qdisc_watchdog_init(&q->watchdog, sch); 1013 qdisc_watchdog_init(&q->watchdog, sch);
1014 INIT_WORK(&q->work, htb_work_func);
999 skb_queue_head_init(&q->direct_queue); 1015 skb_queue_head_init(&q->direct_queue);
1000 1016
1001 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; 1017 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
@@ -1188,7 +1204,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1188 kfree(cl); 1204 kfree(cl);
1189} 1205}
1190 1206
1191/* always caled under BH & queue lock */
1192static void htb_destroy(struct Qdisc *sch) 1207static void htb_destroy(struct Qdisc *sch)
1193{ 1208{
1194 struct htb_sched *q = qdisc_priv(sch); 1209 struct htb_sched *q = qdisc_priv(sch);
@@ -1196,6 +1211,7 @@ static void htb_destroy(struct Qdisc *sch)
1196 struct htb_class *cl; 1211 struct htb_class *cl;
1197 unsigned int i; 1212 unsigned int i;
1198 1213
1214 cancel_work_sync(&q->work);
1199 qdisc_watchdog_cancel(&q->watchdog); 1215 qdisc_watchdog_cancel(&q->watchdog);
1200 /* This line used to be after htb_destroy_class call below 1216 /* This line used to be after htb_destroy_class call below
1201 and surprisingly it worked in 2.4. But it must precede it 1217 and surprisingly it worked in 2.4. But it must precede it
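
[Editor's sketch — a userspace illustration of the scheduling decision added to htb_dequeue() above; the helper names are invented. A timer is armed only for a strictly future event; if the event time has already passed (the "too many events" case), the work is deferred to ordinary work-queue context instead of programming a timer in the past.]

#include <stdio.h>

typedef long long psched_time_t;

static void arm_watchdog(psched_time_t t)   { printf("timer armed for %lld\n", t); }
static void schedule_deferred_work(void)    { printf("work scheduled now\n"); }

static void reschedule(psched_time_t next_event, psched_time_t now)
{
    if (next_event > now)
        arm_watchdog(next_event);
    else
        schedule_deferred_work();
}

int main(void)
{
    reschedule(150, 100);   /* future event: use the watchdog timer */
    reschedule(100, 100);   /* event already due: defer to work instead */
    return 0;
}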
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 7e151861794..91273120304 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -202,7 +202,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
202 int i; 202 int i;
203 203
204 if (!netif_is_multiqueue(qdisc_dev(sch))) 204 if (!netif_is_multiqueue(qdisc_dev(sch)))
205 return -EINVAL; 205 return -EOPNOTSUPP;
206 if (nla_len(opt) < sizeof(*qopt)) 206 if (nla_len(opt) < sizeof(*qopt))
207 return -EINVAL; 207 return -EINVAL;
208 208
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ceaa4aa066e..78622756669 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,8 +97,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
97 if (addr) { 97 if (addr) {
98 addr->a.v6.sin6_family = AF_INET6; 98 addr->a.v6.sin6_family = AF_INET6;
99 addr->a.v6.sin6_port = 0; 99 addr->a.v6.sin6_port = 0;
100 memcpy(&addr->a.v6.sin6_addr, &ifa->addr, 100 ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr);
101 sizeof(struct in6_addr));
102 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; 101 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
103 addr->valid = 1; 102 addr->valid = 1;
104 spin_lock_bh(&sctp_local_addr_lock); 103 spin_lock_bh(&sctp_local_addr_lock);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 73639355157..47bfba6c03e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -367,7 +367,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
367 struct sctp_transport *tp = packet->transport; 367 struct sctp_transport *tp = packet->transport;
368 struct sctp_association *asoc = tp->asoc; 368 struct sctp_association *asoc = tp->asoc;
369 struct sctphdr *sh; 369 struct sctphdr *sh;
370 __be32 crc32 = __constant_cpu_to_be32(0); 370 __be32 crc32 = cpu_to_be32(0);
371 struct sk_buff *nskb; 371 struct sk_buff *nskb;
372 struct sctp_chunk *chunk, *tmp; 372 struct sctp_chunk *chunk, *tmp;
373 struct sock *sk; 373 struct sock *sk;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fd8acb48c3f..b40e95f9851 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -100,11 +100,11 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk)
100 */ 100 */
101static const struct sctp_paramhdr ecap_param = { 101static const struct sctp_paramhdr ecap_param = {
102 SCTP_PARAM_ECN_CAPABLE, 102 SCTP_PARAM_ECN_CAPABLE,
103 __constant_htons(sizeof(struct sctp_paramhdr)), 103 cpu_to_be16(sizeof(struct sctp_paramhdr)),
104}; 104};
105static const struct sctp_paramhdr prsctp_param = { 105static const struct sctp_paramhdr prsctp_param = {
106 SCTP_PARAM_FWD_TSN_SUPPORT, 106 SCTP_PARAM_FWD_TSN_SUPPORT,
107 __constant_htons(sizeof(struct sctp_paramhdr)), 107 cpu_to_be16(sizeof(struct sctp_paramhdr)),
108}; 108};
109 109
110/* A helper to initialize to initialize an op error inside a 110/* A helper to initialize to initialize an op error inside a
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 5cbb404c4cd..b49e434c094 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1215,6 +1215,23 @@ out:
1215 read_unlock(&sk->sk_callback_lock); 1215 read_unlock(&sk->sk_callback_lock);
1216} 1216}
1217 1217
1218static void xs_write_space(struct sock *sk)
1219{
1220 struct socket *sock;
1221 struct rpc_xprt *xprt;
1222
1223 if (unlikely(!(sock = sk->sk_socket)))
1224 return;
1225 clear_bit(SOCK_NOSPACE, &sock->flags);
1226
1227 if (unlikely(!(xprt = xprt_from_sock(sk))))
1228 return;
1229 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1230 return;
1231
1232 xprt_write_space(xprt);
1233}
1234
1218/** 1235/**
1219 * xs_udp_write_space - callback invoked when socket buffer space 1236 * xs_udp_write_space - callback invoked when socket buffer space
1220 * becomes available 1237 * becomes available
@@ -1230,23 +1247,9 @@ static void xs_udp_write_space(struct sock *sk)
1230 read_lock(&sk->sk_callback_lock); 1247 read_lock(&sk->sk_callback_lock);
1231 1248
1232 /* from net/core/sock.c:sock_def_write_space */ 1249 /* from net/core/sock.c:sock_def_write_space */
1233 if (sock_writeable(sk)) { 1250 if (sock_writeable(sk))
1234 struct socket *sock; 1251 xs_write_space(sk);
1235 struct rpc_xprt *xprt;
1236
1237 if (unlikely(!(sock = sk->sk_socket)))
1238 goto out;
1239 clear_bit(SOCK_NOSPACE, &sock->flags);
1240
1241 if (unlikely(!(xprt = xprt_from_sock(sk))))
1242 goto out;
1243 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1244 goto out;
1245
1246 xprt_write_space(xprt);
1247 }
1248 1252
1249 out:
1250 read_unlock(&sk->sk_callback_lock); 1253 read_unlock(&sk->sk_callback_lock);
1251} 1254}
1252 1255
@@ -1265,23 +1268,9 @@ static void xs_tcp_write_space(struct sock *sk)
1265 read_lock(&sk->sk_callback_lock); 1268 read_lock(&sk->sk_callback_lock);
1266 1269
1267 /* from net/core/stream.c:sk_stream_write_space */ 1270 /* from net/core/stream.c:sk_stream_write_space */
1268 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { 1271 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
1269 struct socket *sock; 1272 xs_write_space(sk);
1270 struct rpc_xprt *xprt;
1271
1272 if (unlikely(!(sock = sk->sk_socket)))
1273 goto out;
1274 clear_bit(SOCK_NOSPACE, &sock->flags);
1275 1273
1276 if (unlikely(!(xprt = xprt_from_sock(sk))))
1277 goto out;
1278 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1279 goto out;
1280
1281 xprt_write_space(xprt);
1282 }
1283
1284 out:
1285 read_unlock(&sk->sk_callback_lock); 1274 read_unlock(&sk->sk_callback_lock);
1286} 1275}
1287 1276
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index cb3b4ad5368..5d149c1b5f0 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -258,7 +258,6 @@ EXPORT_SYMBOL_GPL(wimax_msg_len);
258 */ 258 */
259int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) 259int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
260{ 260{
261 int result;
262 struct device *dev = wimax_dev->net_dev->dev.parent; 261 struct device *dev = wimax_dev->net_dev->dev.parent;
263 void *msg = skb->data; 262 void *msg = skb->data;
264 size_t size = skb->len; 263 size_t size = skb->len;
@@ -266,11 +265,9 @@ int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
266 265
267 d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size); 266 d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
268 d_dump(2, dev, msg, size); 267 d_dump(2, dev, msg, size);
269 result = genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); 268 genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
270 d_printf(1, dev, "CTX: genl multicast result %d\n", result); 269 d_printf(1, dev, "CTX: genl multicast done\n");
271 if (result == -ESRCH) /* Nobody connected, ignore it */ 270 return 0;
272 result = 0; /* btw, the skb is freed already */
273 return result;
274} 271}
275EXPORT_SYMBOL_GPL(wimax_msg_send); 272EXPORT_SYMBOL_GPL(wimax_msg_send);
276 273
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 3869c032788..a0ee76b5251 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -163,16 +163,12 @@ int wimax_gnl_re_state_change_send(
163 struct device *dev = wimax_dev_to_dev(wimax_dev); 163 struct device *dev = wimax_dev_to_dev(wimax_dev);
164 d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n", 164 d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
165 wimax_dev, report_skb); 165 wimax_dev, report_skb);
166 if (report_skb == NULL) 166 if (report_skb == NULL) {
167 result = -ENOMEM;
167 goto out; 168 goto out;
168 genlmsg_end(report_skb, header);
169 result = genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
170 if (result == -ESRCH) /* Nobody connected, ignore it */
171 result = 0; /* btw, the skb is freed already */
172 if (result < 0) {
173 dev_err(dev, "RE_STCH: Error sending: %d\n", result);
174 nlmsg_free(report_skb);
175 } 169 }
170 genlmsg_end(report_skb, header);
171 genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
176out: 172out:
177 d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n", 173 d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
178 wimax_dev, report_skb, result); 174 wimax_dev, report_skb, result);
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 938a334c8db..dad43c24f69 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o 5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o 6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
7 7
8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o 8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o
9cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o 9cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o
10cfg80211-$(CONFIG_NL80211) += nl80211.o 10cfg80211-$(CONFIG_NL80211) += nl80211.o
11 11
diff --git a/net/wireless/core.c b/net/wireless/core.c
index b96fc0c3f1c..0668b2bfc1d 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -240,6 +240,8 @@ struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv)
240 mutex_init(&drv->mtx); 240 mutex_init(&drv->mtx);
241 mutex_init(&drv->devlist_mtx); 241 mutex_init(&drv->devlist_mtx);
242 INIT_LIST_HEAD(&drv->netdev_list); 242 INIT_LIST_HEAD(&drv->netdev_list);
243 spin_lock_init(&drv->bss_lock);
244 INIT_LIST_HEAD(&drv->bss_list);
243 245
244 device_initialize(&drv->wiphy.dev); 246 device_initialize(&drv->wiphy.dev);
245 drv->wiphy.dev.class = &ieee80211_class; 247 drv->wiphy.dev.class = &ieee80211_class;
@@ -259,6 +261,9 @@ int wiphy_register(struct wiphy *wiphy)
259 int i; 261 int i;
260 u16 ifmodes = wiphy->interface_modes; 262 u16 ifmodes = wiphy->interface_modes;
261 263
264 if (WARN_ON(wiphy->max_scan_ssids < 1))
265 return -EINVAL;
266
262 /* sanity check ifmodes */ 267 /* sanity check ifmodes */
263 WARN_ON(!ifmodes); 268 WARN_ON(!ifmodes);
264 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; 269 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
@@ -273,10 +278,16 @@ int wiphy_register(struct wiphy *wiphy)
273 278
274 sband->band = band; 279 sband->band = band;
275 280
276 if (!sband->n_channels || !sband->n_bitrates) { 281 if (WARN_ON(!sband->n_channels || !sband->n_bitrates))
277 WARN_ON(1); 282 return -EINVAL;
283
284 /*
285 * Since we use a u32 for rate bitmaps in
286 * ieee80211_get_response_rate, we cannot
287 * have more than 32 legacy rates.
288 */
289 if (WARN_ON(sband->n_bitrates > 32))
278 return -EINVAL; 290 return -EINVAL;
279 }
280 291
281 for (i = 0; i < sband->n_channels; i++) { 292 for (i = 0; i < sband->n_channels; i++) {
282 sband->channels[i].orig_flags = 293 sband->channels[i].orig_flags =
@@ -361,8 +372,11 @@ EXPORT_SYMBOL(wiphy_unregister);
361 372
362void cfg80211_dev_free(struct cfg80211_registered_device *drv) 373void cfg80211_dev_free(struct cfg80211_registered_device *drv)
363{ 374{
375 struct cfg80211_internal_bss *scan, *tmp;
364 mutex_destroy(&drv->mtx); 376 mutex_destroy(&drv->mtx);
365 mutex_destroy(&drv->devlist_mtx); 377 mutex_destroy(&drv->devlist_mtx);
378 list_for_each_entry_safe(scan, tmp, &drv->bss_list, list)
379 cfg80211_put_bss(&scan->pub);
366 kfree(drv); 380 kfree(drv);
367} 381}
368 382
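
[Editor's sketch — a userspace illustration of the BSS-list teardown added to cfg80211_dev_free() above; the list and refcount handling are simplified stand-ins for list_for_each_entry_safe() and kref puts. The "safe" traversal matters because dropping the last reference may free the entry while the walk is still in progress.]

#include <stdlib.h>
#include <stdio.h>

struct bss {
    struct bss *next;
    int refcount;
};

static void bss_put(struct bss *b)
{
    if (--b->refcount == 0) {
        printf("freeing bss %p\n", (void *)b);
        free(b);
    }
}

static void free_bss_list(struct bss *head)
{
    struct bss *b = head, *tmp;

    while (b) {
        tmp = b->next;      /* grab the next pointer before b may be freed */
        bss_put(b);
        b = tmp;
    }
}

int main(void)
{
    struct bss *a = calloc(1, sizeof(*a));
    struct bss *b = calloc(1, sizeof(*b));

    a->refcount = 1;
    b->refcount = 2;        /* b has another holder elsewhere */
    a->next = b;
    free_bss_list(a);       /* a is freed, b survives with refcount 1 */
    bss_put(b);
    return 0;
}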
diff --git a/net/wireless/core.h b/net/wireless/core.h
index f7fb9f41302..e29ad4cd464 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -8,6 +8,8 @@
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/kref.h>
12#include <linux/rbtree.h>
11#include <net/genetlink.h> 13#include <net/genetlink.h>
12#include <net/wireless.h> 14#include <net/wireless.h>
13#include <net/cfg80211.h> 15#include <net/cfg80211.h>
@@ -41,6 +43,13 @@ struct cfg80211_registered_device {
41 struct mutex devlist_mtx; 43 struct mutex devlist_mtx;
42 struct list_head netdev_list; 44 struct list_head netdev_list;
43 45
46 /* BSSes/scanning */
47 spinlock_t bss_lock;
48 struct list_head bss_list;
49 struct rb_root bss_tree;
50 u32 bss_generation;
51 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
52
44 /* must be last because of the way we do wiphy_priv(), 53 /* must be last because of the way we do wiphy_priv(),
45 * and it should at least be aligned to NETDEV_ALIGN */ 54 * and it should at least be aligned to NETDEV_ALIGN */
46 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 55 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -56,6 +65,15 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
56extern struct mutex cfg80211_drv_mutex; 65extern struct mutex cfg80211_drv_mutex;
57extern struct list_head cfg80211_drv_list; 66extern struct list_head cfg80211_drv_list;
58 67
68struct cfg80211_internal_bss {
69 struct list_head list;
70 struct rb_node rbn;
71 unsigned long ts;
72 struct kref ref;
73 /* must be last because of priv member */
74 struct cfg80211_bss pub;
75};
76
59/* 77/*
60 * This function returns a pointer to the driver 78 * This function returns a pointer to the driver
61 * that the genl_info item that is passed refers to. 79 * that the genl_info item that is passed refers to.
@@ -94,4 +112,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv,
94void ieee80211_set_bitrate_flags(struct wiphy *wiphy); 112void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
95void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby); 113void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby);
96 114
115void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
116
97#endif /* __NET_WIRELESS_CORE_H */ 117#endif /* __NET_WIRELESS_CORE_H */
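The "must be last" rule on the pub member exists because the scan code added later in this patch allocates the internal BSS, the driver's private BSS data and the received IEs in a single kzalloc(). A rough sketch of that layout, as implied by cfg80211_inform_bss_frame() (illustrative, not an exact byte map):

/*
 * res = kzalloc(sizeof(*res) + wiphy->bss_priv_size + ielen, gfp);
 *
 *  (u8 *)res                        -> struct cfg80211_internal_bss
 *                                      (list, rbn, ts, ref, then pub)
 *  (u8 *)res + sizeof(*res)         -> driver private area,
 *                                      wiphy->bss_priv_size bytes
 *  (u8 *)res + sizeof(*res)
 *            + wiphy->bss_priv_size -> information elements;
 *                                      pub.information_elements points here
 */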
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1e728fff474..298a4de5994 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -14,6 +14,7 @@
14#include <linux/nl80211.h> 14#include <linux/nl80211.h>
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include <linux/netlink.h> 16#include <linux/netlink.h>
17#include <linux/etherdevice.h>
17#include <net/genetlink.h> 18#include <net/genetlink.h>
18#include <net/cfg80211.h> 19#include <net/cfg80211.h>
19#include "core.h" 20#include "core.h"
@@ -105,6 +106,12 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
105 106
106 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, 107 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
107 .len = NL80211_HT_CAPABILITY_LEN }, 108 .len = NL80211_HT_CAPABILITY_LEN },
109
110 [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
111 [NL80211_ATTR_IE] = { .type = NLA_BINARY,
112 .len = IEEE80211_MAX_DATA_LEN },
113 [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED },
114 [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED },
108}; 115};
109 116
110/* message building helper */ 117/* message building helper */
@@ -137,6 +144,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
137 144
138 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); 145 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx);
139 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 146 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
147 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
148 dev->wiphy.max_scan_ssids);
140 149
141 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); 150 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
142 if (!nl_modes) 151 if (!nl_modes)
@@ -738,7 +747,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
738 if (info->attrs[NL80211_ATTR_KEY_IDX]) 747 if (info->attrs[NL80211_ATTR_KEY_IDX])
739 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); 748 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
740 749
741 if (key_idx > 3) 750 if (key_idx > 5)
742 return -EINVAL; 751 return -EINVAL;
743 752
744 if (info->attrs[NL80211_ATTR_MAC]) 753 if (info->attrs[NL80211_ATTR_MAC])
@@ -804,30 +813,41 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
804 int err; 813 int err;
805 struct net_device *dev; 814 struct net_device *dev;
806 u8 key_idx; 815 u8 key_idx;
816 int (*func)(struct wiphy *wiphy, struct net_device *netdev,
817 u8 key_index);
807 818
808 if (!info->attrs[NL80211_ATTR_KEY_IDX]) 819 if (!info->attrs[NL80211_ATTR_KEY_IDX])
809 return -EINVAL; 820 return -EINVAL;
810 821
811 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); 822 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
812 823
813 if (key_idx > 3) 824 if (info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]) {
825 if (key_idx < 4 || key_idx > 5)
826 return -EINVAL;
827 } else if (key_idx > 3)
814 return -EINVAL; 828 return -EINVAL;
815 829
816 /* currently only support setting default key */ 830 /* currently only support setting default key */
817 if (!info->attrs[NL80211_ATTR_KEY_DEFAULT]) 831 if (!info->attrs[NL80211_ATTR_KEY_DEFAULT] &&
832 !info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT])
818 return -EINVAL; 833 return -EINVAL;
819 834
820 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 835 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
821 if (err) 836 if (err)
822 return err; 837 return err;
823 838
824 if (!drv->ops->set_default_key) { 839 if (info->attrs[NL80211_ATTR_KEY_DEFAULT])
840 func = drv->ops->set_default_key;
841 else
842 func = drv->ops->set_default_mgmt_key;
843
844 if (!func) {
825 err = -EOPNOTSUPP; 845 err = -EOPNOTSUPP;
826 goto out; 846 goto out;
827 } 847 }
828 848
829 rtnl_lock(); 849 rtnl_lock();
830 err = drv->ops->set_default_key(&drv->wiphy, dev, key_idx); 850 err = func(&drv->wiphy, dev, key_idx);
831 rtnl_unlock(); 851 rtnl_unlock();
832 852
833 out: 853 out:
@@ -863,7 +883,7 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
863 if (info->attrs[NL80211_ATTR_MAC]) 883 if (info->attrs[NL80211_ATTR_MAC])
864 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 884 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
865 885
866 if (key_idx > 3) 886 if (key_idx > 5)
867 return -EINVAL; 887 return -EINVAL;
868 888
869 /* 889 /*
@@ -894,6 +914,10 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
894 if (params.key_len != 13) 914 if (params.key_len != 13)
895 return -EINVAL; 915 return -EINVAL;
896 break; 916 break;
917 case WLAN_CIPHER_SUITE_AES_CMAC:
918 if (params.key_len != 16)
919 return -EINVAL;
920 break;
897 default: 921 default:
898 return -EINVAL; 922 return -EINVAL;
899 } 923 }
@@ -928,7 +952,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
928 if (info->attrs[NL80211_ATTR_KEY_IDX]) 952 if (info->attrs[NL80211_ATTR_KEY_IDX])
929 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); 953 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
930 954
931 if (key_idx > 3) 955 if (key_idx > 5)
932 return -EINVAL; 956 return -EINVAL;
933 957
934 if (info->attrs[NL80211_ATTR_MAC]) 958 if (info->attrs[NL80211_ATTR_MAC])
@@ -1889,6 +1913,11 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
1889 mutex_lock(&cfg80211_drv_mutex); 1913 mutex_lock(&cfg80211_drv_mutex);
1890 r = __regulatory_hint(NULL, REGDOM_SET_BY_USER, data, 0, ENVIRON_ANY); 1914 r = __regulatory_hint(NULL, REGDOM_SET_BY_USER, data, 0, ENVIRON_ANY);
1891 mutex_unlock(&cfg80211_drv_mutex); 1915 mutex_unlock(&cfg80211_drv_mutex);
1916 /* This means the regulatory domain was already set; however,
1917 * we don't want to confuse userspace with a "successful error"
1918 * message so let's just treat it as a success */
1919 if (r == -EALREADY)
1920 r = 0;
1892 return r; 1921 return r;
1893} 1922}
1894 1923
@@ -2069,6 +2098,81 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
2069 2098
2070#undef FILL_IN_MESH_PARAM_IF_SET 2099#undef FILL_IN_MESH_PARAM_IF_SET
2071 2100
2101static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
2102{
2103 struct sk_buff *msg;
2104 void *hdr = NULL;
2105 struct nlattr *nl_reg_rules;
2106 unsigned int i;
2107 int err = -EINVAL;
2108
2109 mutex_lock(&cfg80211_drv_mutex);
2110
2111 if (!cfg80211_regdomain)
2112 goto out;
2113
2114 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2115 if (!msg) {
2116 err = -ENOBUFS;
2117 goto out;
2118 }
2119
2120 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
2121 NL80211_CMD_GET_REG);
2122 if (!hdr)
2123 goto nla_put_failure;
2124
2125 NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
2126 cfg80211_regdomain->alpha2);
2127
2128 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
2129 if (!nl_reg_rules)
2130 goto nla_put_failure;
2131
2132 for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
2133 struct nlattr *nl_reg_rule;
2134 const struct ieee80211_reg_rule *reg_rule;
2135 const struct ieee80211_freq_range *freq_range;
2136 const struct ieee80211_power_rule *power_rule;
2137
2138 reg_rule = &cfg80211_regdomain->reg_rules[i];
2139 freq_range = &reg_rule->freq_range;
2140 power_rule = &reg_rule->power_rule;
2141
2142 nl_reg_rule = nla_nest_start(msg, i);
2143 if (!nl_reg_rule)
2144 goto nla_put_failure;
2145
2146 NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS,
2147 reg_rule->flags);
2148 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START,
2149 freq_range->start_freq_khz);
2150 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END,
2151 freq_range->end_freq_khz);
2152 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
2153 freq_range->max_bandwidth_khz);
2154 NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
2155 power_rule->max_antenna_gain);
2156 NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
2157 power_rule->max_eirp);
2158
2159 nla_nest_end(msg, nl_reg_rule);
2160 }
2161
2162 nla_nest_end(msg, nl_reg_rules);
2163
2164 genlmsg_end(msg, hdr);
2165 err = genlmsg_unicast(msg, info->snd_pid);
2166 goto out;
2167
2168nla_put_failure:
2169 genlmsg_cancel(msg, hdr);
2170 err = -EMSGSIZE;
2171out:
2172 mutex_unlock(&cfg80211_drv_mutex);
2173 return err;
2174}
2175
2072static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) 2176static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2073{ 2177{
2074 struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; 2178 struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
@@ -2134,6 +2238,283 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2134 return -EINVAL; 2238 return -EINVAL;
2135} 2239}
2136 2240
2241static int nl80211_set_mgmt_extra_ie(struct sk_buff *skb,
2242 struct genl_info *info)
2243{
2244 struct cfg80211_registered_device *drv;
2245 int err;
2246 struct net_device *dev;
2247 struct mgmt_extra_ie_params params;
2248
2249 memset(&params, 0, sizeof(params));
2250
2251 if (!info->attrs[NL80211_ATTR_MGMT_SUBTYPE])
2252 return -EINVAL;
2253 params.subtype = nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]);
2254 if (params.subtype > 15)
2255 return -EINVAL; /* FC Subtype field is 4 bits (0..15) */
2256
2257 if (info->attrs[NL80211_ATTR_IE]) {
2258 params.ies = nla_data(info->attrs[NL80211_ATTR_IE]);
2259 params.ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2260 }
2261
2262 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2263 if (err)
2264 return err;
2265
2266 if (drv->ops->set_mgmt_extra_ie) {
2267 rtnl_lock();
2268 err = drv->ops->set_mgmt_extra_ie(&drv->wiphy, dev, &params);
2269 rtnl_unlock();
2270 } else
2271 err = -EOPNOTSUPP;
2272
2273 cfg80211_put_dev(drv);
2274 dev_put(dev);
2275 return err;
2276}
2277
2278static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2279{
2280 struct cfg80211_registered_device *drv;
2281 struct net_device *dev;
2282 struct cfg80211_scan_request *request;
2283 struct cfg80211_ssid *ssid;
2284 struct ieee80211_channel *channel;
2285 struct nlattr *attr;
2286 struct wiphy *wiphy;
2287 int err, tmp, n_ssids = 0, n_channels = 0, i;
2288 enum ieee80211_band band;
2289
2290 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2291 if (err)
2292 return err;
2293
2294 wiphy = &drv->wiphy;
2295
2296 if (!drv->ops->scan) {
2297 err = -EOPNOTSUPP;
2298 goto out;
2299 }
2300
2301 rtnl_lock();
2302
2303 if (drv->scan_req) {
2304 err = -EBUSY;
2305 goto out_unlock;
2306 }
2307
2308 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
2309 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp)
2310 n_channels++;
2311 if (!n_channels) {
2312 err = -EINVAL;
2313 goto out_unlock;
2314 }
2315 } else {
2316 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
2317 if (wiphy->bands[band])
2318 n_channels += wiphy->bands[band]->n_channels;
2319 }
2320
2321 if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
2322 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)
2323 n_ssids++;
2324
2325 if (n_ssids > wiphy->max_scan_ssids) {
2326 err = -EINVAL;
2327 goto out_unlock;
2328 }
2329
2330 request = kzalloc(sizeof(*request)
2331 + sizeof(*ssid) * n_ssids
2332 + sizeof(channel) * n_channels, GFP_KERNEL);
2333 if (!request) {
2334 err = -ENOMEM;
2335 goto out_unlock;
2336 }
2337
2338 request->channels = (void *)((char *)request + sizeof(*request));
2339 request->n_channels = n_channels;
2340 if (n_ssids)
2341 request->ssids = (void *)(request->channels + n_channels);
2342 request->n_ssids = n_ssids;
2343
2344 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
2345 /* user specified, bail out if channel not found */
2346 request->n_channels = n_channels;
2347 i = 0;
2348 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) {
2349 request->channels[i] = ieee80211_get_channel(wiphy, nla_get_u32(attr));
2350 if (!request->channels[i]) {
2351 err = -EINVAL;
2352 goto out_free;
2353 }
2354 i++;
2355 }
2356 } else {
2357 /* all channels */
2358 i = 0;
2359 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
2360 int j;
2361 if (!wiphy->bands[band])
2362 continue;
2363 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
2364 request->channels[i] = &wiphy->bands[band]->channels[j];
2365 i++;
2366 }
2367 }
2368 }
2369
2370 i = 0;
2371 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
2372 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
2373 if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
2374 err = -EINVAL;
2375 goto out_free;
2376 }
2377 memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
2378 request->ssids[i].ssid_len = nla_len(attr);
2379 i++;
2380 }
2381 }
2382
2383 request->ifidx = dev->ifindex;
2384 request->wiphy = &drv->wiphy;
2385
2386 drv->scan_req = request;
2387 err = drv->ops->scan(&drv->wiphy, dev, request);
2388
2389 out_free:
2390 if (err) {
2391 drv->scan_req = NULL;
2392 kfree(request);
2393 }
2394 out_unlock:
2395 rtnl_unlock();
2396 out:
2397 cfg80211_put_dev(drv);
2398 dev_put(dev);
2399 return err;
2400}
2401
2402static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
2403 struct cfg80211_registered_device *rdev,
2404 struct net_device *dev,
2405 struct cfg80211_bss *res)
2406{
2407 void *hdr;
2408 struct nlattr *bss;
2409
2410 hdr = nl80211hdr_put(msg, pid, seq, flags,
2411 NL80211_CMD_NEW_SCAN_RESULTS);
2412 if (!hdr)
2413 return -1;
2414
2415 NLA_PUT_U32(msg, NL80211_ATTR_SCAN_GENERATION,
2416 rdev->bss_generation);
2417 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
2418
2419 bss = nla_nest_start(msg, NL80211_ATTR_BSS);
2420 if (!bss)
2421 goto nla_put_failure;
2422 if (!is_zero_ether_addr(res->bssid))
2423 NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid);
2424 if (res->information_elements && res->len_information_elements)
2425 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
2426 res->len_information_elements,
2427 res->information_elements);
2428 if (res->tsf)
2429 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
2430 if (res->beacon_interval)
2431 NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval);
2432 NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability);
2433 NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq);
2434
2435 switch (res->signal_type) {
2436 case CFG80211_SIGNAL_TYPE_MBM:
2437 NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal);
2438 break;
2439 case CFG80211_SIGNAL_TYPE_UNSPEC:
2440 NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal);
2441 break;
2442 default:
2443 break;
2444 }
2445
2446 nla_nest_end(msg, bss);
2447
2448 return genlmsg_end(msg, hdr);
2449
2450 nla_put_failure:
2451 genlmsg_cancel(msg, hdr);
2452 return -EMSGSIZE;
2453}
2454
2455static int nl80211_dump_scan(struct sk_buff *skb,
2456 struct netlink_callback *cb)
2457{
2458 struct cfg80211_registered_device *dev;
2459 struct net_device *netdev;
2460 struct cfg80211_internal_bss *scan;
2461 int ifidx = cb->args[0];
2462 int start = cb->args[1], idx = 0;
2463 int err;
2464
2465 if (!ifidx) {
2466 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
2467 nl80211_fam.attrbuf, nl80211_fam.maxattr,
2468 nl80211_policy);
2469 if (err)
2470 return err;
2471
2472 if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
2473 return -EINVAL;
2474
2475 ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
2476 if (!ifidx)
2477 return -EINVAL;
2478 cb->args[0] = ifidx;
2479 }
2480
2481 netdev = dev_get_by_index(&init_net, ifidx);
2482 if (!netdev)
2483 return -ENODEV;
2484
2485 dev = cfg80211_get_dev_from_ifindex(ifidx);
2486 if (IS_ERR(dev)) {
2487 err = PTR_ERR(dev);
2488 goto out_put_netdev;
2489 }
2490
2491 spin_lock_bh(&dev->bss_lock);
2492 cfg80211_bss_expire(dev);
2493
2494 list_for_each_entry(scan, &dev->bss_list, list) {
2495 if (++idx <= start)
2496 continue;
2497 if (nl80211_send_bss(skb,
2498 NETLINK_CB(cb->skb).pid,
2499 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2500 dev, netdev, &scan->pub) < 0) {
2501 idx--;
2502 goto out;
2503 }
2504 }
2505
2506 out:
2507 spin_unlock_bh(&dev->bss_lock);
2508
2509 cb->args[1] = idx;
2510 err = skb->len;
2511 cfg80211_put_dev(dev);
2512 out_put_netdev:
2513 dev_put(netdev);
2514
2515 return err;
2516}
2517
2137static struct genl_ops nl80211_ops[] = { 2518static struct genl_ops nl80211_ops[] = {
2138 { 2519 {
2139 .cmd = NL80211_CMD_GET_WIPHY, 2520 .cmd = NL80211_CMD_GET_WIPHY,
@@ -2272,6 +2653,12 @@ static struct genl_ops nl80211_ops[] = {
2272 .flags = GENL_ADMIN_PERM, 2653 .flags = GENL_ADMIN_PERM,
2273 }, 2654 },
2274 { 2655 {
2656 .cmd = NL80211_CMD_GET_REG,
2657 .doit = nl80211_get_reg,
2658 .policy = nl80211_policy,
2659 /* can be retrieved by unprivileged users */
2660 },
2661 {
2275 .cmd = NL80211_CMD_SET_REG, 2662 .cmd = NL80211_CMD_SET_REG,
2276 .doit = nl80211_set_reg, 2663 .doit = nl80211_set_reg,
2277 .policy = nl80211_policy, 2664 .policy = nl80211_policy,
@@ -2295,12 +2682,32 @@ static struct genl_ops nl80211_ops[] = {
2295 .policy = nl80211_policy, 2682 .policy = nl80211_policy,
2296 .flags = GENL_ADMIN_PERM, 2683 .flags = GENL_ADMIN_PERM,
2297 }, 2684 },
2685 {
2686 .cmd = NL80211_CMD_SET_MGMT_EXTRA_IE,
2687 .doit = nl80211_set_mgmt_extra_ie,
2688 .policy = nl80211_policy,
2689 .flags = GENL_ADMIN_PERM,
2690 },
2691 {
2692 .cmd = NL80211_CMD_TRIGGER_SCAN,
2693 .doit = nl80211_trigger_scan,
2694 .policy = nl80211_policy,
2695 .flags = GENL_ADMIN_PERM,
2696 },
2697 {
2698 .cmd = NL80211_CMD_GET_SCAN,
2699 .policy = nl80211_policy,
2700 .dumpit = nl80211_dump_scan,
2701 },
2298}; 2702};
2299 2703
2300/* multicast groups */ 2704/* multicast groups */
2301static struct genl_multicast_group nl80211_config_mcgrp = { 2705static struct genl_multicast_group nl80211_config_mcgrp = {
2302 .name = "config", 2706 .name = "config",
2303}; 2707};
2708static struct genl_multicast_group nl80211_scan_mcgrp = {
2709 .name = "scan",
2710};
2304 2711
2305/* notification functions */ 2712/* notification functions */
2306 2713
@@ -2320,6 +2727,66 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
2320 genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); 2727 genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL);
2321} 2728}
2322 2729
2730static int nl80211_send_scan_donemsg(struct sk_buff *msg,
2731 struct cfg80211_registered_device *rdev,
2732 struct net_device *netdev,
2733 u32 pid, u32 seq, int flags,
2734 u32 cmd)
2735{
2736 void *hdr;
2737
2738 hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
2739 if (!hdr)
2740 return -1;
2741
2742 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->idx);
2743 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
2744
2745 /* XXX: we should probably bounce back the request? */
2746
2747 return genlmsg_end(msg, hdr);
2748
2749 nla_put_failure:
2750 genlmsg_cancel(msg, hdr);
2751 return -EMSGSIZE;
2752}
2753
2754void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
2755 struct net_device *netdev)
2756{
2757 struct sk_buff *msg;
2758
2759 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2760 if (!msg)
2761 return;
2762
2763 if (nl80211_send_scan_donemsg(msg, rdev, netdev, 0, 0, 0,
2764 NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
2765 nlmsg_free(msg);
2766 return;
2767 }
2768
2769 genlmsg_multicast(msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL);
2770}
2771
2772void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
2773 struct net_device *netdev)
2774{
2775 struct sk_buff *msg;
2776
2777 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2778 if (!msg)
2779 return;
2780
2781 if (nl80211_send_scan_donemsg(msg, rdev, netdev, 0, 0, 0,
2782 NL80211_CMD_SCAN_ABORTED) < 0) {
2783 nlmsg_free(msg);
2784 return;
2785 }
2786
2787 genlmsg_multicast(msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL);
2788}
2789
2323/* initialisation/exit functions */ 2790/* initialisation/exit functions */
2324 2791
2325int nl80211_init(void) 2792int nl80211_init(void)
@@ -2340,6 +2807,10 @@ int nl80211_init(void)
2340 if (err) 2807 if (err)
2341 goto err_out; 2808 goto err_out;
2342 2809
2810 err = genl_register_mc_group(&nl80211_fam, &nl80211_scan_mcgrp);
2811 if (err)
2812 goto err_out;
2813
2343 return 0; 2814 return 0;
2344 err_out: 2815 err_out:
2345 genl_unregister_family(&nl80211_fam); 2816 genl_unregister_family(&nl80211_fam);
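On the driver side, the trigger-scan path above reduces to implementing the new .scan operation and reporting completion through cfg80211_scan_done(). A hedged sketch of that flow; struct my_priv, my_hw_start_scan() and the completion handler are invented for illustration, only the .scan signature and cfg80211_scan_done() come from this patch:

static int my_scan(struct wiphy *wiphy, struct net_device *dev,
		   struct cfg80211_scan_request *req)
{
	struct my_priv *priv = wiphy_priv(wiphy);

	/* cfg80211 has already stored req as drv->scan_req; keep our
	 * own pointer so the completion path can hand it back. */
	priv->scan_req = req;

	return my_hw_start_scan(priv, req->channels, req->n_channels,
				req->ssids, req->n_ssids);
}

static void my_scan_complete(struct my_priv *priv, bool aborted)
{
	/* frees the request and multicasts NL80211_CMD_NEW_SCAN_RESULTS
	 * (or NL80211_CMD_SCAN_ABORTED) to the new "scan" group */
	cfg80211_scan_done(priv->scan_req, aborted);
	priv->scan_req = NULL;
}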
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index f3ea5c029ae..b565a5f84e9 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -7,6 +7,10 @@
7extern int nl80211_init(void); 7extern int nl80211_init(void);
8extern void nl80211_exit(void); 8extern void nl80211_exit(void);
9extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); 9extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
10extern void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
11 struct net_device *netdev);
12extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
13 struct net_device *netdev);
10#else 14#else
11static inline int nl80211_init(void) 15static inline int nl80211_init(void)
12{ 16{
@@ -19,6 +23,10 @@ static inline void nl80211_notify_dev_rename(
19 struct cfg80211_registered_device *rdev) 23 struct cfg80211_registered_device *rdev)
20{ 24{
21} 25}
26static inline void
27nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
28 struct net_device *netdev)
29{}
22#endif /* CONFIG_NL80211 */ 30#endif /* CONFIG_NL80211 */
23 31
24#endif /* __NET_WIRELESS_NL80211_H */ 32#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 85c9034c59b..2323644330c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -42,38 +42,6 @@
42#include "core.h" 42#include "core.h"
43#include "reg.h" 43#include "reg.h"
44 44
45/**
46 * struct regulatory_request - receipt of last regulatory request
47 *
48 * @wiphy: this is set if this request's initiator is
49 * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This
50 * can be used by the wireless core to deal with conflicts
51 * and potentially inform users of which devices specifically
52 * cased the conflicts.
53 * @initiator: indicates who sent this request, could be any of
54 * of those set in reg_set_by, %REGDOM_SET_BY_*
55 * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested
56 * regulatory domain. We have a few special codes:
57 * 00 - World regulatory domain
58 * 99 - built by driver but a specific alpha2 cannot be determined
59 * 98 - result of an intersection between two regulatory domains
60 * @intersect: indicates whether the wireless core should intersect
61 * the requested regulatory domain with the presently set regulatory
62 * domain.
63 * @country_ie_checksum: checksum of the last processed and accepted
64 * country IE
65 * @country_ie_env: lets us know if the AP is telling us we are outdoor,
66 * indoor, or if it doesn't matter
67 */
68struct regulatory_request {
69 struct wiphy *wiphy;
70 enum reg_set_by initiator;
71 char alpha2[2];
72 bool intersect;
73 u32 country_ie_checksum;
74 enum environment_cap country_ie_env;
75};
76
77/* Receipt of information from last regulatory request */ 45/* Receipt of information from last regulatory request */
78static struct regulatory_request *last_request; 46static struct regulatory_request *last_request;
79 47
@@ -89,7 +57,7 @@ static u32 supported_bandwidths[] = {
89/* Central wireless core regulatory domains, we only need two, 57/* Central wireless core regulatory domains, we only need two,
90 * the current one and a world regulatory domain in case we have no 58 * the current one and a world regulatory domain in case we have no
91 * information to give us an alpha2 */ 59 * information to give us an alpha2 */
92static const struct ieee80211_regdomain *cfg80211_regdomain; 60const struct ieee80211_regdomain *cfg80211_regdomain;
93 61
94/* We use this as a place for the rd structure built from the 62/* We use this as a place for the rd structure built from the
95 * last parsed country IE to rest until CRDA gets back to us with 63 * last parsed country IE to rest until CRDA gets back to us with
@@ -790,42 +758,35 @@ static u32 map_regdom_flags(u32 rd_flags)
790 return channel_flags; 758 return channel_flags;
791} 759}
792 760
793/** 761static int freq_reg_info_regd(struct wiphy *wiphy,
794 * freq_reg_info - get regulatory information for the given frequency 762 u32 center_freq,
795 * @center_freq: Frequency in KHz for which we want regulatory information for 763 u32 *bandwidth,
796 * @bandwidth: the bandwidth requirement you have in KHz, if you do not have one 764 const struct ieee80211_reg_rule **reg_rule,
797 * you can set this to 0. If this frequency is allowed we then set 765 const struct ieee80211_regdomain *custom_regd)
798 * this value to the maximum allowed bandwidth.
799 * @reg_rule: the regulatory rule which we have for this frequency
800 *
801 * Use this function to get the regulatory rule for a specific frequency on
802 * a given wireless device. If the device has a specific regulatory domain
803 * it wants to follow we respect that unless a country IE has been received
804 * and processed already.
805 *
806 * Returns 0 if it was able to find a valid regulatory rule which does
807 * apply to the given center_freq otherwise it returns non-zero. It will
808 * also return -ERANGE if we determine the given center_freq does not even have
809 * a regulatory rule for a frequency range in the center_freq's band. See
810 * freq_in_rule_band() for our current definition of a band -- this is purely
811 * subjective and right now its 802.11 specific.
812 */
813static int freq_reg_info(u32 center_freq, u32 *bandwidth,
814 const struct ieee80211_reg_rule **reg_rule)
815{ 766{
816 int i; 767 int i;
817 bool band_rule_found = false; 768 bool band_rule_found = false;
769 const struct ieee80211_regdomain *regd;
818 u32 max_bandwidth = 0; 770 u32 max_bandwidth = 0;
819 771
820 if (!cfg80211_regdomain) 772 regd = custom_regd ? custom_regd : cfg80211_regdomain;
773
774 /* Follow the driver's regulatory domain, if present, unless a country
775 * IE has been processed or a user wants to help compliance further */
776 if (last_request->initiator != REGDOM_SET_BY_COUNTRY_IE &&
777 last_request->initiator != REGDOM_SET_BY_USER &&
778 wiphy->regd)
779 regd = wiphy->regd;
780
781 if (!regd)
821 return -EINVAL; 782 return -EINVAL;
822 783
823 for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) { 784 for (i = 0; i < regd->n_reg_rules; i++) {
824 const struct ieee80211_reg_rule *rr; 785 const struct ieee80211_reg_rule *rr;
825 const struct ieee80211_freq_range *fr = NULL; 786 const struct ieee80211_freq_range *fr = NULL;
826 const struct ieee80211_power_rule *pr = NULL; 787 const struct ieee80211_power_rule *pr = NULL;
827 788
828 rr = &cfg80211_regdomain->reg_rules[i]; 789 rr = &regd->reg_rules[i];
829 fr = &rr->freq_range; 790 fr = &rr->freq_range;
830 pr = &rr->power_rule; 791 pr = &rr->power_rule;
831 792
@@ -849,6 +810,14 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth,
849 810
850 return !max_bandwidth; 811 return !max_bandwidth;
851} 812}
813EXPORT_SYMBOL(freq_reg_info);
814
815int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
816 const struct ieee80211_reg_rule **reg_rule)
817{
818 return freq_reg_info_regd(wiphy, center_freq,
819 bandwidth, reg_rule, NULL);
820}
852 821
853static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, 822static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
854 unsigned int chan_idx) 823 unsigned int chan_idx)
@@ -867,7 +836,7 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
867 836
868 flags = chan->orig_flags; 837 flags = chan->orig_flags;
869 838
870 r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), 839 r = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq),
871 &max_bandwidth, &reg_rule); 840 &max_bandwidth, &reg_rule);
872 841
873 if (r) { 842 if (r) {
@@ -907,6 +876,22 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
907 876
908 power_rule = &reg_rule->power_rule; 877 power_rule = &reg_rule->power_rule;
909 878
879 if (last_request->initiator == REGDOM_SET_BY_DRIVER &&
880 last_request->wiphy && last_request->wiphy == wiphy &&
881 last_request->wiphy->strict_regulatory) {
882 /* This guarantees the driver's requested regulatory domain
883 * will always be used as a base for further regulatory
884 * settings */
885 chan->flags = chan->orig_flags =
886 map_regdom_flags(reg_rule->flags);
887 chan->max_antenna_gain = chan->orig_mag =
888 (int) MBI_TO_DBI(power_rule->max_antenna_gain);
889 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth);
890 chan->max_power = chan->orig_mpwr =
891 (int) MBM_TO_DBM(power_rule->max_eirp);
892 return;
893 }
894
910 chan->flags = flags | map_regdom_flags(reg_rule->flags); 895 chan->flags = flags | map_regdom_flags(reg_rule->flags);
911 chan->max_antenna_gain = min(chan->orig_mag, 896 chan->max_antenna_gain = min(chan->orig_mag,
912 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 897 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
@@ -935,7 +920,12 @@ static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby)
935 if (!last_request) 920 if (!last_request)
936 return true; 921 return true;
937 if (setby == REGDOM_SET_BY_CORE && 922 if (setby == REGDOM_SET_BY_CORE &&
938 wiphy->fw_handles_regulatory) 923 wiphy->custom_regulatory)
924 return true;
925 /* wiphy->regd will be set once the device has its own
926 * desired regulatory domain set */
927 if (wiphy->strict_regulatory && !wiphy->regd &&
928 !is_world_regdom(last_request->alpha2))
939 return true; 929 return true;
940 return false; 930 return false;
941} 931}
@@ -945,20 +935,103 @@ static void update_all_wiphy_regulatory(enum reg_set_by setby)
945 struct cfg80211_registered_device *drv; 935 struct cfg80211_registered_device *drv;
946 936
947 list_for_each_entry(drv, &cfg80211_drv_list, list) 937 list_for_each_entry(drv, &cfg80211_drv_list, list)
948 if (!ignore_reg_update(&drv->wiphy, setby)) 938 wiphy_update_regulatory(&drv->wiphy, setby);
949 wiphy_update_regulatory(&drv->wiphy, setby);
950} 939}
951 940
952void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby) 941void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby)
953{ 942{
954 enum ieee80211_band band; 943 enum ieee80211_band band;
944
945 if (ignore_reg_update(wiphy, setby))
946 return;
955 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 947 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
956 if (wiphy->bands[band]) 948 if (wiphy->bands[band])
957 handle_band(wiphy, band); 949 handle_band(wiphy, band);
958 if (wiphy->reg_notifier) 950 }
959 wiphy->reg_notifier(wiphy, setby); 951 if (wiphy->reg_notifier)
952 wiphy->reg_notifier(wiphy, last_request);
953}
954
955static void handle_channel_custom(struct wiphy *wiphy,
956 enum ieee80211_band band,
957 unsigned int chan_idx,
958 const struct ieee80211_regdomain *regd)
959{
960 int r;
961 u32 max_bandwidth = 0;
962 const struct ieee80211_reg_rule *reg_rule = NULL;
963 const struct ieee80211_power_rule *power_rule = NULL;
964 struct ieee80211_supported_band *sband;
965 struct ieee80211_channel *chan;
966
967 sband = wiphy->bands[band];
968 BUG_ON(chan_idx >= sband->n_channels);
969 chan = &sband->channels[chan_idx];
970
971 r = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
972 &max_bandwidth, &reg_rule, regd);
973
974 if (r) {
975 chan->flags = IEEE80211_CHAN_DISABLED;
976 return;
977 }
978
979 power_rule = &reg_rule->power_rule;
980
981 chan->flags |= map_regdom_flags(reg_rule->flags);
982 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
983 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth);
984 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
985}
986
987static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band,
988 const struct ieee80211_regdomain *regd)
989{
990 unsigned int i;
991 struct ieee80211_supported_band *sband;
992
993 BUG_ON(!wiphy->bands[band]);
994 sband = wiphy->bands[band];
995
996 for (i = 0; i < sband->n_channels; i++)
997 handle_channel_custom(wiphy, band, i, regd);
998}
999
1000/* Used by drivers prior to wiphy registration */
1001void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1002 const struct ieee80211_regdomain *regd)
1003{
1004 enum ieee80211_band band;
1005 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1006 if (wiphy->bands[band])
1007 handle_band_custom(wiphy, band, regd);
960 } 1008 }
961} 1009}
1010EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1011
1012static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
1013 const struct ieee80211_regdomain *src_regd)
1014{
1015 struct ieee80211_regdomain *regd;
1016 int size_of_regd = 0;
1017 unsigned int i;
1018
1019 size_of_regd = sizeof(struct ieee80211_regdomain) +
1020 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
1021
1022 regd = kzalloc(size_of_regd, GFP_KERNEL);
1023 if (!regd)
1024 return -ENOMEM;
1025
1026 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
1027
1028 for (i = 0; i < src_regd->n_reg_rules; i++)
1029 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
1030 sizeof(struct ieee80211_reg_rule));
1031
1032 *dst_regd = regd;
1033 return 0;
1034}
962 1035
963/* Return value which can be used by ignore_request() to indicate 1036/* Return value which can be used by ignore_request() to indicate
964 * it has been determined we should intersect two regulatory domains */ 1037 * it has been determined we should intersect two regulatory domains */
@@ -1007,9 +1080,14 @@ static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by,
1007 } 1080 }
1008 return REG_INTERSECT; 1081 return REG_INTERSECT;
1009 case REGDOM_SET_BY_DRIVER: 1082 case REGDOM_SET_BY_DRIVER:
1010 if (last_request->initiator == REGDOM_SET_BY_DRIVER) 1083 if (last_request->initiator == REGDOM_SET_BY_CORE) {
1084 if (is_old_static_regdom(cfg80211_regdomain))
1085 return 0;
1086 if (!alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
1087 return 0;
1011 return -EALREADY; 1088 return -EALREADY;
1012 return 0; 1089 }
1090 return REG_INTERSECT;
1013 case REGDOM_SET_BY_USER: 1091 case REGDOM_SET_BY_USER:
1014 if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) 1092 if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
1015 return REG_INTERSECT; 1093 return REG_INTERSECT;
@@ -1018,6 +1096,20 @@ static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by,
1018 if (last_request->initiator == REGDOM_SET_BY_USER && 1096 if (last_request->initiator == REGDOM_SET_BY_USER &&
1019 last_request->intersect) 1097 last_request->intersect)
1020 return -EOPNOTSUPP; 1098 return -EOPNOTSUPP;
1099 /* Process user requests only after previous user/driver/core
1100 * requests have been processed */
1101 if (last_request->initiator == REGDOM_SET_BY_CORE ||
1102 last_request->initiator == REGDOM_SET_BY_DRIVER ||
1103 last_request->initiator == REGDOM_SET_BY_USER) {
1104 if (!alpha2_equal(last_request->alpha2,
1105 cfg80211_regdomain->alpha2))
1106 return -EAGAIN;
1107 }
1108
1109 if (!is_old_static_regdom(cfg80211_regdomain) &&
1110 alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
1111 return -EALREADY;
1112
1021 return 0; 1113 return 0;
1022 } 1114 }
1023 1115
@@ -1036,11 +1128,28 @@ int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
1036 1128
1037 r = ignore_request(wiphy, set_by, alpha2); 1129 r = ignore_request(wiphy, set_by, alpha2);
1038 1130
1039 if (r == REG_INTERSECT) 1131 if (r == REG_INTERSECT) {
1132 if (set_by == REGDOM_SET_BY_DRIVER) {
1133 r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
1134 if (r)
1135 return r;
1136 }
1040 intersect = true; 1137 intersect = true;
1041 else if (r) 1138 } else if (r) {
1139 /* If the regulatory domain being requested by the
1140 * driver has already been set just copy it to the
1141 * wiphy */
1142 if (r == -EALREADY && set_by == REGDOM_SET_BY_DRIVER) {
1143 r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
1144 if (r)
1145 return r;
1146 r = -EALREADY;
1147 goto new_request;
1148 }
1042 return r; 1149 return r;
1150 }
1043 1151
1152new_request:
1044 request = kzalloc(sizeof(struct regulatory_request), 1153 request = kzalloc(sizeof(struct regulatory_request),
1045 GFP_KERNEL); 1154 GFP_KERNEL);
1046 if (!request) 1155 if (!request)
@@ -1056,6 +1165,11 @@ int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
1056 1165
1057 kfree(last_request); 1166 kfree(last_request);
1058 last_request = request; 1167 last_request = request;
1168
1169 /* When r == REG_INTERSECT we do need to call CRDA */
1170 if (r < 0)
1171 return r;
1172
1059 /* 1173 /*
1060 * Note: When CONFIG_WIRELESS_OLD_REGULATORY is enabled 1174 * Note: When CONFIG_WIRELESS_OLD_REGULATORY is enabled
1061 * AND if CRDA is NOT present nothing will happen, if someone 1175 * AND if CRDA is NOT present nothing will happen, if someone
@@ -1071,10 +1185,15 @@ int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
1071 1185
1072void regulatory_hint(struct wiphy *wiphy, const char *alpha2) 1186void regulatory_hint(struct wiphy *wiphy, const char *alpha2)
1073{ 1187{
1188 int r;
1074 BUG_ON(!alpha2); 1189 BUG_ON(!alpha2);
1075 1190
1076 mutex_lock(&cfg80211_drv_mutex); 1191 mutex_lock(&cfg80211_drv_mutex);
1077 __regulatory_hint(wiphy, REGDOM_SET_BY_DRIVER, alpha2, 0, ENVIRON_ANY); 1192 r = __regulatory_hint(wiphy, REGDOM_SET_BY_DRIVER,
1193 alpha2, 0, ENVIRON_ANY);
1194 /* This is required so that the orig_* parameters are saved */
1195 if (r == -EALREADY && wiphy->strict_regulatory)
1196 wiphy_update_regulatory(wiphy, REGDOM_SET_BY_DRIVER);
1078 mutex_unlock(&cfg80211_drv_mutex); 1197 mutex_unlock(&cfg80211_drv_mutex);
1079} 1198}
1080EXPORT_SYMBOL(regulatory_hint); 1199EXPORT_SYMBOL(regulatory_hint);
@@ -1247,7 +1366,7 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
1247 "domain intersected: \n"); 1366 "domain intersected: \n");
1248 } else 1367 } else
1249 printk(KERN_INFO "cfg80211: Current regulatory " 1368 printk(KERN_INFO "cfg80211: Current regulatory "
1250 "intersected: \n"); 1369 "domain intersected: \n");
1251 } else if (is_world_regdom(rd->alpha2)) 1370 } else if (is_world_regdom(rd->alpha2))
1252 printk(KERN_INFO "cfg80211: World regulatory " 1371 printk(KERN_INFO "cfg80211: World regulatory "
1253 "domain updated:\n"); 1372 "domain updated:\n");
@@ -1349,6 +1468,23 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
1349 } 1468 }
1350 1469
1351 if (!last_request->intersect) { 1470 if (!last_request->intersect) {
1471 int r;
1472
1473 if (last_request->initiator != REGDOM_SET_BY_DRIVER) {
1474 reset_regdomains();
1475 cfg80211_regdomain = rd;
1476 return 0;
1477 }
1478
1479 /* For a driver hint, let's copy the regulatory domain the
1480 * driver wanted to the wiphy to deal with conflicts */
1481
1482 BUG_ON(last_request->wiphy->regd);
1483
1484 r = reg_copy_regd(&last_request->wiphy->regd, rd);
1485 if (r)
1486 return r;
1487
1352 reset_regdomains(); 1488 reset_regdomains();
1353 cfg80211_regdomain = rd; 1489 cfg80211_regdomain = rd;
1354 return 0; 1490 return 0;
@@ -1362,8 +1498,14 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
1362 if (!intersected_rd) 1498 if (!intersected_rd)
1363 return -EINVAL; 1499 return -EINVAL;
1364 1500
1365 /* We can trash what CRDA provided now */ 1501 /* We can trash what CRDA provided now.
1366 kfree(rd); 1502 * However, if a driver requested this specific regulatory
1503 * domain, we keep it for its private use */
1504 if (last_request->initiator == REGDOM_SET_BY_DRIVER)
1505 last_request->wiphy->regd = rd;
1506 else
1507 kfree(rd);
1508
1367 rd = NULL; 1509 rd = NULL;
1368 1510
1369 reset_regdomains(); 1511 reset_regdomains();
@@ -1447,6 +1589,7 @@ int set_regdom(const struct ieee80211_regdomain *rd)
1447/* Caller must hold cfg80211_drv_mutex */ 1589/* Caller must hold cfg80211_drv_mutex */
1448void reg_device_remove(struct wiphy *wiphy) 1590void reg_device_remove(struct wiphy *wiphy)
1449{ 1591{
1592 kfree(wiphy->regd);
1450 if (!last_request || !last_request->wiphy) 1593 if (!last_request || !last_request->wiphy)
1451 return; 1594 return;
1452 if (last_request->wiphy != wiphy) 1595 if (last_request->wiphy != wiphy)
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index a76ea3ff7cd..fe8c83f34fb 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -1,6 +1,8 @@
1#ifndef __NET_WIRELESS_REG_H 1#ifndef __NET_WIRELESS_REG_H
2#define __NET_WIRELESS_REG_H 2#define __NET_WIRELESS_REG_H
3 3
4extern const struct ieee80211_regdomain *cfg80211_regdomain;
5
4bool is_world_regdom(const char *alpha2); 6bool is_world_regdom(const char *alpha2);
5bool reg_is_valid_request(const char *alpha2); 7bool reg_is_valid_request(const char *alpha2);
6 8
@@ -11,13 +13,6 @@ void regulatory_exit(void);
11 13
12int set_regdom(const struct ieee80211_regdomain *rd); 14int set_regdom(const struct ieee80211_regdomain *rd);
13 15
14enum environment_cap {
15 ENVIRON_ANY,
16 ENVIRON_INDOOR,
17 ENVIRON_OUTDOOR,
18};
19
20
21/** 16/**
22 * __regulatory_hint - hint to the wireless core a regulatory domain 17 * __regulatory_hint - hint to the wireless core a regulatory domain
23 * @wiphy: if the hint comes from country information from an AP, this 18 * @wiphy: if the hint comes from country information from an AP, this
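The regulatory rework above gives drivers two new entry points: wiphy_apply_custom_regulatory() for firmware-defined rule tables and regulatory_hint() for an alpha2 the hardware reports. A hedged sketch of both; the rule values and my_* names are illustrative, only the exported functions, flags and field names come from this patch (static initialisation of the flexible reg_rules[] array relies on the usual GCC extension):

/* Hypothetical 2.4 GHz-only table built by the driver ("99" = no alpha2) */
static const struct ieee80211_regdomain my_regdom = {
	.n_reg_rules = 1,
	.alpha2 = "99",
	.reg_rules = {
		{
			.freq_range.start_freq_khz = 2402000,
			.freq_range.end_freq_khz = 2482000,
			.freq_range.max_bandwidth_khz = 40000,
			.power_rule.max_antenna_gain = 600,	/* mBi */
			.power_rule.max_eirp = 2000,		/* mBm */
		},
	},
};

static void my_setup_regulatory(struct wiphy *wiphy)
{
	/* Either: apply our own table before wiphy_register() and tell
	 * the core not to override it on core hints ... */
	wiphy->custom_regulatory = true;
	wiphy_apply_custom_regulatory(wiphy, &my_regdom);

	/* ... or hint the country the EEPROM/firmware reported and let
	 * CRDA supply the rules. */
	regulatory_hint(wiphy, "US");
}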
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
new file mode 100644
index 00000000000..b1893c863b9
--- /dev/null
+++ b/net/wireless/scan.c
@@ -0,0 +1,836 @@
1/*
2 * cfg80211 scan result handling
3 *
4 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
5 */
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/netdevice.h>
9#include <linux/wireless.h>
10#include <linux/nl80211.h>
11#include <linux/etherdevice.h>
12#include <net/arp.h>
13#include <net/cfg80211.h>
14#include <net/iw_handler.h>
15#include "core.h"
16#include "nl80211.h"
17
18#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
19
20void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
21{
22 struct net_device *dev;
23#ifdef CONFIG_WIRELESS_EXT
24 union iwreq_data wrqu;
25#endif
26
27 dev = dev_get_by_index(&init_net, request->ifidx);
28 if (!dev)
29 goto out;
30
31 WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
32 wiphy_to_dev(request->wiphy)->scan_req = NULL;
33
34 if (aborted)
35 nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev);
36 else
37 nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev);
38
39#ifdef CONFIG_WIRELESS_EXT
40 if (!aborted) {
41 memset(&wrqu, 0, sizeof(wrqu));
42
43 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
44 }
45#endif
46
47 dev_put(dev);
48
49 out:
50 kfree(request);
51}
52EXPORT_SYMBOL(cfg80211_scan_done);
53
54static void bss_release(struct kref *ref)
55{
56 struct cfg80211_internal_bss *bss;
57
58 bss = container_of(ref, struct cfg80211_internal_bss, ref);
59 if (bss->pub.free_priv)
60 bss->pub.free_priv(&bss->pub);
61 kfree(bss);
62}
63
64/* must hold dev->bss_lock! */
65void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
66{
67 struct cfg80211_internal_bss *bss, *tmp;
68 bool expired = false;
69
70 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
71 if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
72 continue;
73 list_del(&bss->list);
74 rb_erase(&bss->rbn, &dev->bss_tree);
75 kref_put(&bss->ref, bss_release);
76 expired = true;
77 }
78
79 if (expired)
80 dev->bss_generation++;
81}
82
83static u8 *find_ie(u8 num, u8 *ies, size_t len)
84{
85 while (len > 2 && ies[0] != num) {
86 len -= ies[1] + 2;
87 ies += ies[1] + 2;
88 }
89 if (len < 2)
90 return NULL;
91 if (len < 2 + ies[1])
92 return NULL;
93 return ies;
94}
95
96static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
97{
98 const u8 *ie1 = find_ie(num, ies1, len1);
99 const u8 *ie2 = find_ie(num, ies2, len2);
100 int r;
101
102 if (!ie1 && !ie2)
103 return 0;
104 if (!ie1)
105 return -1;
106
107 r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1]));
108 if (r == 0 && ie1[1] != ie2[1])
109 return ie2[1] - ie1[1];
110 return r;
111}
112
113static bool is_bss(struct cfg80211_bss *a,
114 const u8 *bssid,
115 const u8 *ssid, size_t ssid_len)
116{
117 const u8 *ssidie;
118
119 if (bssid && compare_ether_addr(a->bssid, bssid))
120 return false;
121
122 if (!ssid)
123 return true;
124
125 ssidie = find_ie(WLAN_EID_SSID,
126 a->information_elements,
127 a->len_information_elements);
128 if (!ssidie)
129 return false;
130 if (ssidie[1] != ssid_len)
131 return false;
132 return memcmp(ssidie + 2, ssid, ssid_len) == 0;
133}
134
135static bool is_mesh(struct cfg80211_bss *a,
136 const u8 *meshid, size_t meshidlen,
137 const u8 *meshcfg)
138{
139 const u8 *ie;
140
141 if (!is_zero_ether_addr(a->bssid))
142 return false;
143
144 ie = find_ie(WLAN_EID_MESH_ID,
145 a->information_elements,
146 a->len_information_elements);
147 if (!ie)
148 return false;
149 if (ie[1] != meshidlen)
150 return false;
151 if (memcmp(ie + 2, meshid, meshidlen))
152 return false;
153
154 ie = find_ie(WLAN_EID_MESH_CONFIG,
155 a->information_elements,
156 a->len_information_elements);
157 if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
158 return false;
159
160 /*
161 * Ignore mesh capability (last two bytes of the IE) when
162 * comparing since that may differ between stations taking
163 * part in the same mesh.
164 */
165 return memcmp(ie + 2, meshcfg, IEEE80211_MESH_CONFIG_LEN - 2) == 0;
166}
167
168static int cmp_bss(struct cfg80211_bss *a,
169 struct cfg80211_bss *b)
170{
171 int r;
172
173 if (a->channel != b->channel)
174 return b->channel->center_freq - a->channel->center_freq;
175
176 r = memcmp(a->bssid, b->bssid, ETH_ALEN);
177 if (r)
178 return r;
179
180 if (is_zero_ether_addr(a->bssid)) {
181 r = cmp_ies(WLAN_EID_MESH_ID,
182 a->information_elements,
183 a->len_information_elements,
184 b->information_elements,
185 b->len_information_elements);
186 if (r)
187 return r;
188 return cmp_ies(WLAN_EID_MESH_CONFIG,
189 a->information_elements,
190 a->len_information_elements,
191 b->information_elements,
192 b->len_information_elements);
193 }
194
195 return cmp_ies(WLAN_EID_SSID,
196 a->information_elements,
197 a->len_information_elements,
198 b->information_elements,
199 b->len_information_elements);
200}
201
202struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
203 struct ieee80211_channel *channel,
204 const u8 *bssid,
205 const u8 *ssid, size_t ssid_len,
206 u16 capa_mask, u16 capa_val)
207{
208 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
209 struct cfg80211_internal_bss *bss, *res = NULL;
210
211 spin_lock_bh(&dev->bss_lock);
212
213 list_for_each_entry(bss, &dev->bss_list, list) {
214 if ((bss->pub.capability & capa_mask) != capa_val)
215 continue;
216 if (channel && bss->pub.channel != channel)
217 continue;
218 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
219 res = bss;
220 kref_get(&res->ref);
221 break;
222 }
223 }
224
225 spin_unlock_bh(&dev->bss_lock);
226 if (!res)
227 return NULL;
228 return &res->pub;
229}
230EXPORT_SYMBOL(cfg80211_get_bss);
231
232struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy,
233 struct ieee80211_channel *channel,
234 const u8 *meshid, size_t meshidlen,
235 const u8 *meshcfg)
236{
237 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
238 struct cfg80211_internal_bss *bss, *res = NULL;
239
240 spin_lock_bh(&dev->bss_lock);
241
242 list_for_each_entry(bss, &dev->bss_list, list) {
243 if (channel && bss->pub.channel != channel)
244 continue;
245 if (is_mesh(&bss->pub, meshid, meshidlen, meshcfg)) {
246 res = bss;
247 kref_get(&res->ref);
248 break;
249 }
250 }
251
252 spin_unlock_bh(&dev->bss_lock);
253 if (!res)
254 return NULL;
255 return &res->pub;
256}
257EXPORT_SYMBOL(cfg80211_get_mesh);
258
259
260static void rb_insert_bss(struct cfg80211_registered_device *dev,
261 struct cfg80211_internal_bss *bss)
262{
263 struct rb_node **p = &dev->bss_tree.rb_node;
264 struct rb_node *parent = NULL;
265 struct cfg80211_internal_bss *tbss;
266 int cmp;
267
268 while (*p) {
269 parent = *p;
270 tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn);
271
272 cmp = cmp_bss(&bss->pub, &tbss->pub);
273
274 if (WARN_ON(!cmp)) {
275 /* will sort of leak this BSS */
276 return;
277 }
278
279 if (cmp < 0)
280 p = &(*p)->rb_left;
281 else
282 p = &(*p)->rb_right;
283 }
284
285 rb_link_node(&bss->rbn, parent, p);
286 rb_insert_color(&bss->rbn, &dev->bss_tree);
287}
288
289static struct cfg80211_internal_bss *
290rb_find_bss(struct cfg80211_registered_device *dev,
291 struct cfg80211_internal_bss *res)
292{
293 struct rb_node *n = dev->bss_tree.rb_node;
294 struct cfg80211_internal_bss *bss;
295 int r;
296
297 while (n) {
298 bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
299 r = cmp_bss(&res->pub, &bss->pub);
300
301 if (r == 0)
302 return bss;
303 else if (r < 0)
304 n = n->rb_left;
305 else
306 n = n->rb_right;
307 }
308
309 return NULL;
310}
311
312static struct cfg80211_internal_bss *
313cfg80211_bss_update(struct cfg80211_registered_device *dev,
314 struct cfg80211_internal_bss *res,
315 bool overwrite)
316{
317 struct cfg80211_internal_bss *found = NULL;
318 const u8 *meshid, *meshcfg;
319
320 /*
321 * The reference to "res" is donated to this function.
322 */
323
324 if (WARN_ON(!res->pub.channel)) {
325 kref_put(&res->ref, bss_release);
326 return NULL;
327 }
328
329 res->ts = jiffies;
330
331 if (is_zero_ether_addr(res->pub.bssid)) {
332 /* must be mesh, verify */
333 meshid = find_ie(WLAN_EID_MESH_ID, res->pub.information_elements,
334 res->pub.len_information_elements);
335 meshcfg = find_ie(WLAN_EID_MESH_CONFIG,
336 res->pub.information_elements,
337 res->pub.len_information_elements);
338 if (!meshid || !meshcfg ||
339 meshcfg[1] != IEEE80211_MESH_CONFIG_LEN) {
340 /* bogus mesh */
341 kref_put(&res->ref, bss_release);
342 return NULL;
343 }
344 }
345
346 spin_lock_bh(&dev->bss_lock);
347
348 found = rb_find_bss(dev, res);
349
350 if (found && overwrite) {
351 list_replace(&found->list, &res->list);
352 rb_replace_node(&found->rbn, &res->rbn,
353 &dev->bss_tree);
354 kref_put(&found->ref, bss_release);
355 found = res;
356 } else if (found) {
357 kref_get(&found->ref);
358 found->pub.beacon_interval = res->pub.beacon_interval;
359 found->pub.tsf = res->pub.tsf;
360 found->pub.signal = res->pub.signal;
361 found->pub.signal_type = res->pub.signal_type;
362 found->pub.capability = res->pub.capability;
363 found->ts = res->ts;
364 kref_put(&res->ref, bss_release);
365 } else {
366 /* this "consumes" the reference */
367 list_add_tail(&res->list, &dev->bss_list);
368 rb_insert_bss(dev, res);
369 found = res;
370 }
371
372 dev->bss_generation++;
373 spin_unlock_bh(&dev->bss_lock);
374
375 kref_get(&found->ref);
376 return found;
377}
378
379struct cfg80211_bss *
380cfg80211_inform_bss_frame(struct wiphy *wiphy,
381 struct ieee80211_channel *channel,
382 struct ieee80211_mgmt *mgmt, size_t len,
383 s32 signal, enum cfg80211_signal_type sigtype,
384 gfp_t gfp)
385{
386 struct cfg80211_internal_bss *res;
387 size_t ielen = len - offsetof(struct ieee80211_mgmt,
388 u.probe_resp.variable);
389 bool overwrite;
390 size_t privsz = wiphy->bss_priv_size;
391
392 if (WARN_ON(sigtype == NL80211_BSS_SIGNAL_UNSPEC &&
393 (signal < 0 || signal > 100)))
394 return NULL;
395
396 if (WARN_ON(!mgmt || !wiphy ||
397 len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
398 return NULL;
399
400 res = kzalloc(sizeof(*res) + privsz + ielen, gfp);
401 if (!res)
402 return NULL;
403
404 memcpy(res->pub.bssid, mgmt->bssid, ETH_ALEN);
405 res->pub.channel = channel;
406 res->pub.signal_type = sigtype;
407 res->pub.signal = signal;
408 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
409 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
410 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
411 /* point to after the private area */
412 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz;
413 memcpy(res->pub.information_elements, mgmt->u.probe_resp.variable, ielen);
414 res->pub.len_information_elements = ielen;
415
416 kref_init(&res->ref);
417
418 overwrite = ieee80211_is_probe_resp(mgmt->frame_control);
419
420 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, overwrite);
421 if (!res)
422 return NULL;
423
424 /* cfg80211_bss_update gives us a referenced result */
425 return &res->pub;
426}
427EXPORT_SYMBOL(cfg80211_inform_bss_frame);
428
429void cfg80211_put_bss(struct cfg80211_bss *pub)
430{
431 struct cfg80211_internal_bss *bss;
432
433 if (!pub)
434 return;
435
436 bss = container_of(pub, struct cfg80211_internal_bss, pub);
437 kref_put(&bss->ref, bss_release);
438}
439EXPORT_SYMBOL(cfg80211_put_bss);
440
441void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
442{
443 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
444 struct cfg80211_internal_bss *bss;
445
446 if (WARN_ON(!pub))
447 return;
448
449 bss = container_of(pub, struct cfg80211_internal_bss, pub);
450
451 spin_lock_bh(&dev->bss_lock);
452
453 list_del(&bss->list);
454 rb_erase(&bss->rbn, &dev->bss_tree);
455
456 spin_unlock_bh(&dev->bss_lock);
457
458 kref_put(&bss->ref, bss_release);
459}
460EXPORT_SYMBOL(cfg80211_unlink_bss);
461
462#ifdef CONFIG_WIRELESS_EXT
463int cfg80211_wext_siwscan(struct net_device *dev,
464 struct iw_request_info *info,
465 union iwreq_data *wrqu, char *extra)
466{
467 struct cfg80211_registered_device *rdev;
468 struct wiphy *wiphy;
469 struct iw_scan_req *wreq = NULL;
470 struct cfg80211_scan_request *creq;
471 int i, err, n_channels = 0;
472 enum ieee80211_band band;
473
474 if (!netif_running(dev))
475 return -ENETDOWN;
476
477 rdev = cfg80211_get_dev_from_ifindex(dev->ifindex);
478
479 if (IS_ERR(rdev))
480 return PTR_ERR(rdev);
481
482 if (rdev->scan_req) {
483 err = -EBUSY;
484 goto out;
485 }
486
487 wiphy = &rdev->wiphy;
488
489 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
490 if (wiphy->bands[band])
491 n_channels += wiphy->bands[band]->n_channels;
492
493 creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
494 n_channels * sizeof(void *),
495 GFP_ATOMIC);
496 if (!creq) {
497 err = -ENOMEM;
498 goto out;
499 }
500
501 creq->wiphy = wiphy;
502 creq->ifidx = dev->ifindex;
503 creq->ssids = (void *)(creq + 1);
504 creq->channels = (void *)(creq->ssids + 1);
505 creq->n_channels = n_channels;
506 creq->n_ssids = 1;
507
508 /* all channels */
509 i = 0;
510 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
511 int j;
512 if (!wiphy->bands[band])
513 continue;
514 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
515 creq->channels[i] = &wiphy->bands[band]->channels[j];
516 i++;
517 }
518 }
519
520 /* translate scan request */
521 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
522 wreq = (struct iw_scan_req *)extra;
523
524 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
525 if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
526 return -EINVAL;
527 memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
528 creq->ssids[0].ssid_len = wreq->essid_len;
529 }
530 if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE)
531 creq->n_ssids = 0;
532 }
533
534 rdev->scan_req = creq;
535 err = rdev->ops->scan(wiphy, dev, creq);
536 if (err) {
537 rdev->scan_req = NULL;
538 kfree(creq);
539 }
540 out:
541 cfg80211_put_dev(rdev);
542 return err;
543}
544EXPORT_SYMBOL(cfg80211_wext_siwscan);
545
546static void ieee80211_scan_add_ies(struct iw_request_info *info,
547 struct cfg80211_bss *bss,
548 char **current_ev, char *end_buf)
549{
550 u8 *pos, *end, *next;
551 struct iw_event iwe;
552
553 if (!bss->information_elements ||
554 !bss->len_information_elements)
555 return;
556
557 /*
558 * If needed, fragment the IEs buffer (at IE boundaries) into short
559 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages.
560 */
561 pos = bss->information_elements;
562 end = pos + bss->len_information_elements;
563
564 while (end - pos > IW_GENERIC_IE_MAX) {
565 next = pos + 2 + pos[1];
566 while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX)
567 next = next + 2 + next[1];
568
569 memset(&iwe, 0, sizeof(iwe));
570 iwe.cmd = IWEVGENIE;
571 iwe.u.data.length = next - pos;
572 *current_ev = iwe_stream_add_point(info, *current_ev,
573 end_buf, &iwe, pos);
574
575 pos = next;
576 }
577
578 if (end > pos) {
579 memset(&iwe, 0, sizeof(iwe));
580 iwe.cmd = IWEVGENIE;
581 iwe.u.data.length = end - pos;
582 *current_ev = iwe_stream_add_point(info, *current_ev,
583 end_buf, &iwe, pos);
584 }
585}
586
587
588static char *
589ieee80211_bss(struct iw_request_info *info,
590 struct cfg80211_internal_bss *bss,
591 char *current_ev, char *end_buf)
592{
593 struct iw_event iwe;
594 u8 *buf, *cfg, *p;
595 u8 *ie = bss->pub.information_elements;
596 int rem = bss->pub.len_information_elements, i;
597 bool ismesh = false;
598
599 memset(&iwe, 0, sizeof(iwe));
600 iwe.cmd = SIOCGIWAP;
601 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
602 memcpy(iwe.u.ap_addr.sa_data, bss->pub.bssid, ETH_ALEN);
603 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
604 IW_EV_ADDR_LEN);
605
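	/* wext gets both the channel number (freq.e = 0) and the center frequency scaled to Hz (m * 10^6) */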
606 memset(&iwe, 0, sizeof(iwe));
607 iwe.cmd = SIOCGIWFREQ;
608 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->pub.channel->center_freq);
609 iwe.u.freq.e = 0;
610 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
611 IW_EV_FREQ_LEN);
612
613 memset(&iwe, 0, sizeof(iwe));
614 iwe.cmd = SIOCGIWFREQ;
615 iwe.u.freq.m = bss->pub.channel->center_freq;
616 iwe.u.freq.e = 6;
617 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
618 IW_EV_FREQ_LEN);
619
620 if (bss->pub.signal_type != CFG80211_SIGNAL_TYPE_NONE) {
621 memset(&iwe, 0, sizeof(iwe));
622 iwe.cmd = IWEVQUAL;
623 iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED |
624 IW_QUAL_NOISE_INVALID |
625 IW_QUAL_QUAL_INVALID;
626 switch (bss->pub.signal_type) {
627 case CFG80211_SIGNAL_TYPE_MBM:
628 iwe.u.qual.level = bss->pub.signal / 100;
629 iwe.u.qual.updated |= IW_QUAL_DBM;
630 break;
631 case CFG80211_SIGNAL_TYPE_UNSPEC:
632 iwe.u.qual.level = bss->pub.signal;
633 break;
634 default:
635 /* not reached */
636 break;
637 }
638 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
639 &iwe, IW_EV_QUAL_LEN);
640 }
641
642 memset(&iwe, 0, sizeof(iwe));
643 iwe.cmd = SIOCGIWENCODE;
644 if (bss->pub.capability & WLAN_CAPABILITY_PRIVACY)
645 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
646 else
647 iwe.u.data.flags = IW_ENCODE_DISABLED;
648 iwe.u.data.length = 0;
649 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
650 &iwe, "");
651
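	/* walk the IE buffer: ie[0] is the element ID, ie[1] the payload length */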
652 while (rem >= 2) {
653 /* invalid data */
654 if (ie[1] > rem - 2)
655 break;
656
657 switch (ie[0]) {
658 case WLAN_EID_SSID:
659 memset(&iwe, 0, sizeof(iwe));
660 iwe.cmd = SIOCGIWESSID;
661 iwe.u.data.length = ie[1];
662 iwe.u.data.flags = 1;
663 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
664 &iwe, ie + 2);
665 break;
666 case WLAN_EID_MESH_ID:
667 memset(&iwe, 0, sizeof(iwe));
668 iwe.cmd = SIOCGIWESSID;
669 iwe.u.data.length = ie[1];
670 iwe.u.data.flags = 1;
671 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
672 &iwe, ie + 2);
673 break;
674 case WLAN_EID_MESH_CONFIG:
675 ismesh = true;
676 if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
677 break;
678 buf = kmalloc(50, GFP_ATOMIC);
679 if (!buf)
680 break;
681 cfg = ie + 2;
682 memset(&iwe, 0, sizeof(iwe));
683 iwe.cmd = IWEVCUSTOM;
684 sprintf(buf, "Mesh network (version %d)", cfg[0]);
685 iwe.u.data.length = strlen(buf);
686 current_ev = iwe_stream_add_point(info, current_ev,
687 end_buf,
688 &iwe, buf);
689 sprintf(buf, "Path Selection Protocol ID: "
690 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
691 cfg[4]);
692 iwe.u.data.length = strlen(buf);
693 current_ev = iwe_stream_add_point(info, current_ev,
694 end_buf,
695 &iwe, buf);
696 sprintf(buf, "Path Selection Metric ID: "
697 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
698 cfg[8]);
699 iwe.u.data.length = strlen(buf);
700 current_ev = iwe_stream_add_point(info, current_ev,
701 end_buf,
702 &iwe, buf);
703 sprintf(buf, "Congestion Control Mode ID: "
704 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
705 cfg[11], cfg[12]);
706 iwe.u.data.length = strlen(buf);
707 current_ev = iwe_stream_add_point(info, current_ev,
708 end_buf,
709 &iwe, buf);
710 sprintf(buf, "Channel Precedence: "
711 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
712 cfg[15], cfg[16]);
713 iwe.u.data.length = strlen(buf);
714 current_ev = iwe_stream_add_point(info, current_ev,
715 end_buf,
716 &iwe, buf);
717 kfree(buf);
718 break;
719 case WLAN_EID_SUPP_RATES:
720 case WLAN_EID_EXT_SUPP_RATES:
721 /* display all supported rates in readable format */
722 p = current_ev + iwe_stream_lcp_len(info);
723
724 memset(&iwe, 0, sizeof(iwe));
725 iwe.cmd = SIOCGIWRATE;
726 /* Those two flags are ignored... */
727 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
728
729 for (i = 0; i < ie[1]; i++) {
730 iwe.u.bitrate.value =
731 ((ie[i + 2] & 0x7f) * 500000);
732 p = iwe_stream_add_value(info, current_ev, p,
733 end_buf, &iwe, IW_EV_PARAM_LEN);
734 }
735 current_ev = p;
736 break;
737 }
738 rem -= ie[1] + 2;
739 ie += ie[1] + 2;
740 }
741
742 if (bss->pub.capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
743 || ismesh) {
744 memset(&iwe, 0, sizeof(iwe));
745 iwe.cmd = SIOCGIWMODE;
746 if (ismesh)
747 iwe.u.mode = IW_MODE_MESH;
748 else if (bss->pub.capability & WLAN_CAPABILITY_ESS)
749 iwe.u.mode = IW_MODE_MASTER;
750 else
751 iwe.u.mode = IW_MODE_ADHOC;
752 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
753 &iwe, IW_EV_UINT_LEN);
754 }
755
756 buf = kmalloc(31, GFP_ATOMIC); /* " Last beacon: %dms ago" can need 31 bytes with the NUL */
757 if (buf) {
758 memset(&iwe, 0, sizeof(iwe));
759 iwe.cmd = IWEVCUSTOM;
760 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->pub.tsf));
761 iwe.u.data.length = strlen(buf);
762 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
763 &iwe, buf);
764 memset(&iwe, 0, sizeof(iwe));
765 iwe.cmd = IWEVCUSTOM;
766 sprintf(buf, " Last beacon: %dms ago",
767 jiffies_to_msecs(jiffies - bss->ts));
768 iwe.u.data.length = strlen(buf);
769 current_ev = iwe_stream_add_point(info, current_ev,
770 end_buf, &iwe, buf);
771 kfree(buf);
772 }
773
774 ieee80211_scan_add_ies(info, &bss->pub, &current_ev, end_buf);
775
776 return current_ev;
777}
778
779
780static int ieee80211_scan_results(struct cfg80211_registered_device *dev,
781 struct iw_request_info *info,
782 char *buf, size_t len)
783{
784 char *current_ev = buf;
785 char *end_buf = buf + len;
786 struct cfg80211_internal_bss *bss;
787
788 spin_lock_bh(&dev->bss_lock);
789 cfg80211_bss_expire(dev);
790
791 list_for_each_entry(bss, &dev->bss_list, list) {
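		/* need at least room for the AP address event; otherwise tell user space the buffer is too small */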
792 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
793 spin_unlock_bh(&dev->bss_lock);
794 return -E2BIG;
795 }
796 current_ev = ieee80211_bss(info, bss,
797 current_ev, end_buf);
798 }
799 spin_unlock_bh(&dev->bss_lock);
800 return current_ev - buf;
801}
802
803
804int cfg80211_wext_giwscan(struct net_device *dev,
805 struct iw_request_info *info,
806 struct iw_point *data, char *extra)
807{
808 struct cfg80211_registered_device *rdev;
809 int res;
810
811 if (!netif_running(dev))
812 return -ENETDOWN;
813
814 rdev = cfg80211_get_dev_from_ifindex(dev->ifindex);
815
816 if (IS_ERR(rdev))
817 return PTR_ERR(rdev);
818
819 if (rdev->scan_req) {
820 res = -EAGAIN;
821 goto out;
822 }
823
824 res = ieee80211_scan_results(rdev, info, extra, data->length);
825 data->length = 0;
826 if (res >= 0) {
827 data->length = res;
828 res = 0;
829 }
830
831 out:
832 cfg80211_put_dev(rdev);
833 return res;
834}
835EXPORT_SYMBOL(cfg80211_wext_giwscan);
836#endif
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 79a38287764..26a72b0797a 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -55,6 +55,34 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
 }
 #endif
 
+static int wiphy_suspend(struct device *dev, pm_message_t state)
+{
+	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
+	int ret = 0;
+
+	if (rdev->ops->suspend) {
+		rtnl_lock();
+		ret = rdev->ops->suspend(&rdev->wiphy);
+		rtnl_unlock();
+	}
+
+	return ret;
+}
+
+static int wiphy_resume(struct device *dev)
+{
+	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
+	int ret = 0;
+
+	if (rdev->ops->resume) {
+		rtnl_lock();
+		ret = rdev->ops->resume(&rdev->wiphy);
+		rtnl_unlock();
+	}
+
+	return ret;
+}
+
 struct class ieee80211_class = {
 	.name = "ieee80211",
 	.owner = THIS_MODULE,
@@ -63,6 +91,8 @@ struct class ieee80211_class = {
 #ifdef CONFIG_HOTPLUG
 	.dev_uevent = wiphy_uevent,
 #endif
+	.suspend = wiphy_suspend,
+	.resume = wiphy_resume,
 };
 
 int wiphy_sysfs_init(void)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e76cc28b034..487cdd9bcff 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -9,7 +9,7 @@
 
 struct ieee80211_rate *
 ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
-			    u64 basic_rates, int bitrate)
+			    u32 basic_rates, int bitrate)
 {
 	struct ieee80211_rate *result = &sband->bitrates[0];
 	int i;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 9fc5b023d11..8f76f4009c2 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1609,7 +1609,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
 SOCKOPS_WRAP(x25_proto, AF_X25);
 
 static struct packet_type x25_packet_type = {
-	.type = __constant_htons(ETH_P_X25),
+	.type = cpu_to_be16(ETH_P_X25),
 	.func = x25_lapb_receive_frame,
 };
 